Commit cebe33f2 authored by Lei Zhang, committed by Commit Bot

Remove use of this->foo in partition_allocator code.

Most of them are unnecessary. One spot has parameters shadowing member
variables; fix that by renaming the parameters.
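
To illustrate the shadowing fix, a minimal sketch (RootBefore/RootAfter
are hypothetical stand-ins, not the real PartitionRoot):

  // Before: the parameter shadows the member, so this-> is the only
  // way to reach the member.
  struct RootBefore {
    size_t num_buckets;
    void Init(size_t num_buckets) { this->num_buckets = num_buckets; }
  };

  // After: renaming the parameter removes the shadowing, so the bare
  // member name is unambiguous.
  struct RootAfter {
    size_t num_buckets;
    void Init(size_t bucket_count) { num_buckets = bucket_count; }
  };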

Change-Id: I16c13588d40269d5c18bfe8877af31abc3ac5516
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1947828
Reviewed-by: Chris Palmer <palmer@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
Cr-Commit-Position: refs/heads/master@{#721710}
parent 1f684026
@@ -192,19 +192,19 @@ void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
   internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
 }
 
-void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
+void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
   PartitionAllocBaseInit(this);
 
-  this->num_buckets = num_buckets;
-  this->max_allocation = max_allocation;
-  for (size_t i = 0; i < this->num_buckets; ++i) {
-    internal::PartitionBucket* bucket = &this->buckets()[i];
-    bucket->Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
+  num_buckets = bucket_count;
+  max_allocation = maximum_allocation;
+  for (size_t i = 0; i < num_buckets; ++i) {
+    internal::PartitionBucket& bucket = buckets()[i];
+    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
   }
 }
 
 void PartitionRootGeneric::Init() {
-  subtle::SpinLock::Guard guard(this->lock);
+  subtle::SpinLock::Guard guard(lock);
 
   PartitionAllocBaseInit(this);
@@ -222,7 +222,7 @@ void PartitionRootGeneric::Init() {
       order_index_shift = 0;
     else
      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
-    this->order_index_shifts[order] = order_index_shift;
+    order_index_shifts[order] = order_index_shift;
     size_t sub_order_index_mask;
     if (order == kBitsPerSizeT) {
       // This avoids invoking undefined behavior for an excessive shift.
@@ -232,7 +232,7 @@ void PartitionRootGeneric::Init() {
       sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                              (kGenericNumBucketsPerOrderBits + 1);
     }
-    this->order_sub_index_masks[order] = sub_order_index_mask;
+    order_sub_index_masks[order] = sub_order_index_mask;
   }
 
   // Set up the actual usable buckets first.
@@ -245,7 +245,7 @@ void PartitionRootGeneric::Init() {
   size_t current_size = kGenericSmallestBucket;
   size_t current_increment =
       kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
-  internal::PartitionBucket* bucket = &this->buckets[0];
+  internal::PartitionBucket* bucket = &buckets[0];
   for (i = 0; i < kGenericNumBucketedOrders; ++i) {
     for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
       bucket->Init(current_size);
@@ -258,16 +258,16 @@ void PartitionRootGeneric::Init() {
     current_increment <<= 1;
   }
   DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
-  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
 
   // Then set up the fast size -> bucket lookup table.
-  bucket = &this->buckets[0];
-  internal::PartitionBucket** bucket_ptr = &this->bucket_lookups[0];
+  bucket = &buckets[0];
+  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
   for (order = 0; order <= kBitsPerSizeT; ++order) {
     for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
       if (order < kGenericMinBucketedOrder) {
         // Use the bucket of the finest granularity for malloc(0) etc.
-        *bucket_ptr++ = &this->buckets[0];
+        *bucket_ptr++ = &buckets[0];
       } else if (order > kGenericMaxBucketedOrder) {
         *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
       } else {
@@ -280,8 +280,8 @@ void PartitionRootGeneric::Init() {
       }
     }
   }
-  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
-  DCHECK(bucket_ptr == &this->bucket_lookups[0] +
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  DCHECK(bucket_ptr == &bucket_lookups[0] +
                        ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
   // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
   // which tries to overflow to a non-existant order.
@@ -619,12 +619,12 @@ void PartitionRoot::PurgeMemory(int flags) {
 }
 
 void PartitionRootGeneric::PurgeMemory(int flags) {
-  subtle::SpinLock::Guard guard(this->lock);
+  subtle::SpinLock::Guard guard(lock);
   if (flags & PartitionPurgeDecommitEmptyPages)
     DecommitEmptyPages();
   if (flags & PartitionPurgeDiscardUnusedSystemPages) {
     for (size_t i = 0; i < kGenericNumBuckets; ++i) {
-      internal::PartitionBucket* bucket = &this->buckets[i];
+      internal::PartitionBucket* bucket = &buckets[i];
       if (bucket->slot_size >= kSystemPageSize)
         PartitionPurgeBucket(bucket);
     }
@@ -716,8 +716,8 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
                                      PartitionStatsDumper* dumper) {
   PartitionMemoryStats stats = {0};
   stats.total_mmapped_bytes =
-      this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
-  stats.total_committed_bytes = this->total_size_of_committed_pages;
+      total_size_of_super_pages + total_size_of_direct_mapped_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
 
   size_t direct_mapped_allocations_total_size = 0;
@@ -734,10 +734,10 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
   PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
   size_t num_direct_mapped_allocations = 0;
   {
-    subtle::SpinLock::Guard guard(this->lock);
+    subtle::SpinLock::Guard guard(lock);
 
     for (size_t i = 0; i < kGenericNumBuckets; ++i) {
-      const internal::PartitionBucket* bucket = &this->buckets[i];
+      const internal::PartitionBucket* bucket = &buckets[i];
       // Don't report the pseudo buckets that the generic allocator sets up in
       // order to preserve a fast size->bucket map (see
       // PartitionRootGeneric::Init() for details).
@@ -753,7 +753,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
      }
    }
 
-    for (internal::PartitionDirectMapExtent *extent = this->direct_map_list;
+    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
         extent = extent->next_extent, ++num_direct_mapped_allocations) {
      DCHECK(!extent->next_extent ||
@@ -799,9 +799,9 @@ void PartitionRoot::DumpStats(const char* partition_name,
                               bool is_light_dump,
                               PartitionStatsDumper* dumper) {
   PartitionMemoryStats stats = {0};
-  stats.total_mmapped_bytes = this->total_size_of_super_pages;
-  stats.total_committed_bytes = this->total_size_of_committed_pages;
-  DCHECK(!this->total_size_of_direct_mapped_pages);
+  stats.total_mmapped_bytes = total_size_of_super_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
+  DCHECK(!total_size_of_direct_mapped_pages);
 
   static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
   std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
@@ -810,12 +810,12 @@ void PartitionRoot::DumpStats(const char* partition_name,
         new PartitionBucketMemoryStats[kMaxReportableBuckets]);
   }
 
-  const size_t partition_num_buckets = this->num_buckets;
+  const size_t partition_num_buckets = num_buckets;
   DCHECK(partition_num_buckets <= kMaxReportableBuckets);
 
   for (size_t i = 0; i < partition_num_buckets; ++i) {
     PartitionBucketMemoryStats bucket_stats = {0};
-    PartitionDumpBucketStats(&bucket_stats, &this->buckets()[i]);
+    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
     if (bucket_stats.is_valid) {
       stats.total_resident_bytes += bucket_stats.resident_bytes;
       stats.total_active_bytes += bucket_stats.active_bytes;
...
@@ -122,7 +122,7 @@ struct BASE_EXPORT PartitionRoot : public internal::PartitionRootBase {
     return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
   }
 
-  void Init(size_t num_buckets, size_t max_allocation);
+  void Init(size_t bucket_count, size_t maximum_allocation);
 
   ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
   ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
@@ -318,11 +318,11 @@ ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
   }
   size_t requested_size = size;
   size = internal::PartitionCookieSizeAdjustAdd(size);
-  DCHECK(this->initialized);
+  DCHECK(initialized);
   size_t index = size >> kBucketShift;
-  DCHECK(index < this->num_buckets);
+  DCHECK(index < num_buckets);
   DCHECK(size == index << kBucketShift);
-  internal::PartitionBucket* bucket = &this->buckets()[index];
+  internal::PartitionBucket* bucket = &buckets()[index];
   result = AllocFromBucket(bucket, flags, size);
   if (UNLIKELY(hooks_enabled)) {
     PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
@@ -447,7 +447,7 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   free(ptr);
 #else
-  DCHECK(this->initialized);
+  DCHECK(initialized);
 
   if (UNLIKELY(!ptr))
     return;
@@ -463,7 +463,7 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(IsValidPage(page));
   {
-    subtle::SpinLock::Guard guard(this->lock);
+    subtle::SpinLock::Guard guard(lock);
     page->Free(ptr);
   }
 #endif
@@ -479,7 +479,7 @@ ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   return size;
 #else
-  DCHECK(this->initialized);
+  DCHECK(initialized);
   size = internal::PartitionCookieSizeAdjustAdd(size);
   internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
   if (LIKELY(!bucket->is_direct_mapped())) {
...
@@ -133,23 +133,23 @@ uint8_t PartitionBucket::get_system_pages_per_slot_span() {
   // to using fewer system pages.
   double best_waste_ratio = 1.0f;
   uint16_t best_pages = 0;
-  if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+  if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
     // TODO(ajwong): Why is there a DCHECK here for this?
     // http://crbug.com/776537
-    DCHECK(!(this->slot_size % kSystemPageSize));
-    best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
+    DCHECK(!(slot_size % kSystemPageSize));
+    best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
     // TODO(ajwong): Should this be checking against
     // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
     // http://crbug.com/776537
     CHECK(best_pages < (1 << 8));
     return static_cast<uint8_t>(best_pages);
   }
-  DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+  DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
   for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
        i <= kMaxSystemPagesPerSlotSpan; ++i) {
     size_t page_size = kSystemPageSize * i;
-    size_t num_slots = page_size / this->slot_size;
-    size_t waste = page_size - (num_slots * this->slot_size);
+    size_t num_slots = page_size / slot_size;
+    size_t waste = page_size - (num_slots * slot_size);
     // Leaving a page unfaulted is not free; the page will occupy an empty page
     // table entry. Make a simple attempt to account for that.
     //
@@ -344,12 +344,12 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
   // We should only get here when _every_ slot is either used or unprovisioned.
   // (The third state is "on the freelist". If we have a non-empty freelist, we
   // should not get here.)
-  DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
+  DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
   // Similarly, make explicitly sure that the freelist is empty.
   DCHECK(!page->freelist_head);
   DCHECK(page->num_allocated_slots >= 0);
 
-  size_t size = this->slot_size;
+  size_t size = slot_size;
   char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
   char* return_object = base + (size * page->num_allocated_slots);
   char* first_freelist_pointer = return_object + size;
@@ -405,7 +405,7 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
 }
 
 bool PartitionBucket::SetNewActivePage() {
-  PartitionPage* page = this->active_pages_head;
+  PartitionPage* page = active_pages_head;
   if (page == PartitionPage::get_sentinel_page())
     return false;
@@ -414,40 +414,40 @@ bool PartitionBucket::SetNewActivePage() {
   for (; page; page = next_page) {
     next_page = page->next_page;
     DCHECK(page->bucket == this);
-    DCHECK(page != this->empty_pages_head);
-    DCHECK(page != this->decommitted_pages_head);
+    DCHECK(page != empty_pages_head);
+    DCHECK(page != decommitted_pages_head);
 
     if (LIKELY(page->is_active())) {
       // This page is usable because it has freelist entries, or has
       // unprovisioned slots we can create freelist entries from.
-      this->active_pages_head = page;
+      active_pages_head = page;
       return true;
     }
 
     // Deal with empty and decommitted pages.
     if (LIKELY(page->is_empty())) {
-      page->next_page = this->empty_pages_head;
-      this->empty_pages_head = page;
+      page->next_page = empty_pages_head;
+      empty_pages_head = page;
     } else if (LIKELY(page->is_decommitted())) {
-      page->next_page = this->decommitted_pages_head;
-      this->decommitted_pages_head = page;
+      page->next_page = decommitted_pages_head;
+      decommitted_pages_head = page;
     } else {
       DCHECK(page->is_full());
       // If we get here, we found a full page. Skip over it too, and also
       // tag it as full (via a negative value). We need it tagged so that
       // free'ing can tell, and move it back into the active page list.
       page->num_allocated_slots = -page->num_allocated_slots;
-      ++this->num_full_pages;
+      ++num_full_pages;
       // num_full_pages is a uint16_t for efficient packing so guard against
       // overflow to be safe.
-      if (UNLIKELY(!this->num_full_pages))
+      if (UNLIKELY(!num_full_pages))
        OnFull();
       // Not necessary but might help stop accidents.
       page->next_page = nullptr;
     }
   }
 
-  this->active_pages_head = PartitionPage::get_sentinel_page();
+  active_pages_head = PartitionPage::get_sentinel_page();
   return false;
 }
@@ -456,7 +456,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
                                      size_t size,
                                      bool* is_already_zeroed) {
   // The slow path is called when the freelist is empty.
-  DCHECK(!this->active_pages_head->freelist_head);
+  DCHECK(!active_pages_head->freelist_head);
 
   PartitionPage* new_page = nullptr;
   *is_already_zeroed = false;
@@ -471,10 +471,10 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
   // false where it sweeps the active page list and may move things into
   // the empty or decommitted lists which affects the subsequent conditional.
   bool return_null = flags & PartitionAllocReturnNull;
-  if (UNLIKELY(this->is_direct_mapped())) {
+  if (UNLIKELY(is_direct_mapped())) {
     DCHECK(size > kGenericMaxBucketed);
     DCHECK(this == get_sentinel_bucket());
-    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
     if (size > kGenericMaxDirectMapped) {
       if (return_null)
         return nullptr;
@@ -485,34 +485,33 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
     // Turn off the optimization to see if it helps https://crbug.com/892550.
     *is_already_zeroed = true;
 #endif
-  } else if (LIKELY(this->SetNewActivePage())) {
+  } else if (LIKELY(SetNewActivePage())) {
     // First, did we find an active page in the active pages list?
-    new_page = this->active_pages_head;
+    new_page = active_pages_head;
     DCHECK(new_page->is_active());
-  } else if (LIKELY(this->empty_pages_head != nullptr) ||
-             LIKELY(this->decommitted_pages_head != nullptr)) {
+  } else if (LIKELY(empty_pages_head != nullptr) ||
+             LIKELY(decommitted_pages_head != nullptr)) {
     // Second, look in our lists of empty and decommitted pages.
     // Check empty pages first, which are preferred, but beware that an
     // empty page might have been decommitted.
-    while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
+    while (LIKELY((new_page = empty_pages_head) != nullptr)) {
       DCHECK(new_page->bucket == this);
       DCHECK(new_page->is_empty() || new_page->is_decommitted());
-      this->empty_pages_head = new_page->next_page;
+      empty_pages_head = new_page->next_page;
       // Accept the empty page unless it got decommitted.
       if (new_page->freelist_head) {
         new_page->next_page = nullptr;
         break;
       }
       DCHECK(new_page->is_decommitted());
-      new_page->next_page = this->decommitted_pages_head;
-      this->decommitted_pages_head = new_page;
+      new_page->next_page = decommitted_pages_head;
+      decommitted_pages_head = new_page;
     }
-    if (UNLIKELY(!new_page) &&
-        LIKELY(this->decommitted_pages_head != nullptr)) {
-      new_page = this->decommitted_pages_head;
+    if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
+      new_page = decommitted_pages_head;
       DCHECK(new_page->bucket == this);
       DCHECK(new_page->is_decommitted());
-      this->decommitted_pages_head = new_page->next_page;
+      decommitted_pages_head = new_page->next_page;
       void* addr = PartitionPage::ToPointer(new_page);
       root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
       new_page->Reset();
@@ -523,7 +522,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
     DCHECK(new_page);
   } else {
     // Third. If we get here, we need a brand new page.
-    uint16_t num_partition_pages = this->get_pages_per_slot_span();
+    uint16_t num_partition_pages = get_pages_per_slot_span();
     void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
     if (LIKELY(raw_pages != nullptr)) {
       new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
@@ -536,7 +535,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
 
   // Bail if we had a memory allocation failure.
   if (UNLIKELY(!new_page)) {
-    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
     if (return_null)
       return nullptr;
     root->OutOfMemory();
...
@@ -92,7 +92,7 @@ PartitionPage* PartitionPage::get_sentinel_page() {
 
 void PartitionPage::FreeSlowPath() {
   DCHECK(this != get_sentinel_page());
-  if (LIKELY(this->num_allocated_slots == 0)) {
+  if (LIKELY(num_allocated_slots == 0)) {
     // Page became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
       PartitionDirectUnmap(this);
@@ -112,24 +112,24 @@ void PartitionPage::FreeSlowPath() {
     DCHECK(!bucket->is_direct_mapped());
     // Ensure that the page is full. That's the only valid case if we
     // arrive here.
-    DCHECK(this->num_allocated_slots < 0);
+    DCHECK(num_allocated_slots < 0);
     // A transition of num_allocated_slots from 0 to -1 is not legal, and
     // likely indicates a double-free.
-    CHECK(this->num_allocated_slots != -1);
-    this->num_allocated_slots = -this->num_allocated_slots - 2;
-    DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
+    CHECK(num_allocated_slots != -1);
+    num_allocated_slots = -num_allocated_slots - 2;
+    DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
     // Fully used page became partially used. It must be put back on the
     // non-full page list. Also make it the current page to increase the
     // chances of it being filled up again. The old current page will be
     // the next page.
-    DCHECK(!this->next_page);
+    DCHECK(!next_page);
     if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
-      this->next_page = bucket->active_pages_head;
+      next_page = bucket->active_pages_head;
     bucket->active_pages_head = this;
     --bucket->num_full_pages;
     // Special case: for a partition page with just a single slot, it may
     // now be empty and we want to run it through the empty logic.
-    if (UNLIKELY(this->num_allocated_slots == 0))
+    if (UNLIKELY(num_allocated_slots == 0))
       FreeSlowPath();
   }
 }
...
@@ -203,7 +203,7 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
 
 ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
 #if DCHECK_IS_ON()
-  size_t slot_size = this->bucket->slot_size;
+  size_t slot_size = bucket->slot_size;
   const size_t raw_size = get_raw_size();
   if (raw_size) {
     slot_size = raw_size;
@@ -217,7 +217,7 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
   memset(ptr, kFreedByte, slot_size);
 #endif
 
-  DCHECK(this->num_allocated_slots);
+  DCHECK(num_allocated_slots);
   // Catches an immediate double free.
   CHECK(ptr != freelist_head);
   // Look for double free one level deeper in debug.
@@ -227,8 +227,8 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
       static_cast<internal::PartitionFreelistEntry*>(ptr);
   entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
   freelist_head = entry;
-  --this->num_allocated_slots;
-  if (UNLIKELY(this->num_allocated_slots <= 0)) {
+  --num_allocated_slots;
+  if (UNLIKELY(num_allocated_slots <= 0)) {
     FreeSlowPath();
   } else {
     // All single-slot allocations must go through the slow path to
@@ -279,7 +279,7 @@ ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
 }
 
 ALWAYS_INLINE void PartitionPage::Reset() {
-  DCHECK(this->is_decommitted());
+  DCHECK(is_decommitted());
 
   num_unprovisioned_slots = bucket->get_slots_per_span();
   DCHECK(num_unprovisioned_slots);
...