Commit f48b1f3e authored by Albert J. Wong's avatar Albert J. Wong Committed by Commit Bot

PA: PartitionRootGeneric funcs -> methods

This is a mechanical refactor moving all free functions that take
PartitionRootGeneric as the first parameter into the PartitionRootGeneric struct.

See related bug for more details on methodology.

Bug: 787153
Change-Id: Ia95fc30ed030397303dc1999f4fdac8a72c01c9b
Reviewed-on: https://chromium-review.googlesource.com/780759
Commit-Queue: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#518438}
parent c4fd39b5
...@@ -52,15 +52,15 @@ bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in ...@@ -52,15 +52,15 @@ bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
a SuperPage meaning it would not be possible to pack even 2 sequential a SuperPage meaning it would not be possible to pack even 2 sequential
allocations in a SuperPage. allocations in a SuperPage.
`PartitionAllocGeneric` acquires a lock for thread safety. (The current `PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
implementation uses a spin lock on the assumption that thread contention will be implementation uses a spin lock on the assumption that thread contention will be
rare in its callers. The original caller was Blink, where this is generally rare in its callers. The original caller was Blink, where this is generally
true. Spin locks also have the benefit of simplicity.) true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a Callers can get thread-unsafe performance using a
`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of `SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
`PartitionAllocGeneric`). Callers can also arrange for low contention, such as `PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
by using a dedicated partition for single-threaded, latency-critical such as by using a dedicated partition for single-threaded, latency-critical
allocations. allocations.
Because PartitionAlloc guarantees that address space regions used for one Because PartitionAlloc guarantees that address space regions used for one
......
...@@ -190,10 +190,10 @@ void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) { ...@@ -190,10 +190,10 @@ void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
} }
} }
void PartitionAllocGenericInit(PartitionRootGeneric* root) { void PartitionRootGeneric::Init() {
subtle::SpinLock::Guard guard(root->lock); subtle::SpinLock::Guard guard(this->lock);
PartitionAllocBaseInit(root); PartitionAllocBaseInit(this);
// Precalculate some shift and mask constants used in the hot path. // Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary. // Example: malloc(41) == 101001 binary.
...@@ -209,7 +209,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) { ...@@ -209,7 +209,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
order_index_shift = 0; order_index_shift = 0;
else else
order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1); order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
root->order_index_shifts[order] = order_index_shift; this->order_index_shifts[order] = order_index_shift;
size_t sub_order_index_mask; size_t sub_order_index_mask;
if (order == kBitsPerSizeT) { if (order == kBitsPerSizeT) {
// This avoids invoking undefined behavior for an excessive shift. // This avoids invoking undefined behavior for an excessive shift.
...@@ -219,7 +219,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) { ...@@ -219,7 +219,7 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >> sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
(kGenericNumBucketsPerOrderBits + 1); (kGenericNumBucketsPerOrderBits + 1);
} }
root->order_sub_index_masks[order] = sub_order_index_mask; this->order_sub_index_masks[order] = sub_order_index_mask;
} }
// Set up the actual usable buckets first. // Set up the actual usable buckets first.
...@@ -232,11 +232,11 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) { ...@@ -232,11 +232,11 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
size_t current_size = kGenericSmallestBucket; size_t current_size = kGenericSmallestBucket;
size_t currentIncrement = size_t currentIncrement =
kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
PartitionBucket* bucket = &root->buckets[0]; PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) { for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
bucket->slot_size = current_size; bucket->slot_size = current_size;
PartitionBucketInitBase(bucket, root); PartitionBucketInitBase(bucket, this);
// Disable pseudo buckets so that touching them faults. // Disable pseudo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket) if (current_size % kGenericSmallestBucket)
bucket->active_pages_head = nullptr; bucket->active_pages_head = nullptr;
...@@ -246,16 +246,16 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) { ...@@ -246,16 +246,16 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
currentIncrement <<= 1; currentIncrement <<= 1;
} }
DCHECK(current_size == 1 << kGenericMaxBucketedOrder); DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table. // Then set up the fast size -> bucket lookup table.
bucket = &root->buckets[0]; bucket = &this->buckets[0];
PartitionBucket** bucketPtr = &root->bucket_lookups[0]; PartitionBucket** bucketPtr = &this->bucket_lookups[0];
for (order = 0; order <= kBitsPerSizeT; ++order) { for (order = 0; order <= kBitsPerSizeT; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) { if (order < kGenericMinBucketedOrder) {
// Use the bucket of the finest granularity for malloc(0) etc. // Use the bucket of the finest granularity for malloc(0) etc.
*bucketPtr++ = &root->buckets[0]; *bucketPtr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) { } else if (order > kGenericMaxBucketedOrder) {
*bucketPtr++ = &g_sentinel_bucket; *bucketPtr++ = &g_sentinel_bucket;
} else { } else {
...@@ -268,10 +268,9 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) { ...@@ -268,10 +268,9 @@ void PartitionAllocGenericInit(PartitionRootGeneric* root) {
} }
} }
} }
DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
DCHECK(bucketPtr == DCHECK(bucketPtr == &this->bucket_lookups[0] +
&root->bucket_lookups[0] + ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1), // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order. // which tries to overflow to a non-existent order.
*bucketPtr = &g_sentinel_bucket; *bucketPtr = &g_sentinel_bucket;
...@@ -810,9 +809,9 @@ void* PartitionAllocSlowPath(PartitionRootBase* root, ...@@ -810,9 +809,9 @@ void* PartitionAllocSlowPath(PartitionRootBase* root,
PartitionPage* new_page = nullptr; PartitionPage* new_page = nullptr;
// For the PartitionAllocGeneric API, we have a bunch of buckets marked // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
// as special cases. We bounce them through to the slow path so that we // marked as special cases. We bounce them through to the slow path so that
// can still have a blazing fast hot path due to lack of corner-case // we can still have a blazing fast hot path due to lack of corner-case
// branches. // branches.
// //
// Note: The ordering of the conditionals matters! In particular, // Note: The ordering of the conditionals matters! In particular,
...@@ -1081,17 +1080,16 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root, ...@@ -1081,17 +1080,16 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
return true; return true;
} }
void* PartitionReallocGeneric(PartitionRootGeneric* root, void* PartitionRootGeneric::Realloc(void* ptr,
void* ptr, size_t new_size,
size_t new_size, const char* type_name) {
const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return realloc(ptr, new_size); return realloc(ptr, new_size);
#else #else
if (UNLIKELY(!ptr)) if (UNLIKELY(!ptr))
return PartitionAllocGeneric(root, new_size, type_name); return this->Alloc(new_size, type_name);
if (UNLIKELY(!new_size)) { if (UNLIKELY(!new_size)) {
PartitionFreeGeneric(root, ptr); this->Free(ptr);
return nullptr; return nullptr;
} }
...@@ -1107,13 +1105,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root, ...@@ -1107,13 +1105,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
// We may be able to perform the realloc in place by changing the // We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting // accessibility of memory pages and, if reducing the size, decommitting
// them. // them.
if (PartitionReallocDirectMappedInPlace(root, page, new_size)) { if (PartitionReallocDirectMappedInPlace(this, page, new_size)) {
PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name); PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
return ptr; return ptr;
} }
} }
size_t actual_new_size = PartitionAllocActualSize(root, new_size); size_t actual_new_size = this->ActualSize(new_size);
size_t actual_old_size = PartitionAllocGetSize(ptr); size_t actual_old_size = PartitionAllocGetSize(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
...@@ -1134,13 +1132,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root, ...@@ -1134,13 +1132,13 @@ void* PartitionReallocGeneric(PartitionRootGeneric* root,
} }
// This realloc cannot be resized in-place. Sadness. // This realloc cannot be resized in-place. Sadness.
void* ret = PartitionAllocGeneric(root, new_size, type_name); void* ret = this->Alloc(new_size, type_name);
size_t copy_size = actual_old_size; size_t copy_size = actual_old_size;
if (new_size < copy_size) if (new_size < copy_size)
copy_size = new_size; copy_size = new_size;
memcpy(ret, ptr, copy_size); memcpy(ret, ptr, copy_size);
PartitionFreeGeneric(root, ptr); this->Free(ptr);
return ret; return ret;
#endif #endif
} }
...@@ -1301,13 +1299,13 @@ void PartitionRoot::PurgeMemory(int flags) { ...@@ -1301,13 +1299,13 @@ void PartitionRoot::PurgeMemory(int flags) {
// at the moment. // at the moment.
} }
void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { void PartitionRootGeneric::PurgeMemory(int flags) {
subtle::SpinLock::Guard guard(root->lock); subtle::SpinLock::Guard guard(this->lock);
if (flags & PartitionPurgeDecommitEmptyPages) if (flags & PartitionPurgeDecommitEmptyPages)
PartitionDecommitEmptyPages(root); PartitionDecommitEmptyPages(this);
if (flags & PartitionPurgeDiscardUnusedSystemPages) { if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) { for (size_t i = 0; i < kGenericNumBuckets; ++i) {
PartitionBucket* bucket = &root->buckets[i]; PartitionBucket* bucket = &this->buckets[i];
if (bucket->slot_size >= kSystemPageSize) if (bucket->slot_size >= kSystemPageSize)
PartitionPurgeBucket(bucket); PartitionPurgeBucket(bucket);
} }
...@@ -1393,14 +1391,13 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, ...@@ -1393,14 +1391,13 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
} }
} }
void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, void PartitionRootGeneric::DumpStats(const char* partition_name,
const char* partition_name, bool is_light_dump,
bool is_light_dump, PartitionStatsDumper* dumper) {
PartitionStatsDumper* dumper) {
PartitionMemoryStats stats = {0}; PartitionMemoryStats stats = {0};
stats.total_mmapped_bytes = partition->total_size_of_super_pages + stats.total_mmapped_bytes =
partition->total_size_of_direct_mapped_pages; this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
stats.total_committed_bytes = partition->total_size_of_committed_pages; stats.total_committed_bytes = this->total_size_of_committed_pages;
size_t direct_mapped_allocations_total_size = 0; size_t direct_mapped_allocations_total_size = 0;
...@@ -1417,13 +1414,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, ...@@ -1417,13 +1414,13 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets]; PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
size_t num_direct_mapped_allocations = 0; size_t num_direct_mapped_allocations = 0;
{ {
subtle::SpinLock::Guard guard(partition->lock); subtle::SpinLock::Guard guard(this->lock);
for (size_t i = 0; i < kGenericNumBuckets; ++i) { for (size_t i = 0; i < kGenericNumBuckets; ++i) {
const PartitionBucket* bucket = &partition->buckets[i]; const PartitionBucket* bucket = &this->buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in // Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see // order to preserve a fast size->bucket map (see
// PartitionAllocGenericInit for details). // PartitionRootGeneric::Init() for details).
if (!bucket->active_pages_head) if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false; bucket_stats[i].is_valid = false;
else else
...@@ -1436,7 +1433,7 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, ...@@ -1436,7 +1433,7 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
} }
} }
for (PartitionDirectMapExtent *extent = partition->direct_map_list; for (PartitionDirectMapExtent *extent = this->direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps; extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) { extent = extent->next_extent, ++num_direct_mapped_allocations) {
DCHECK(!extent->next_extent || DCHECK(!extent->next_extent ||
...@@ -1451,8 +1448,8 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, ...@@ -1451,8 +1448,8 @@ void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
if (!is_light_dump) { if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can // Call |PartitionsDumpBucketStats| after collecting stats because it can
// try to allocate using |PartitionAllocGeneric| and it can't obtain the // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
// lock. // obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) { for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid) if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]); dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
...@@ -1513,7 +1510,8 @@ void PartitionRoot::DumpStats(const char* partition_name, ...@@ -1513,7 +1510,8 @@ void PartitionRoot::DumpStats(const char* partition_name,
} }
if (!is_light_dump) { if (!is_light_dump) {
// PartitionsDumpBucketStats is called after collecting stats because it // PartitionsDumpBucketStats is called after collecting stats because it
// can use PartitionAlloc to allocate and this can affect the statistics. // can use PartitionRoot::Alloc() to allocate and this can affect the
// statistics.
for (size_t i = 0; i < partitionNumBuckets; ++i) { for (size_t i = 0; i < partitionNumBuckets; ++i) {
if (memory_stats[i].is_valid) if (memory_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]); dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
......
...@@ -6,8 +6,9 @@ ...@@ -6,8 +6,9 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION // DESCRIPTION
// PartitionRoot::Alloc() / PartitionAllocGeneric() and PartitionFree() / // PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
// PartitionFreeGeneric() are approximately analogous to malloc() and free(). // PartitionRootGeneric::Free() are approximately analogous to malloc() and
// free().
// //
// The main difference is that a PartitionRoot / PartitionRootGeneric object // The main difference is that a PartitionRoot / PartitionRootGeneric object
// must be supplied to these functions, representing a specific "heap partition" // must be supplied to these functions, representing a specific "heap partition"
...@@ -23,14 +24,14 @@ ...@@ -23,14 +24,14 @@
// PartitionRoot is really just a header adjacent to other data areas provided // PartitionRoot is really just a header adjacent to other data areas provided
// by the allocator class. // by the allocator class.
// //
// The partitionAlloc() variant of the API has the following caveats: // The PartitionRoot::Alloc() variant of the API has the following caveats:
// - Allocations and frees against a single partition must be single threaded. // - Allocations and frees against a single partition must be single threaded.
// - Allocations must not exceed a max size, chosen at compile-time via a // - Allocations must not exceed a max size, chosen at compile-time via a
// templated parameter to PartitionAllocator. // templated parameter to PartitionAllocator.
// - Allocation sizes must be aligned to the system pointer size. // - Allocation sizes must be aligned to the system pointer size.
// - Allocations are bucketed exactly according to size. // - Allocations are bucketed exactly according to size.
// //
// And for PartitionAllocGeneric(): // And for PartitionRootGeneric::Alloc():
// - Multi-threaded use against a single partition is ok; locking is handled. // - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of // - Allocations of any arbitrary size can be handled (subject to a limit of
// INT_MAX bytes for security reasons). // INT_MAX bytes for security reasons).
...@@ -94,8 +95,8 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; ...@@ -94,8 +95,8 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Slot span sizes are adjusted depending on the allocation size, to make sure // Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last // the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other // system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up // constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
// against the end of a system page. // up against the end of a system page.
static const size_t kPartitionPageShift = 14; // 16KB static const size_t kPartitionPageShift = 14; // 16KB
static const size_t kPartitionPageSize = 1 << kPartitionPageShift; static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1; static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
...@@ -382,6 +383,22 @@ struct BASE_EXPORT PartitionRootGeneric : public PartitionRootBase { ...@@ -382,6 +383,22 @@ struct BASE_EXPORT PartitionRootGeneric : public PartitionRootBase {
bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] = bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1] =
{}; {};
PartitionBucket buckets[kGenericNumBuckets] = {}; PartitionBucket buckets[kGenericNumBuckets] = {};
// Public API.
void Init();
ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
ALWAYS_INLINE void Free(void* ptr);
NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
ALWAYS_INLINE size_t ActualSize(size_t size);
void PurgeMemory(int flags);
void DumpStats(const char* partition_name,
bool is_light_dump,
PartitionStatsDumper* partition_stats_dumper);
}; };
// Flags for PartitionAllocGenericFlags. // Flags for PartitionAllocGenericFlags.
...@@ -436,24 +453,12 @@ class BASE_EXPORT PartitionStatsDumper { ...@@ -436,24 +453,12 @@ class BASE_EXPORT PartitionStatsDumper {
}; };
BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)()); BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*, BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
int, int,
size_t, size_t,
PartitionBucket*); PartitionBucket*);
BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*); BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*,
void*,
size_t,
const char* type_name);
BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
const char* partition_name,
bool is_light_dump,
PartitionStatsDumper*);
class BASE_EXPORT PartitionAllocHooks { class BASE_EXPORT PartitionAllocHooks {
public: public:
...@@ -836,17 +841,16 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, ...@@ -836,17 +841,16 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
#endif #endif
} }
ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root, ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
size_t size, const char* type_name) {
const char* type_name) { return PartitionAllocGenericFlags(this, 0, size, type_name);
return PartitionAllocGenericFlags(root, 0, size, type_name);
} }
ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
free(ptr); free(ptr);
#else #else
DCHECK(root->initialized); DCHECK(this->initialized);
if (UNLIKELY(!ptr)) if (UNLIKELY(!ptr))
return; return;
...@@ -857,7 +861,7 @@ ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { ...@@ -857,7 +861,7 @@ ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
// TODO(palmer): See if we can afford to make this a CHECK. // TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(PartitionPagePointerIsValid(page)); DCHECK(PartitionPagePointerIsValid(page));
{ {
subtle::SpinLock::Guard guard(root->lock); subtle::SpinLock::Guard guard(this->lock);
PartitionFreeWithPage(ptr, page); PartitionFreeWithPage(ptr, page);
} }
#endif #endif
...@@ -871,14 +875,13 @@ ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) { ...@@ -871,14 +875,13 @@ ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
} }
ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root, ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size; return size;
#else #else
DCHECK(root->initialized); DCHECK(this->initialized);
size = PartitionCookieSizeAdjustAdd(size); size = PartitionCookieSizeAdjustAdd(size);
PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) { if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
size = bucket->slot_size; size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) { } else if (size > kGenericMaxDirectMapped) {
...@@ -933,7 +936,7 @@ class BASE_EXPORT PartitionAllocatorGeneric { ...@@ -933,7 +936,7 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionAllocatorGeneric(); PartitionAllocatorGeneric();
~PartitionAllocatorGeneric(); ~PartitionAllocatorGeneric();
void init() { PartitionAllocGenericInit(&partition_root_); } void init() { partition_root_.Init(); }
ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; } ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
private: private:
......
...@@ -159,13 +159,12 @@ class PartitionAllocTest : public testing::Test { ...@@ -159,13 +159,12 @@ class PartitionAllocTest : public testing::Test {
void CycleGenericFreeCache(size_t size) { void CycleGenericFreeCache(size_t size) {
for (size_t i = 0; i < kMaxFreeableSpans; ++i) { for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
void* ptr = void* ptr = generic_allocator.root()->Alloc(size, type_name);
PartitionAllocGeneric(generic_allocator.root(), size, type_name);
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
PartitionBucket* bucket = page->bucket; PartitionBucket* bucket = page->bucket;
EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
} }
...@@ -186,8 +185,8 @@ class PartitionAllocTest : public testing::Test { ...@@ -186,8 +185,8 @@ class PartitionAllocTest : public testing::Test {
// Work out the number of allocations for 6 GB of memory. // Work out the number of allocations for 6 GB of memory.
const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric( void** ptrs = reinterpret_cast<void**>(generic_allocator.root()->Alloc(
generic_allocator.root(), numAllocations * sizeof(void*), type_name)); numAllocations * sizeof(void*), type_name));
int i; int i;
for (i = 0; i < numAllocations; ++i) { for (i = 0; i < numAllocations; ++i) {
...@@ -212,15 +211,15 @@ class PartitionAllocTest : public testing::Test { ...@@ -212,15 +211,15 @@ class PartitionAllocTest : public testing::Test {
// Free, reallocate and free again each block we allocated. We do this to // Free, reallocate and free again each block we allocated. We do this to
// check that freeing memory also works correctly after a failed allocation. // check that freeing memory also works correctly after a failed allocation.
for (--i; i >= 0; --i) { for (--i; i >= 0; --i) {
PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); generic_allocator.root()->Free(ptrs[i]);
ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
PartitionAllocReturnNull, allocSize, PartitionAllocReturnNull, allocSize,
type_name); type_name);
EXPECT_TRUE(ptrs[i]); EXPECT_TRUE(ptrs[i]);
PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); generic_allocator.root()->Free(ptrs[i]);
} }
PartitionFreeGeneric(generic_allocator.root(), ptrs); generic_allocator.root()->Free(ptrs);
EXPECT_TRUE(ClearAddressSpaceLimit()); EXPECT_TRUE(ClearAddressSpaceLimit());
} }
...@@ -614,33 +613,31 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) { ...@@ -614,33 +613,31 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
// Test the generic allocation functions that can handle arbitrary sizes and // Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc. // reallocing etc.
TEST_F(PartitionAllocTest, GenericAlloc) { TEST_F(PartitionAllocTest, GenericAlloc) {
void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); void* ptr = generic_allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1, ptr = generic_allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); ptr = generic_allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
void* origPtr = ptr; void* origPtr = ptr;
char* charPtr = static_cast<char*>(ptr); char* charPtr = static_cast<char*>(ptr);
*charPtr = 'A'; *charPtr = 'A';
// Change the size of the realloc, remaining inside the same bucket. // Change the size of the realloc, remaining inside the same bucket.
void* newPtr = void* newPtr = generic_allocator.root()->Realloc(ptr, 2, type_name);
PartitionReallocGeneric(generic_allocator.root(), ptr, 2, type_name);
EXPECT_EQ(ptr, newPtr); EXPECT_EQ(ptr, newPtr);
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(ptr, newPtr); EXPECT_EQ(ptr, newPtr);
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, newPtr =
kGenericSmallestBucket, type_name); generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
EXPECT_EQ(ptr, newPtr); EXPECT_EQ(ptr, newPtr);
// Change the size of the realloc, switching buckets. // Change the size of the realloc, switching buckets.
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, newPtr = generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1,
kGenericSmallestBucket + 1, type_name); type_name);
EXPECT_NE(newPtr, ptr); EXPECT_NE(newPtr, ptr);
// Check that the realloc copied correctly. // Check that the realloc copied correctly.
char* newCharPtr = static_cast<char*>(newPtr); char* newCharPtr = static_cast<char*>(newPtr);
...@@ -656,14 +653,13 @@ TEST_F(PartitionAllocTest, GenericAlloc) { ...@@ -656,14 +653,13 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// The realloc moved. To check that the old allocation was freed, we can // The realloc moved. To check that the old allocation was freed, we can
// do an alloc of the old allocation size and check that the old allocation // do an alloc of the old allocation size and check that the old allocation
// address is at the head of the freelist and reused. // address is at the head of the freelist and reused.
void* reusedPtr = void* reusedPtr = generic_allocator.root()->Alloc(1, type_name);
PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
EXPECT_EQ(reusedPtr, origPtr); EXPECT_EQ(reusedPtr, origPtr);
PartitionFreeGeneric(generic_allocator.root(), reusedPtr); generic_allocator.root()->Free(reusedPtr);
// Downsize the realloc. // Downsize the realloc.
ptr = newPtr; ptr = newPtr;
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(newPtr, origPtr); EXPECT_EQ(newPtr, origPtr);
newCharPtr = static_cast<char*>(newPtr); newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'B'); EXPECT_EQ(*newCharPtr, 'B');
...@@ -671,8 +667,8 @@ TEST_F(PartitionAllocTest, GenericAlloc) { ...@@ -671,8 +667,8 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize the realloc to outside the partition. // Upsize the realloc to outside the partition.
ptr = newPtr; ptr = newPtr;
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1,
kGenericMaxBucketed + 1, type_name); type_name);
EXPECT_NE(newPtr, ptr); EXPECT_NE(newPtr, ptr);
newCharPtr = static_cast<char*>(newPtr); newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'C'); EXPECT_EQ(*newCharPtr, 'C');
...@@ -680,62 +676,62 @@ TEST_F(PartitionAllocTest, GenericAlloc) { ...@@ -680,62 +676,62 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize and downsize the realloc, remaining outside the partition. // Upsize and downsize the realloc, remaining outside the partition.
ptr = newPtr; ptr = newPtr;
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10,
kGenericMaxBucketed * 10, type_name); type_name);
newCharPtr = static_cast<char*>(newPtr); newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'D'); EXPECT_EQ(*newCharPtr, 'D');
*newCharPtr = 'E'; *newCharPtr = 'E';
ptr = newPtr; ptr = newPtr;
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, newPtr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2,
kGenericMaxBucketed * 2, type_name); type_name);
newCharPtr = static_cast<char*>(newPtr); newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'E'); EXPECT_EQ(*newCharPtr, 'E');
*newCharPtr = 'F'; *newCharPtr = 'F';
// Downsize the realloc to inside the partition. // Downsize the realloc to inside the partition.
ptr = newPtr; ptr = newPtr;
newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); newPtr = generic_allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_NE(newPtr, ptr); EXPECT_NE(newPtr, ptr);
EXPECT_EQ(newPtr, origPtr); EXPECT_EQ(newPtr, origPtr);
newCharPtr = static_cast<char*>(newPtr); newCharPtr = static_cast<char*>(newPtr);
EXPECT_EQ(*newCharPtr, 'F'); EXPECT_EQ(*newCharPtr, 'F');
PartitionFreeGeneric(generic_allocator.root(), newPtr); generic_allocator.root()->Free(newPtr);
} }
// Test the generic allocation functions can handle some specific sizes of // Test the generic allocation functions can handle some specific sizes of
// interest. // interest.
TEST_F(PartitionAllocTest, GenericAllocSizes) { TEST_F(PartitionAllocTest, GenericAllocSizes) {
void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name); void* ptr = generic_allocator.root()->Alloc(0, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// kPartitionPageSize is interesting because it results in just one // kPartitionPageSize is interesting because it results in just one
// allocation per page, which tripped up some corner cases. // allocation per page, which tripped up some corner cases.
size_t size = kPartitionPageSize - kExtraAllocSize; size_t size = kPartitionPageSize - kExtraAllocSize;
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2); EXPECT_TRUE(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Should be freeable at this point. // Should be freeable at this point.
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
EXPECT_NE(-1, page->empty_cache_index); EXPECT_NE(-1, page->empty_cache_index);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
kSystemPageSize) / kSystemPageSize) /
2) - 2) -
kExtraAllocSize; kExtraAllocSize;
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
memset(ptr, 'A', size); memset(ptr, 'A', size);
ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2); EXPECT_TRUE(ptr2);
void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr3); EXPECT_TRUE(ptr3);
void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr4); EXPECT_TRUE(ptr4);
page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
...@@ -743,17 +739,16 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) { ...@@ -743,17 +739,16 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3));
EXPECT_NE(page, page2); EXPECT_NE(page, page2);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
// Should be freeable at this point. // Should be freeable at this point.
EXPECT_NE(-1, page->empty_cache_index); EXPECT_NE(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots); EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots); EXPECT_EQ(0, page->num_unprovisioned_slots);
void* newPtr = void* newPtr = generic_allocator.root()->Alloc(size, type_name);
PartitionAllocGeneric(generic_allocator.root(), size, type_name);
EXPECT_EQ(ptr3, newPtr); EXPECT_EQ(ptr3, newPtr);
newPtr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); newPtr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr2, newPtr); EXPECT_EQ(ptr2, newPtr);
#if defined(OS_LINUX) && !DCHECK_IS_ON() #if defined(OS_LINUX) && !DCHECK_IS_ON()
// On Linux, we have a guarantee that freelisting a page should cause its // On Linux, we have a guarantee that freelisting a page should cause its
...@@ -765,18 +760,17 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) { ...@@ -765,18 +760,17 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// byte pattern. // byte pattern.
EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
#endif #endif
PartitionFreeGeneric(generic_allocator.root(), newPtr); generic_allocator.root()->Free(newPtr);
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
PartitionFreeGeneric(generic_allocator.root(), ptr4); generic_allocator.root()->Free(ptr4);
// Can we allocate a massive (512MB) size? // Can we allocate a massive (512MB) size?
// Allocate 512MB, but +1, to test for cookie writing alignment issues. // Allocate 512MB, but +1, to test for cookie writing alignment issues.
// Test this only if the device has enough memory or it might fail due // Test this only if the device has enough memory or it might fail due
// to OOM. // to OOM.
if (IsLargeMemoryDevice()) { if (IsLargeMemoryDevice()) {
ptr = PartitionAllocGeneric(generic_allocator.root(), 512 * 1024 * 1024 + 1, ptr = generic_allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
type_name); generic_allocator.root()->Free(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr);
} }
// Check a more reasonable, but still direct mapped, size. // Check a more reasonable, but still direct mapped, size.
...@@ -784,13 +778,13 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) { ...@@ -784,13 +778,13 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
size = 20 * 1024 * 1024; size = 20 * 1024 * 1024;
size -= kSystemPageSize; size -= kSystemPageSize;
size -= 1; size -= 1;
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
char* charPtr = reinterpret_cast<char*>(ptr); char* charPtr = reinterpret_cast<char*>(ptr);
*(charPtr + (size - 1)) = 'A'; *(charPtr + (size - 1)) = 'A';
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Can we free null? // Can we free null?
PartitionFreeGeneric(generic_allocator.root(), nullptr); generic_allocator.root()->Free(nullptr);
// Do we correctly get a null for a failed allocation? // Do we correctly get a null for a failed allocation?
EXPECT_EQ(nullptr, PartitionAllocGenericFlags( EXPECT_EQ(nullptr, PartitionAllocGenericFlags(
...@@ -807,36 +801,30 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) { ...@@ -807,36 +801,30 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Allocate something small. // Allocate something small.
requestedSize = 511 - kExtraAllocSize; requestedSize = 511 - kExtraAllocSize;
predictedSize = predictedSize = generic_allocator.root()->ActualSize(requestedSize);
PartitionAllocActualSize(generic_allocator.root(), requestedSize); ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
ptr =
PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr); actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(predictedSize, actualSize);
EXPECT_LT(requestedSize, actualSize); EXPECT_LT(requestedSize, actualSize);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it // Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2. // is an exact power of 2.
requestedSize = (256 * 1024) - kExtraAllocSize; requestedSize = (256 * 1024) - kExtraAllocSize;
predictedSize = predictedSize = generic_allocator.root()->ActualSize(requestedSize);
PartitionAllocActualSize(generic_allocator.root(), requestedSize); ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
ptr =
PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr); actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(predictedSize, actualSize);
EXPECT_EQ(requestedSize, actualSize); EXPECT_EQ(requestedSize, actualSize);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize() // Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now. // should return a larger size than we asked for now.
requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize; requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
predictedSize = predictedSize = generic_allocator.root()->ActualSize(requestedSize);
PartitionAllocActualSize(generic_allocator.root(), requestedSize); ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
ptr =
PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr); actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(predictedSize, actualSize);
...@@ -844,51 +832,46 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) { ...@@ -844,51 +832,46 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Check that we can write at the end of the reported size too. // Check that we can write at the end of the reported size too.
char* charPtr = reinterpret_cast<char*>(ptr); char* charPtr = reinterpret_cast<char*>(ptr);
*(charPtr + (actualSize - 1)) = 'A'; *(charPtr + (actualSize - 1)) = 'A';
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Allocate something very large, and uneven. // Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) { if (IsLargeMemoryDevice()) {
requestedSize = 512 * 1024 * 1024 - 1; requestedSize = 512 * 1024 * 1024 - 1;
predictedSize = predictedSize = generic_allocator.root()->ActualSize(requestedSize);
PartitionAllocActualSize(generic_allocator.root(), requestedSize); ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
ptr = PartitionAllocGeneric(generic_allocator.root(), requestedSize,
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
actualSize = PartitionAllocGetSize(ptr); actualSize = PartitionAllocGetSize(ptr);
EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(predictedSize, actualSize);
EXPECT_LT(requestedSize, actualSize); EXPECT_LT(requestedSize, actualSize);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
// Too large allocation. // Too large allocation.
requestedSize = INT_MAX; requestedSize = INT_MAX;
predictedSize = predictedSize = generic_allocator.root()->ActualSize(requestedSize);
PartitionAllocActualSize(generic_allocator.root(), requestedSize);
EXPECT_EQ(requestedSize, predictedSize); EXPECT_EQ(requestedSize, predictedSize);
} }
// Test the realloc() contract. // Test the realloc() contract.
TEST_F(PartitionAllocTest, Realloc) { TEST_F(PartitionAllocTest, Realloc) {
// realloc(0, size) should be equivalent to malloc(). // realloc(0, size) should be equivalent to malloc().
void* ptr = PartitionReallocGeneric(generic_allocator.root(), nullptr, void* ptr =
kTestAllocSize, type_name); generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
memset(ptr, 'A', kTestAllocSize); memset(ptr, 'A', kTestAllocSize);
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
// realloc(ptr, 0) should be equivalent to free(). // realloc(ptr, 0) should be equivalent to free().
void* ptr2 = void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name);
PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name);
EXPECT_EQ(nullptr, ptr2); EXPECT_EQ(nullptr, ptr2);
EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head); EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
// Test that growing an allocation with realloc() copies everything from the // Test that growing an allocation with realloc() copies everything from the
// old allocation. // old allocation.
size_t size = kSystemPageSize - kExtraAllocSize; size_t size = kSystemPageSize - kExtraAllocSize;
EXPECT_EQ(size, PartitionAllocActualSize(generic_allocator.root(), size)); EXPECT_EQ(size, generic_allocator.root()->ActualSize(size));
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
memset(ptr, 'A', size); memset(ptr, 'A', size);
ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, size + 1, ptr2 = generic_allocator.root()->Realloc(ptr, size + 1, type_name);
type_name);
EXPECT_NE(ptr, ptr2); EXPECT_NE(ptr, ptr2);
char* charPtr2 = static_cast<char*>(ptr2); char* charPtr2 = static_cast<char*>(ptr2);
EXPECT_EQ('A', charPtr2[0]); EXPECT_EQ('A', charPtr2[0]);
...@@ -899,8 +882,7 @@ TEST_F(PartitionAllocTest, Realloc) { ...@@ -899,8 +882,7 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking an allocation with realloc() also copies everything // Test that shrinking an allocation with realloc() also copies everything
// from the old allocation. // from the old allocation.
ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, size - 1, ptr = generic_allocator.root()->Realloc(ptr2, size - 1, type_name);
type_name);
EXPECT_NE(ptr2, ptr); EXPECT_NE(ptr2, ptr);
char* charPtr = static_cast<char*>(ptr); char* charPtr = static_cast<char*>(ptr);
EXPECT_EQ('A', charPtr[0]); EXPECT_EQ('A', charPtr[0]);
...@@ -909,32 +891,30 @@ TEST_F(PartitionAllocTest, Realloc) { ...@@ -909,32 +891,30 @@ TEST_F(PartitionAllocTest, Realloc) {
EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
#endif #endif
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Test that shrinking a direct mapped allocation happens in-place. // Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize; size = kGenericMaxBucketed + 16 * kSystemPageSize;
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
size_t actualSize = PartitionAllocGetSize(ptr); size_t actualSize = PartitionAllocGetSize(ptr);
ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, ptr2 = generic_allocator.root()->Realloc(
kGenericMaxBucketed + 8 * kSystemPageSize, ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
type_name);
EXPECT_EQ(ptr, ptr2); EXPECT_EQ(ptr, ptr2);
EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2)); EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be // Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size. // expanded up again within its original size.
ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
size - kSystemPageSize, type_name); type_name);
EXPECT_EQ(ptr2, ptr); EXPECT_EQ(ptr2, ptr);
EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr)); EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr));
// Test that a direct mapped allocation is performed not in-place when the // Test that a direct mapped allocation is performed not in-place when the
// new size is small enough. // new size is small enough.
ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize, ptr2 = generic_allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
type_name);
EXPECT_NE(ptr, ptr2); EXPECT_NE(ptr, ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
} }
// Tests the handing out of freelists for partial pages. // Tests the handing out of freelists for partial pages.
...@@ -1065,8 +1045,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ...@@ -1065,8 +1045,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// larger than a system page. // larger than a system page.
size_t pageAndAHalfSize = size_t pageAndAHalfSize =
(kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize; (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
ptr = PartitionAllocGeneric(generic_allocator.root(), pageAndAHalfSize, ptr = generic_allocator.root()->Alloc(pageAndAHalfSize, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots); EXPECT_EQ(1, page->num_allocated_slots);
...@@ -1075,11 +1054,11 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ...@@ -1075,11 +1054,11 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageAndAHalfSize + kExtraAllocSize); (pageAndAHalfSize + kExtraAllocSize);
EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots); EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// And then make sure than exactly the page size only faults one page. // And then make sure than exactly the page size only faults one page.
size_t pageSize = kSystemPageSize - kExtraAllocSize; size_t pageSize = kSystemPageSize - kExtraAllocSize;
ptr = PartitionAllocGeneric(generic_allocator.root(), pageSize, type_name); ptr = generic_allocator.root()->Alloc(pageSize, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots); EXPECT_EQ(1, page->num_allocated_slots);
...@@ -1088,7 +1067,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) { ...@@ -1088,7 +1067,7 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageSize + kExtraAllocSize); (pageSize + kExtraAllocSize);
EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots); EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
// Test some of the fragmentation-resistant properties of the allocator. // Test some of the fragmentation-resistant properties of the allocator.
...@@ -1283,9 +1262,9 @@ TEST_F(PartitionAllocTest, FreeCache) { ...@@ -1283,9 +1262,9 @@ TEST_F(PartitionAllocTest, FreeCache) {
TEST_F(PartitionAllocTest, LostFreePagesBug) { TEST_F(PartitionAllocTest, LostFreePagesBug) {
size_t size = kPartitionPageSize - kExtraAllocSize; size_t size = kPartitionPageSize - kExtraAllocSize;
void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2); EXPECT_TRUE(ptr2);
PartitionPage* page = PartitionPage* page =
...@@ -1298,8 +1277,8 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) { ...@@ -1298,8 +1277,8 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_EQ(-1, page->num_allocated_slots); EXPECT_EQ(-1, page->num_allocated_slots);
EXPECT_EQ(1, page2->num_allocated_slots); EXPECT_EQ(1, page2->num_allocated_slots);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
EXPECT_TRUE(bucket->empty_pages_head); EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page); EXPECT_TRUE(bucket->empty_pages_head->next_page);
...@@ -1318,9 +1297,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) { ...@@ -1318,9 +1297,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head); EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list. // At this moment, we have two decommitted pages, on the empty list.
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head); EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head); EXPECT_TRUE(bucket->empty_pages_head);
...@@ -1331,9 +1310,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) { ...@@ -1331,9 +1310,9 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
// We're now set up to trigger a historical bug by scanning over the active // We're now set up to trigger a historical bug by scanning over the active
// pages list. The current code gets into a different state, but we'll keep // pages list. The current code gets into a different state, but we'll keep
// the test as being an interesting corner case. // the test as being an interesting corner case.
ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
EXPECT_TRUE(bucket->active_pages_head); EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head); EXPECT_TRUE(bucket->empty_pages_head);
...@@ -1392,40 +1371,35 @@ TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNull) { ...@@ -1392,40 +1371,35 @@ TEST_F(PartitionAllocTest, MAYBE_RepeatedReturnNull) {
// malloc(0), which is not good. // malloc(0), which is not good.
TEST_F(PartitionAllocDeathTest, LargeAllocs) { TEST_F(PartitionAllocDeathTest, LargeAllocs) {
// Largest alloc. // Largest alloc.
EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(),
static_cast<size_t>(-1), type_name),
"");
// And the smallest allocation we expect to die.
EXPECT_DEATH( EXPECT_DEATH(
PartitionAllocGeneric(generic_allocator.root(), generic_allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
static_cast<size_t>(INT_MAX) + 1, type_name), // And the smallest allocation we expect to die.
""); EXPECT_DEATH(generic_allocator.root()->Alloc(static_cast<size_t>(INT_MAX) + 1,
type_name),
"");
} }
// Check that our immediate double-free detection works. // Check that our immediate double-free detection works.
TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) { TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
} }
// Check that our refcount-based double-free detection works. // Check that our refcount-based double-free detection works.
TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) { TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, void* ptr2 = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
type_name);
EXPECT_TRUE(ptr2); EXPECT_TRUE(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
// This is not an immediate double-free so our immediate detection won't // This is not an immediate double-free so our immediate detection won't
// fire. However, it does take the "refcount" of the partition page to -1, // fire. However, it does take the "refcount" of the partition page to -1,
// which is illegal and should be trapped. // which is illegal and should be trapped.
EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
} }
// Check that guard pages are present where expected. // Check that guard pages are present where expected.
...@@ -1450,7 +1424,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) { ...@@ -1450,7 +1424,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
static_assert(kSize > kGenericMaxBucketed, static_assert(kSize > kGenericMaxBucketed,
"allocation not large enough for direct allocation"); "allocation not large enough for direct allocation");
size_t size = kSize - kExtraAllocSize; size_t size = kSize - kExtraAllocSize;
void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
...@@ -1458,7 +1432,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) { ...@@ -1458,7 +1432,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
EXPECT_DEATH(*(charPtr - 1) = 'A', ""); EXPECT_DEATH(*(charPtr - 1) = 'A', "");
EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
// Check that a bad free() is caught where the free() refers to an unused // Check that a bad free() is caught where the free() refers to an unused
...@@ -1466,14 +1440,14 @@ TEST_F(PartitionAllocDeathTest, GuardPages) { ...@@ -1466,14 +1440,14 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) { TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
// This large size will result in a direct mapped allocation with guard // This large size will result in a direct mapped allocation with guard
// pages at either end. // pages at either end.
void* ptr = PartitionAllocGeneric(generic_allocator.root(), void* ptr =
kPartitionPageSize * 2, type_name); generic_allocator.root()->Alloc(kPartitionPageSize * 2, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), ""); EXPECT_DEATH(generic_allocator.root()->Free(badPtr), "");
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
#endif // !defined(OS_ANDROID) && !defined(OS_IOS) #endif // !defined(OS_ANDROID) && !defined(OS_IOS)
...@@ -1493,12 +1467,11 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1493,12 +1467,11 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This series of tests checks the active -> empty -> decommitted states. // This series of tests checks the active -> empty -> decommitted states.
{ {
{ {
void* ptr = PartitionAllocGeneric(generic_allocator.root(), void* ptr =
2048 - kExtraAllocSize, type_name); generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
...@@ -1513,14 +1486,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1513,14 +1486,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(1u, stats->num_active_pages); EXPECT_EQ(1u, stats->num_active_pages);
EXPECT_EQ(0u, stats->num_empty_pages); EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(0u, stats->num_decommitted_pages); EXPECT_EQ(0u, stats->num_decommitted_pages);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
...@@ -1544,9 +1516,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1544,9 +1516,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
...@@ -1567,22 +1538,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1567,22 +1538,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks for correct empty page list accounting. // This test checks for correct empty page list accounting.
{ {
size_t size = kPartitionPageSize - kExtraAllocSize; size_t size = kPartitionPageSize - kExtraAllocSize;
void* ptr1 = void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
void* ptr2 = generic_allocator.root()->Free(ptr1);
PartitionAllocGeneric(generic_allocator.root(), size, type_name); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr1);
PartitionFreeGeneric(generic_allocator.root(), ptr2);
CycleGenericFreeCache(kTestAllocSize); CycleGenericFreeCache(kTestAllocSize);
ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); ptr1 = generic_allocator.root()->Alloc(size, type_name);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -1599,7 +1567,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1599,7 +1567,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_empty_pages); EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(1u, stats->num_decommitted_pages); EXPECT_EQ(1u, stats->num_decommitted_pages);
} }
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
} }
// This test checks for correct direct mapped accounting. // This test checks for correct direct mapped accounting.
...@@ -1610,16 +1578,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1610,16 +1578,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
(size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask; (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
size_t real_size_bigger = size_t real_size_bigger =
(size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask; (size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
void* ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, void* ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
type_name); void* ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
void* ptr2 =
PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -1652,29 +1617,25 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1652,29 +1617,25 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages); EXPECT_EQ(0u, stats->num_decommitted_pages);
} }
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
// Whilst we're here, allocate again and free with different ordering to // Whilst we're here, allocate again and free with different ordering to
// give a workout to our linked list code. // give a workout to our linked list code.
ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
type_name); ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
ptr2 = generic_allocator.root()->Free(ptr);
PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr2);
} }
// This test checks large-but-not-quite-direct allocations. // This test checks large-but-not-quite-direct allocations.
{ {
void* ptr = void* ptr = generic_allocator.root()->Alloc(65536 + 1, type_name);
PartitionAllocGeneric(generic_allocator.root(), 65536 + 1, type_name);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
...@@ -1694,13 +1655,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1694,13 +1655,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages); EXPECT_EQ(0u, stats->num_decommitted_pages);
} }
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
...@@ -1719,15 +1679,14 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1719,15 +1679,14 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages); EXPECT_EQ(0u, stats->num_decommitted_pages);
} }
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), void* ptr2 =
65536 + kSystemPageSize + 1, type_name); generic_allocator.root()->Alloc(65536 + kSystemPageSize + 1, type_name);
EXPECT_EQ(ptr, ptr2); EXPECT_EQ(ptr, ptr2);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
...@@ -1748,20 +1707,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) { ...@@ -1748,20 +1707,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages); EXPECT_EQ(0u, stats->num_decommitted_pages);
} }
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
} }
} }
// Tests the API to purge freeable memory. // Tests the API to purge freeable memory.
TEST_F(PartitionAllocTest, Purge) { TEST_F(PartitionAllocTest, Purge) {
char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric( char* ptr = reinterpret_cast<char*>(
generic_allocator.root(), 2048 - kExtraAllocSize, type_name)); generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
...@@ -1770,13 +1728,11 @@ TEST_F(PartitionAllocTest, Purge) { ...@@ -1770,13 +1728,11 @@ TEST_F(PartitionAllocTest, Purge) {
EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->resident_bytes); EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
} }
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
PartitionPurgeDecommitEmptyPages);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
...@@ -1787,14 +1743,12 @@ TEST_F(PartitionAllocTest, Purge) { ...@@ -1787,14 +1743,12 @@ TEST_F(PartitionAllocTest, Purge) {
} }
// Calling purge again here is a good way of testing we didn't mess up the // Calling purge again here is a good way of testing we didn't mess up the
// state of the free cache ring. // state of the free cache ring.
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
PartitionPurgeDecommitEmptyPages);
char* bigPtr = reinterpret_cast<char*>( char* bigPtr = reinterpret_cast<char*>(
PartitionAllocGeneric(generic_allocator.root(), 256 * 1024, type_name)); generic_allocator.root()->Alloc(256 * 1024, type_name));
PartitionFreeGeneric(generic_allocator.root(), bigPtr); generic_allocator.root()->Free(bigPtr);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
PartitionPurgeDecommitEmptyPages);
CheckPageInCore(ptr - kPointerOffset, false); CheckPageInCore(ptr - kPointerOffset, false);
CheckPageInCore(bigPtr - kPointerOffset, false); CheckPageInCore(bigPtr - kPointerOffset, false);
...@@ -1808,12 +1762,12 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) { ...@@ -1808,12 +1762,12 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Allocate 3 full slot spans worth of 8192-byte allocations. // Allocate 3 full slot spans worth of 8192-byte allocations.
// Each slot span for this size is 16384 bytes, or 1 partition page and 2 // Each slot span for this size is 16384 bytes, or 1 partition page and 2
// slots. // slots.
void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr5 = generic_allocator.root()->Alloc(size, type_name);
void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr6 = generic_allocator.root()->Alloc(size, type_name);
PartitionPage* page1 = PartitionPage* page1 =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
...@@ -1829,46 +1783,44 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) { ...@@ -1829,46 +1783,44 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Free up the 2nd slot in each slot span. // Free up the 2nd slot in each slot span.
// This leaves the active list containing 3 pages, each with 1 used and 1 // This leaves the active list containing 3 pages, each with 1 used and 1
// free slot. The active page will be the one containing ptr1. // free slot. The active page will be the one containing ptr1.
PartitionFreeGeneric(generic_allocator.root(), ptr6); generic_allocator.root()->Free(ptr6);
PartitionFreeGeneric(generic_allocator.root(), ptr4); generic_allocator.root()->Free(ptr4);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
EXPECT_EQ(page1, bucket->active_pages_head); EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the middle page in the active list. // Empty the middle page in the active list.
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
EXPECT_EQ(page1, bucket->active_pages_head); EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the the first page in the active list -- also the current page. // Empty the the first page in the active list -- also the current page.
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
// A good choice here is to re-fill the third page since the first two are // A good choice here is to re-fill the third page since the first two are
// empty. We used to fail that. // empty. We used to fail that.
void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); void* ptr7 = generic_allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr6, ptr7); EXPECT_EQ(ptr6, ptr7);
EXPECT_EQ(page3, bucket->active_pages_head); EXPECT_EQ(page3, bucket->active_pages_head);
PartitionFreeGeneric(generic_allocator.root(), ptr5); generic_allocator.root()->Free(ptr5);
PartitionFreeGeneric(generic_allocator.root(), ptr7); generic_allocator.root()->Free(ptr7);
} }
// Tests the API to purge discardable memory. // Tests the API to purge discardable memory.
TEST_F(PartitionAllocTest, PurgeDiscardable) { TEST_F(PartitionAllocTest, PurgeDiscardable) {
// Free the second of two 4096 byte allocations and then purge. // Free the second of two 4096 byte allocations and then purge.
{ {
void* ptr1 = PartitionAllocGeneric( void* ptr1 = generic_allocator.root()->Alloc(
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); kSystemPageSize - kExtraAllocSize, type_name);
char* ptr2 = reinterpret_cast<char*>( char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
PartitionAllocGeneric(generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name));
kSystemPageSize - kExtraAllocSize, type_name)); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr2);
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
EXPECT_EQ(2u, page->num_unprovisioned_slots); EXPECT_EQ(2u, page->num_unprovisioned_slots);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -1881,26 +1833,24 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -1881,26 +1833,24 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
} }
CheckPageInCore(ptr2 - kPointerOffset, true); CheckPageInCore(ptr2 - kPointerOffset, true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr2 - kPointerOffset, false); CheckPageInCore(ptr2 - kPointerOffset, false);
EXPECT_EQ(3u, page->num_unprovisioned_slots); EXPECT_EQ(3u, page->num_unprovisioned_slots);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
} }
// Free the first of two 4096 byte allocations and then purge. // Free the first of two 4096 byte allocations and then purge.
{ {
char* ptr1 = reinterpret_cast<char*>( char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
PartitionAllocGeneric(generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name));
kSystemPageSize - kExtraAllocSize, type_name)); void* ptr2 = generic_allocator.root()->Alloc(
void* ptr2 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); generic_allocator.root()->Free(ptr1);
PartitionFreeGeneric(generic_allocator.root(), ptr1);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -1917,34 +1867,33 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -1917,34 +1867,33 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
} }
CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset, true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
#if defined(OS_WIN) #if defined(OS_WIN)
CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset, true);
#else #else
CheckPageInCore(ptr1 - kPointerOffset, false); CheckPageInCore(ptr1 - kPointerOffset, false);
#endif #endif
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
} }
{ {
char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( char* ptr1 = reinterpret_cast<char*>(
generic_allocator.root(), 9216 - kExtraAllocSize, type_name)); generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name));
void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), void* ptr2 =
9216 - kExtraAllocSize, type_name); generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), void* ptr3 =
9216 - kExtraAllocSize, type_name); generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), void* ptr4 =
9216 - kExtraAllocSize, type_name); generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
memset(ptr1, 'A', 9216 - kExtraAllocSize); memset(ptr1, 'A', 9216 - kExtraAllocSize);
memset(ptr2, 'A', 9216 - kExtraAllocSize); memset(ptr2, 'A', 9216 - kExtraAllocSize);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216); const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216);
...@@ -1960,31 +1909,28 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -1960,31 +1909,28 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset, true);
CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
PartitionFreeGeneric(generic_allocator.root(), ptr4); generic_allocator.root()->Free(ptr4);
} }
{ {
char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
generic_allocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, (64 * kSystemPageSize) - kExtraAllocSize, type_name));
type_name));
memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize); memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
generic_allocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, (61 * kSystemPageSize) - kExtraAllocSize, type_name));
type_name));
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -2000,45 +1946,42 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -2000,45 +1946,42 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
} }
// This sub-test tests truncation of the provisioned slots in a trickier // This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten. // case where the freelist is rewritten.
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
PartitionPurgeDecommitEmptyPages);
{ {
char* ptr1 = reinterpret_cast<char*>( char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
PartitionAllocGeneric(generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name));
kSystemPageSize - kExtraAllocSize, type_name)); void* ptr2 = generic_allocator.root()->Alloc(
void* ptr2 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); void* ptr3 = generic_allocator.root()->Alloc(
void* ptr3 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); void* ptr4 = generic_allocator.root()->Alloc(
void* ptr4 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A'; ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 2] = 'A';
ptr1[kSystemPageSize * 3] = 'A'; ptr1[kSystemPageSize * 3] = 'A';
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr4); generic_allocator.root()->Free(ptr4);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
EXPECT_EQ(0u, page->num_unprovisioned_slots); EXPECT_EQ(0u, page->num_unprovisioned_slots);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -2058,8 +2001,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -2058,8 +2001,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(1u, page->num_unprovisioned_slots); EXPECT_EQ(1u, page->num_unprovisioned_slots);
CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset, true);
#if defined(OS_WIN) #if defined(OS_WIN)
...@@ -2071,46 +2014,43 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -2071,46 +2014,43 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
// Let's check we didn't brick the freelist. // Let's check we didn't brick the freelist.
void* ptr1b = PartitionAllocGeneric( void* ptr1b = generic_allocator.root()->Alloc(
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr1, ptr1b); EXPECT_EQ(ptr1, ptr1b);
void* ptr2b = PartitionAllocGeneric( void* ptr2b = generic_allocator.root()->Alloc(
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr2, ptr2b); EXPECT_EQ(ptr2, ptr2b);
EXPECT_FALSE(page->freelist_head); EXPECT_FALSE(page->freelist_head);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
} }
// This sub-test is similar, but tests a double-truncation. // This sub-test is similar, but tests a double-truncation.
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
PartitionPurgeDecommitEmptyPages);
{ {
char* ptr1 = reinterpret_cast<char*>( char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
PartitionAllocGeneric(generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name));
kSystemPageSize - kExtraAllocSize, type_name)); void* ptr2 = generic_allocator.root()->Alloc(
void* ptr2 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); void* ptr3 = generic_allocator.root()->Alloc(
void* ptr3 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); void* ptr4 = generic_allocator.root()->Alloc(
void* ptr4 = PartitionAllocGeneric( kSystemPageSize - kExtraAllocSize, type_name);
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A'; ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 2] = 'A';
ptr1[kSystemPageSize * 3] = 'A'; ptr1[kSystemPageSize * 3] = 'A';
PartitionPage* page = PartitionPage* page =
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
PartitionFreeGeneric(generic_allocator.root(), ptr4); generic_allocator.root()->Free(ptr4);
PartitionFreeGeneric(generic_allocator.root(), ptr3); generic_allocator.root()->Free(ptr3);
EXPECT_EQ(0u, page->num_unprovisioned_slots); EXPECT_EQ(0u, page->num_unprovisioned_slots);
{ {
MockPartitionStatsDumper dumper; MockPartitionStatsDumper dumper;
PartitionDumpStatsGeneric(generic_allocator.root(), generic_allocator.root()->DumpStats("mock_generic_allocator",
"mock_generic_allocator", false /* detailed dump */, &dumper);
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = const PartitionBucketMemoryStats* stats =
...@@ -2126,8 +2066,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -2126,8 +2066,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
PartitionPurgeMemoryGeneric(generic_allocator.root(), generic_allocator.root()->PurgeMemory(
PartitionPurgeDiscardUnusedSystemPages); PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(2u, page->num_unprovisioned_slots); EXPECT_EQ(2u, page->num_unprovisioned_slots);
CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset, true);
CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
...@@ -2136,8 +2076,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) { ...@@ -2136,8 +2076,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_FALSE(page->freelist_head); EXPECT_FALSE(page->freelist_head);
PartitionFreeGeneric(generic_allocator.root(), ptr1); generic_allocator.root()->Free(ptr1);
PartitionFreeGeneric(generic_allocator.root(), ptr2); generic_allocator.root()->Free(ptr2);
} }
} }
...@@ -2148,30 +2088,27 @@ TEST_F(PartitionAllocTest, ReallocMovesCookies) { ...@@ -2148,30 +2088,27 @@ TEST_F(PartitionAllocTest, ReallocMovesCookies) {
// and we can track the "raw" size. See https://crbug.com/709271 // and we can track the "raw" size. See https://crbug.com/709271
static constexpr size_t kSize = static constexpr size_t kSize =
base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize; base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
void* ptr = void* ptr = generic_allocator.root()->Alloc(kSize + 1, type_name);
PartitionAllocGeneric(generic_allocator.root(), kSize + 1, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 1); memset(ptr, 0xbd, kSize + 1);
ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 2, ptr = generic_allocator.root()->Realloc(ptr, kSize + 2, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 2); memset(ptr, 0xbd, kSize + 2);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) { TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
// For crbug.com/781473 // For crbug.com/781473
static constexpr size_t kSize = 264; static constexpr size_t kSize = 264;
void* ptr = PartitionAllocGeneric(generic_allocator.root(), kSize, type_name); void* ptr = generic_allocator.root()->Alloc(kSize, type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
ptr = PartitionReallocGeneric(generic_allocator.root(), ptr, kSize + 16, ptr = generic_allocator.root()->Realloc(ptr, kSize + 16, type_name);
type_name);
EXPECT_TRUE(ptr); EXPECT_TRUE(ptr);
PartitionFreeGeneric(generic_allocator.root(), ptr); generic_allocator.root()->Free(ptr);
} }
} // namespace base } // namespace base
......
...@@ -198,9 +198,8 @@ class MemlogBrowserTest : public InProcessBrowserTest, ...@@ -198,9 +198,8 @@ class MemlogBrowserTest : public InProcessBrowserTest,
} }
for (int i = 0; i < kPartitionAllocCount; ++i) { for (int i = 0; i < kPartitionAllocCount; ++i) {
leaks_.push_back(static_cast<char*>( leaks_.push_back(static_cast<char*>(partition_allocator_.root()->Alloc(
PartitionAllocGeneric(partition_allocator_.root(), kPartitionAllocSize, kPartitionAllocTypeName)));
kPartitionAllocSize, kPartitionAllocTypeName)));
} }
for (int i = 0; i < kBrowserAllocCount; ++i) { for (int i = 0; i < kBrowserAllocCount; ++i) {
......
...@@ -322,9 +322,8 @@ void ProfilingTestDriver::MakeTestAllocations() { ...@@ -322,9 +322,8 @@ void ProfilingTestDriver::MakeTestAllocations() {
} }
for (int i = 0; i < kPartitionAllocCount; ++i) { for (int i = 0; i < kPartitionAllocCount; ++i) {
leaks_.push_back(static_cast<char*>( leaks_.push_back(static_cast<char*>(partition_allocator_.root()->Alloc(
PartitionAllocGeneric(partition_allocator_.root(), kPartitionAllocSize, kPartitionAllocSize, kPartitionAllocTypeName)));
kPartitionAllocTypeName)));
} }
for (int i = 0; i < kVariadicAllocCount; ++i) { for (int i = 0; i < kVariadicAllocCount; ++i) {
......
...@@ -35,8 +35,7 @@ class WTF_EXPORT PartitionAllocator { ...@@ -35,8 +35,7 @@ class WTF_EXPORT PartitionAllocator {
template <typename T> template <typename T>
static size_t QuantizedSize(size_t count) { static size_t QuantizedSize(size_t count) {
CHECK_LE(count, MaxElementCountInBackingStore<T>()); CHECK_LE(count, MaxElementCountInBackingStore<T>());
return PartitionAllocActualSize(WTF::Partitions::BufferPartition(), return WTF::Partitions::BufferPartition()->ActualSize(count * sizeof(T));
count * sizeof(T));
} }
template <typename T> template <typename T>
static T* AllocateVectorBacking(size_t size) { static T* AllocateVectorBacking(size_t size) {
......
...@@ -90,15 +90,14 @@ void Partitions::DecommitFreeableMemory() { ...@@ -90,15 +90,14 @@ void Partitions::DecommitFreeableMemory() {
if (!initialized_) if (!initialized_)
return; return;
base::PartitionPurgeMemoryGeneric( ArrayBufferPartition()->PurgeMemory(
ArrayBufferPartition(), base::PartitionPurgeDecommitEmptyPages | base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages); base::PartitionPurgeDiscardUnusedSystemPages);
base::PartitionPurgeMemoryGeneric( BufferPartition()->PurgeMemory(base::PartitionPurgeDecommitEmptyPages |
BufferPartition(), base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
base::PartitionPurgeMemoryGeneric(
FastMallocPartition(), base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages); base::PartitionPurgeDiscardUnusedSystemPages);
FastMallocPartition()->PurgeMemory(
base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages);
LayoutPartition()->PurgeMemory(base::PartitionPurgeDecommitEmptyPages | LayoutPartition()->PurgeMemory(base::PartitionPurgeDecommitEmptyPages |
base::PartitionPurgeDiscardUnusedSystemPages); base::PartitionPurgeDiscardUnusedSystemPages);
} }
...@@ -127,12 +126,11 @@ void Partitions::DumpMemoryStats( ...@@ -127,12 +126,11 @@ void Partitions::DumpMemoryStats(
DCHECK(IsMainThread()); DCHECK(IsMainThread());
DecommitFreeableMemory(); DecommitFreeableMemory();
PartitionDumpStatsGeneric(FastMallocPartition(), "fast_malloc", is_light_dump, FastMallocPartition()->DumpStats("fast_malloc", is_light_dump,
partition_stats_dumper); partition_stats_dumper);
PartitionDumpStatsGeneric(ArrayBufferPartition(), "array_buffer", ArrayBufferPartition()->DumpStats("array_buffer", is_light_dump,
is_light_dump, partition_stats_dumper); partition_stats_dumper);
PartitionDumpStatsGeneric(BufferPartition(), "buffer", is_light_dump, BufferPartition()->DumpStats("buffer", is_light_dump, partition_stats_dumper);
partition_stats_dumper);
LayoutPartition()->DumpStats("layout", is_light_dump, partition_stats_dumper); LayoutPartition()->DumpStats("layout", is_light_dump, partition_stats_dumper);
} }
......
...@@ -106,22 +106,19 @@ class WTF_EXPORT Partitions { ...@@ -106,22 +106,19 @@ class WTF_EXPORT Partitions {
static void DumpMemoryStats(bool is_light_dump, base::PartitionStatsDumper*); static void DumpMemoryStats(bool is_light_dump, base::PartitionStatsDumper*);
ALWAYS_INLINE static void* BufferMalloc(size_t n, const char* type_name) { ALWAYS_INLINE static void* BufferMalloc(size_t n, const char* type_name) {
return base::PartitionAllocGeneric(BufferPartition(), n, type_name); return BufferPartition()->Alloc(n, type_name);
} }
ALWAYS_INLINE static void* BufferRealloc(void* p, ALWAYS_INLINE static void* BufferRealloc(void* p,
size_t n, size_t n,
const char* type_name) { const char* type_name) {
return base::PartitionReallocGeneric(BufferPartition(), p, n, type_name); return BufferPartition()->Realloc(p, n, type_name);
}
ALWAYS_INLINE static void BufferFree(void* p) {
base::PartitionFreeGeneric(BufferPartition(), p);
} }
ALWAYS_INLINE static void BufferFree(void* p) { BufferPartition()->Free(p); }
ALWAYS_INLINE static size_t BufferActualSize(size_t n) { ALWAYS_INLINE static size_t BufferActualSize(size_t n) {
return base::PartitionAllocActualSize(BufferPartition(), n); return BufferPartition()->ActualSize(n);
} }
static void* FastMalloc(size_t n, const char* type_name) { static void* FastMalloc(size_t n, const char* type_name) {
return base::PartitionAllocGeneric(Partitions::FastMallocPartition(), n, return Partitions::FastMallocPartition()->Alloc(n, type_name);
type_name);
} }
static void* FastZeroedMalloc(size_t n, const char* type_name) { static void* FastZeroedMalloc(size_t n, const char* type_name) {
void* result = FastMalloc(n, type_name); void* result = FastMalloc(n, type_name);
...@@ -129,12 +126,9 @@ class WTF_EXPORT Partitions { ...@@ -129,12 +126,9 @@ class WTF_EXPORT Partitions {
return result; return result;
} }
static void* FastRealloc(void* p, size_t n, const char* type_name) { static void* FastRealloc(void* p, size_t n, const char* type_name) {
return base::PartitionReallocGeneric(Partitions::FastMallocPartition(), p, return Partitions::FastMallocPartition()->Realloc(p, n, type_name);
n, type_name);
}
static void FastFree(void* p) {
base::PartitionFreeGeneric(Partitions::FastMallocPartition(), p);
} }
static void FastFree(void* p) { Partitions::FastMallocPartition()->Free(p); }
static void HandleOutOfMemory(); static void HandleOutOfMemory();
......
...@@ -155,7 +155,7 @@ void* ArrayBufferContents::ReserveMemory(size_t size) { ...@@ -155,7 +155,7 @@ void* ArrayBufferContents::ReserveMemory(size_t size) {
} }
void ArrayBufferContents::FreeMemory(void* data) { void ArrayBufferContents::FreeMemory(void* data) {
base::PartitionFreeGeneric(Partitions::ArrayBufferPartition(), data); Partitions::ArrayBufferPartition()->Free(data);
} }
void ArrayBufferContents::ReleaseReservedMemory(void* data, size_t size) { void ArrayBufferContents::ReleaseReservedMemory(void* data, size_t size) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment