Commit 88ae1960 authored by Benoit Lize, committed by Commit Bot

base/allocator: Remove now-meaningless "Generic" in PartitionAlloc.

PartitionAlloc used to have a "generic" variant, and a specialized
one. This is no longer the case, so remove needless "Generic" in the
constants, and fix now-incorrect function/class names in comments.

Bug: 998048
Change-Id: Icb72a60fee5fa005de9bd7b40c5cbbb1ce37ecbf
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2310352
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#790770}
parent eb52f981
@@ -40,7 +40,7 @@ possibility of inlining.
 For an example of how to use partitions to get good performance and good safety,
 see Blink's usage, as described in `wtf/allocator/Allocator.md`.
 
-Large allocations (> kGenericMaxBucketed == 960KB) are realized by direct
+Large allocations (> kMaxBucketed == 960KB) are realized by direct
 memory mmapping. This size makes sense because 960KB = 0xF0000. The next larger
 bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
 a SuperPage meaning it would not be possible to pack even 2 sequential
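The arithmetic above can be spot-checked in isolation. The sketch below is illustrative only: the 2 MiB super page size and the single reserved metadata page are assumptions for the example, and the names are not the constants defined later in this change.

```cpp
#include <cstddef>

// Back-of-the-envelope check of the sizes discussed above (illustrative
// values; the real kMaxBucketed is defined among the constants in this diff).
constexpr size_t kKiB = 1024;
constexpr size_t kMiB = 1024 * kKiB;

constexpr size_t kIllustrativeMaxBucketed = 960 * kKiB;
static_assert(kIllustrativeMaxBucketed == 0xF0000, "960 KB is 0xF0000");
static_assert(kIllustrativeMaxBucketed == 983040,
              "the value asserted elsewhere in this change");

// A super page is assumed to be 2 MiB with part of it reserved for guard and
// metadata pages, so its usable payload is strictly less than 2 MiB. Two 1 MiB
// slots therefore cannot be packed into one super page, which is why 960 KB is
// the largest bucketed size and anything bigger is direct-mapped.
constexpr size_t kAssumedSuperPageSize = 2 * kMiB;
constexpr size_t kAssumedMetadataReserve = 16 * kKiB;  // one partition page, assumed
static_assert(2 * kMiB > kAssumedSuperPageSize - kAssumedMetadataReserve,
              "two 1 MiB slots do not fit in one super page");
```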
......
@@ -69,18 +69,17 @@ static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
                   kSystemPageSize,
               "page metadata fits in hole");
 
 // Limit to prevent callers accidentally overflowing an int size.
-static_assert(kGenericMaxDirectMapped <=
-                  (1UL << 31) + kPageAllocationGranularity,
+static_assert(kMaxDirectMapped <= (1UL << 31) + kPageAllocationGranularity,
               "maximum direct mapped allocation");
 // Check that some of our zanier calculations worked out as expected.
 #if ENABLE_TAG_FOR_MTE_CHECKED_PTR
-static_assert(kGenericSmallestBucket >= alignof(std::max_align_t),
+static_assert(kSmallestBucket >= alignof(std::max_align_t),
               "generic smallest bucket");
 #else
-static_assert(kGenericSmallestBucket == alignof(std::max_align_t),
+static_assert(kSmallestBucket == alignof(std::max_align_t),
               "generic smallest bucket");
 #endif
-static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
+static_assert(kMaxBucketed == 983040, "generic max bucketed");
 static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
               "System pages per slot span must be less than 128.");
@@ -243,19 +242,19 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
   size_t order;
   for (order = 0; order <= kBitsPerSizeT; ++order) {
     size_t order_index_shift;
-    if (order < kGenericNumBucketsPerOrderBits + 1)
+    if (order < kNumBucketsPerOrderBits + 1)
       order_index_shift = 0;
     else
-      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
+      order_index_shift = order - (kNumBucketsPerOrderBits + 1);
     order_index_shifts[order] = order_index_shift;
     size_t sub_order_index_mask;
     if (order == kBitsPerSizeT) {
       // This avoids invoking undefined behavior for an excessive shift.
       sub_order_index_mask =
-          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+          static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
     } else {
       sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
-                             (kGenericNumBucketsPerOrderBits + 1);
+                             (kNumBucketsPerOrderBits + 1);
     }
     order_sub_index_masks[order] = sub_order_index_mask;
   }
@@ -267,47 +266,46 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
   // We avoid them in the bucket lookup map, but we tolerate them to keep the
   // code simpler and the structures more generic.
   size_t i, j;
-  size_t current_size = kGenericSmallestBucket;
-  size_t current_increment =
-      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+  size_t current_size = kSmallestBucket;
+  size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
   Bucket* bucket = &buckets[0];
-  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
-    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+  for (i = 0; i < kNumBucketedOrders; ++i) {
+    for (j = 0; j < kNumBucketsPerOrder; ++j) {
       bucket->Init(current_size);
       // Disable pseudo buckets so that touching them faults.
-      if (current_size % kGenericSmallestBucket)
+      if (current_size % kSmallestBucket)
        bucket->active_pages_head = nullptr;
      current_size += current_increment;
      ++bucket;
    }
    current_increment <<= 1;
  }
-  PA_DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
-  PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  PA_DCHECK(current_size == 1 << kMaxBucketedOrder);
+  PA_DCHECK(bucket == &buckets[0] + kNumBuckets);
 
   // Then set up the fast size -> bucket lookup table.
   bucket = &buckets[0];
   Bucket** bucket_ptr = &bucket_lookups[0];
   for (order = 0; order <= kBitsPerSizeT; ++order) {
-    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
-      if (order < kGenericMinBucketedOrder) {
+    for (j = 0; j < kNumBucketsPerOrder; ++j) {
+      if (order < kMinBucketedOrder) {
         // Use the bucket of the finest granularity for malloc(0) etc.
         *bucket_ptr++ = &buckets[0];
-      } else if (order > kGenericMaxBucketedOrder) {
+      } else if (order > kMaxBucketedOrder) {
         *bucket_ptr++ = Bucket::get_sentinel_bucket();
       } else {
         Bucket* valid_bucket = bucket;
         // Skip over invalid buckets.
-        while (valid_bucket->slot_size % kGenericSmallestBucket)
+        while (valid_bucket->slot_size % kSmallestBucket)
           valid_bucket++;
         *bucket_ptr++ = valid_bucket;
         bucket++;
       }
     }
   }
-  PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
-  PA_DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) *
-                                                kGenericNumBucketsPerOrder));
+  PA_DCHECK(bucket == &buckets[0] + kNumBuckets);
+  PA_DCHECK(bucket_ptr ==
+            &bucket_lookups[0] + ((kBitsPerSizeT + 1) * kNumBucketsPerOrder));
   // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
   // which tries to overflow to a non-existent order.
   *bucket_ptr = Bucket::get_sentinel_bucket();
@@ -325,7 +323,7 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
   // Note that the new size might be a bucketed size; this function is called
   // whenever we're reallocating a direct mapped allocation.
   size_t new_size = Bucket::get_direct_map_size(raw_size);
-  if (new_size < kGenericMinDirectMappedDownsize)
+  if (new_size < kMinDirectMappedDownsize)
     return false;
 
   // bucket->slot_size is the current size of the allocation.
@@ -394,7 +392,7 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
       return nullptr;
     }
 
-    if (new_size > kGenericMaxDirectMapped) {
+    if (new_size > kMaxDirectMapped) {
       if (flags & PartitionAllocReturnNull)
         return nullptr;
       internal::PartitionExcessiveAllocationSize(new_size);
@@ -643,7 +641,7 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
   if (flags & PartitionPurgeDecommitEmptyPages)
     DecommitEmptyPages();
   if (flags & PartitionPurgeDiscardUnusedSystemPages) {
-    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+    for (size_t i = 0; i < kNumBuckets; ++i) {
       Bucket* bucket = &buckets[i];
       if (bucket->slot_size >= kSystemPageSize)
         PartitionPurgeBucket(bucket);
@@ -758,10 +756,10 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
         std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
   }
 
-  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+  PartitionBucketMemoryStats bucket_stats[kNumBuckets];
   size_t num_direct_mapped_allocations = 0;
   {
-    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+    for (size_t i = 0; i < kNumBuckets; ++i) {
       const Bucket* bucket = &buckets[i];
       // Don't report the pseudo buckets that the generic allocator sets up in
       // order to preserve a fast size->bucket map (see
@@ -795,7 +793,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
   // Call |PartitionsDumpBucketStats| after collecting stats because it can
   // try to allocate using |PartitionRoot::Alloc()| and it can't
   // obtain the lock.
-  for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+  for (size_t i = 0; i < kNumBuckets; ++i) {
     if (bucket_stats[i].is_valid)
       dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
   }
......
@@ -88,7 +88,7 @@
 // We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
 // size as other alloc code.
 #define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
-  if (size > kGenericMaxDirectMapped) {               \
+  if (size > kMaxDirectMapped) {                      \
     if (flags & PartitionAllocReturnNull) {           \
       return nullptr;                                 \
     }                                                 \
@@ -372,7 +372,7 @@ struct PartitionBucketMemoryStats {
 };
 
 // Interface that is passed to PartitionDumpStats and
-// PartitionDumpStatsGeneric for using the memory statistics.
+// PartitionDumpStats for using the memory statistics.
 class BASE_EXPORT PartitionStatsDumper {
  public:
   // Called to dump total memory used by partition, once per partition.
@@ -431,9 +431,8 @@ struct BASE_EXPORT PartitionRoot {
   // sizes. It is one flat array instead of a 2D array because in the 2D
   // world, we'd need to index array[blah][max+1] which risks undefined
   // behavior.
-  Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
-                         1] = {};
-  Bucket buckets[kGenericNumBuckets] = {};
+  Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1] = {};
+  Bucket buckets[kNumBuckets] = {};
 
   PartitionRoot() = default;
   explicit PartitionRoot(bool enable_tag_pointers) {
@@ -795,14 +794,14 @@ PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
   size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
   // The order index is simply the next few bits after the most significant bit.
   size_t order_index =
-      (size >> order_index_shifts[order]) & (kGenericNumBucketsPerOrder - 1);
+      (size >> order_index_shifts[order]) & (kNumBucketsPerOrder - 1);
   // And if the remaining bits are non-zero we must bump the bucket up.
   size_t sub_order_index = size & order_sub_index_masks[order];
-  Bucket* bucket = bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+  Bucket* bucket = bucket_lookups[(order << kNumBucketsPerOrderBits) +
                                   order_index + !!sub_order_index];
   PA_CHECK(bucket);
   PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
-  PA_DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+  PA_DCHECK(!(bucket->slot_size % kSmallestBucket));
   return bucket;
 }
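To make the indexing above concrete, here is a small standalone sketch of the same size-to-(order, index) arithmetic, written against assumed values (kNumBucketsPerOrderBits == 3, i.e. eight buckets per order). The helper names are illustrative and are not PartitionAlloc identifiers.

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative restatement of the size -> bucket-index arithmetic used by
// SizeToBucket() above. Assumed value: 3 bits, so eight buckets per order.
constexpr size_t kBucketsPerOrderBits = 3;
constexpr size_t kBucketsPerOrder = 1 << kBucketsPerOrderBits;

// Bit index (starting at 1) of the most significant set bit, i.e. the "order".
size_t OrderOf(size_t size) {
  size_t order = 0;
  while (size) {
    ++order;
    size >>= 1;
  }
  return order;
}

void Explain(size_t size) {
  const size_t order = OrderOf(size);
  // The next kBucketsPerOrderBits bits after the most significant one.
  const size_t shift =
      order < kBucketsPerOrderBits + 1 ? 0 : order - (kBucketsPerOrderBits + 1);
  const size_t order_index = (size >> shift) & (kBucketsPerOrder - 1);
  // Any remaining low bits mean the size falls between two buckets and must
  // be bumped up to the next one.
  const size_t remainder_mask = shift == 0 ? 0 : (size_t{1} << shift) - 1;
  const bool round_up = (size & remainder_mask) != 0;
  // Flat lookup-table index, mirroring the expression in SizeToBucket().
  const size_t lookup_index =
      (order << kBucketsPerOrderBits) + order_index + (round_up ? 1 : 0);
  std::printf("size=%zu order=%zu order_index=%zu round_up=%d index=%zu\n",
              size, order, order_index, round_up, lookup_index);
}

int main() {
  Explain(128);  // order 8, index 0: the exact 128-byte bucket
  Explain(129);  // order 8, rounded up to the next (144-byte) bucket
  Explain(224);  // order 8, index 6: the 224-byte bucket
}
```

The flat array indexed this way is why `bucket_lookups` has `(kBitsPerSizeT + 1) * kNumBucketsPerOrder + 1` entries: one row of eight per possible order, plus the final sentinel entry for overflow.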
@@ -914,7 +913,7 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
   auto* bucket = SizeToBucket(size);
   if (LIKELY(!bucket->is_direct_mapped())) {
     size = bucket->slot_size;
-  } else if (size > kGenericMaxDirectMapped) {
+  } else if (size > kMaxDirectMapped) {
     // Too large to allocate => return the size unchanged.
   } else {
     size = Bucket::get_direct_map_size(size);
......
@@ -28,14 +28,14 @@ namespace base {
 // Slot span sizes are adjusted depending on the allocation size, to make sure
 // the packing does not lead to unused (wasted) space at the end of the last
 // system page of the span. For our current maximum slot span size of 64 KiB and
-// other constant values, we pack _all_ `PartitionRootGeneric::Alloc` sizes
-// perfectly up against the end of a system page.
+// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
+// up against the end of a system page.
 #if defined(_MIPS_ARCH_LOONGSON)
 static const size_t kPartitionPageShift = 16;  // 64 KiB
 #elif defined(ARCH_CPU_PPC64)
 static const size_t kPartitionPageShift = 18;  // 256 KiB
-#elif defined (OS_MACOSX) && defined(ARCH_CPU_ARM64)
+#elif defined(OS_MACOSX) && defined(ARCH_CPU_ARM64)
 static const size_t kPartitionPageShift = 16;  // 64 KiB
 #else
 static const size_t kPartitionPageShift = 14;  // 16 KiB
@@ -134,7 +134,6 @@ static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
 static const size_t kNumPartitionPagesPerSuperPage =
     kSuperPageSize / kPartitionPageSize;
 
-// The following kGeneric* constants apply to the generic variants of the API.
 // The "order" of an allocation is closely related to the power-of-1 size of the
 // allocation. More precisely, the order is the bit index of the
 // most-significant-bit in the allocation size, where the bit numbers starts at
 // index 1 for the smallest allocation.
@@ -155,32 +154,29 @@ static_assert(alignof(std::max_align_t) <= 16,
 // bytes on 64 bit ones.
 #if ENABLE_TAG_FOR_MTE_CHECKED_PTR
 // MTECheckedPtr requires 16B-alignment because kBytesPerPartitionTag is 16.
-static const size_t kGenericMinBucketedOrder = 5;
+static const size_t kMinBucketedOrder = 5;
 #else
-static const size_t kGenericMinBucketedOrder =
+static const size_t kMinBucketedOrder =
     alignof(std::max_align_t) == 16 ? 5 : 4;  // 2^(order - 1), that is 16 or 8.
 #endif
 // The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
-static const size_t kGenericMaxBucketedOrder = 20;
-static const size_t kGenericNumBucketedOrders =
-    (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+static const size_t kMaxBucketedOrder = 20;
+static const size_t kNumBucketedOrders =
+    (kMaxBucketedOrder - kMinBucketedOrder) + 1;
 // Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
 // 160, ..., 240:
-static const size_t kGenericNumBucketsPerOrderBits = 3;
-static const size_t kGenericNumBucketsPerOrder =
-    1 << kGenericNumBucketsPerOrderBits;
-static const size_t kGenericNumBuckets =
-    kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
-static const size_t kGenericSmallestBucket = 1
-                                             << (kGenericMinBucketedOrder - 1);
-static const size_t kGenericMaxBucketSpacing =
-    1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
-static const size_t kGenericMaxBucketed =
-    (1 << (kGenericMaxBucketedOrder - 1)) +
-    ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
+static const size_t kNumBucketsPerOrderBits = 3;
+static const size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
+static const size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
+static const size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
+static const size_t kMaxBucketSpacing =
+    1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
+static const size_t kMaxBucketed =
+    (1 << (kMaxBucketedOrder - 1)) +
+    ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
 // Limit when downsizing a direct mapping using `realloc`:
-static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1;
-static const size_t kGenericMaxDirectMapped =
+static const size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
+static const size_t kMaxDirectMapped =
     (1UL << 31) + kPageAllocationGranularity;  // 2 GiB plus 1 more page.
 static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
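For reference, the derived values work out as in the standalone sketch below, assuming the 16-byte `max_align_t` configuration (kMinBucketedOrder == 5). These mirror, but are not, the constants above.

```cpp
#include <cstddef>

// Mirror of the derived-constant arithmetic above, under the assumption
// kMinBucketedOrder == 5 (alignof(std::max_align_t) == 16).
constexpr size_t kMinOrder = 5;
constexpr size_t kMaxOrder = 20;
constexpr size_t kOrders = (kMaxOrder - kMinOrder) + 1;          // 16 bucketed orders
constexpr size_t kPerOrderBits = 3;
constexpr size_t kPerOrder = size_t{1} << kPerOrderBits;         // 8 buckets per order
constexpr size_t kBucketCount = kOrders * kPerOrder;             // 128 buckets
constexpr size_t kSmallest = size_t{1} << (kMinOrder - 1);       // 16 bytes
constexpr size_t kSpacing =
    size_t{1} << ((kMaxOrder - 1) - kPerOrderBits);              // 64 KiB in the last order
constexpr size_t kLargestBucketed =
    (size_t{1} << (kMaxOrder - 1)) + (kPerOrder - 1) * kSpacing; // 512 KiB + 7 * 64 KiB

static_assert(kBucketCount == 128, "16 orders of 8 buckets each");
static_assert(kSmallest == 16 && kSpacing == 64 * 1024, "smallest bucket and last spacing");
static_assert(kLargestBucketed == 983040, "960 KiB, as asserted elsewhere in this change");
```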
@@ -198,7 +194,7 @@ static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1 GiB
 static const unsigned char kUninitializedByte = 0xAB;
 static const unsigned char kFreedByte = 0xCD;
 
-// Flags for `PartitionAllocGenericFlags`.
+// Flags for `PartitionAllocFlags`.
 enum PartitionAllocFlags {
   PartitionAllocReturnNull = 1 << 0,
   PartitionAllocZeroFill = 1 << 1,
......
@@ -229,9 +229,9 @@ class PartitionAllocTest : public testing::Test {
   }
 
   enum ReturnNullTestMode {
-    kPartitionAllocGenericFlags,
-    kPartitionReallocGenericFlags,
-    kPartitionRootGenericTryRealloc,
+    kPartitionAllocFlags,
+    kPartitionReallocFlags,
+    kPartitionRootTryRealloc,
   };
 
   void DoReturnNullTest(size_t alloc_size, ReturnNullTestMode mode) {
@@ -255,19 +255,19 @@ class PartitionAllocTest : public testing::Test {
     for (i = 0; i < num_allocations; ++i) {
       switch (mode) {
-        case kPartitionAllocGenericFlags: {
+        case kPartitionAllocFlags: {
           ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
                                                  alloc_size, type_name);
           break;
         }
-        case kPartitionReallocGenericFlags: {
+        case kPartitionReallocFlags: {
           ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
                                                  type_name);
           ptrs[i] = allocator.root()->ReallocFlags(
               PartitionAllocReturnNull, ptrs[i], alloc_size, type_name);
           break;
         }
-        case kPartitionRootGenericTryRealloc: {
+        case kPartitionRootTryRealloc: {
           ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
                                                  type_name);
           ptrs[i] =
@@ -665,11 +665,11 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
 
 // Test the generic allocation functions that can handle arbitrary sizes and
 // reallocing etc.
-TEST_F(PartitionAllocTest, GenericAlloc) {
+TEST_F(PartitionAllocTest, Alloc) {
   void* ptr = allocator.root()->Alloc(1, type_name);
   EXPECT_TRUE(ptr);
   allocator.root()->Free(ptr);
-  ptr = allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
+  ptr = allocator.root()->Alloc(kMaxBucketed + 1, type_name);
   EXPECT_TRUE(ptr);
   allocator.root()->Free(ptr);
@@ -684,12 +684,11 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
   EXPECT_EQ(ptr, new_ptr);
   new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
   EXPECT_EQ(ptr, new_ptr);
-  new_ptr = allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
+  new_ptr = allocator.root()->Realloc(ptr, kSmallestBucket, type_name);
   EXPECT_EQ(ptr, new_ptr);
 
   // Change the size of the realloc, switching buckets.
-  new_ptr =
-      allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1, type_name);
+  new_ptr = allocator.root()->Realloc(ptr, kSmallestBucket + 1, type_name);
   EXPECT_NE(new_ptr, ptr);
   // Check that the realloc copied correctly.
   char* new_char_ptr = static_cast<char*>(new_ptr);
@@ -698,8 +697,8 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
   // Subtle: this checks for an old bug where we copied too much from the
   // source of the realloc. The condition can be detected by a trashing of
   // the uninitialized value in the space of the upsized allocation.
-  EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(
-                                    *(new_char_ptr + kGenericSmallestBucket)));
+  EXPECT_EQ(kUninitializedByte,
+            static_cast<unsigned char>(*(new_char_ptr + kSmallestBucket)));
 #endif
   *new_char_ptr = 'B';
   // The realloc moved. To check that the old allocation was freed, we can
@@ -719,7 +718,7 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
 
   // Upsize the realloc to outside the partition.
   ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1, type_name);
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed + 1, type_name);
   EXPECT_NE(new_ptr, ptr);
   new_char_ptr = static_cast<char*>(new_ptr);
   EXPECT_EQ(*new_char_ptr, 'C');
@@ -727,12 +726,12 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
 
   // Upsize and downsize the realloc, remaining outside the partition.
   ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10, type_name);
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 10, type_name);
   new_char_ptr = static_cast<char*>(new_ptr);
   EXPECT_EQ(*new_char_ptr, 'D');
   *new_char_ptr = 'E';
   ptr = new_ptr;
-  new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2, type_name);
+  new_ptr = allocator.root()->Realloc(ptr, kMaxBucketed * 2, type_name);
   new_char_ptr = static_cast<char*>(new_ptr);
   EXPECT_EQ(*new_char_ptr, 'E');
   *new_char_ptr = 'F';
@@ -750,7 +749,7 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
 
 // Test the generic allocation functions can handle some specific sizes of
 // interest.
-TEST_F(PartitionAllocTest, GenericAllocSizes) {
+TEST_F(PartitionAllocTest, AllocSizes) {
   void* ptr = allocator.root()->Alloc(0, type_name);
   EXPECT_TRUE(ptr);
   allocator.root()->Free(ptr);
@@ -842,7 +841,7 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
 }
 
 // Test that we can fetch the real allocated size after an allocation.
-TEST_F(PartitionAllocTest, GenericAllocGetSizeAndOffset) {
+TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
   void* ptr;
   size_t requested_size, actual_size, predicted_size;
@@ -940,7 +939,7 @@ TEST_F(PartitionAllocTest, GenericAllocGetSizeAndOffset) {
   }
 
   // Too large allocation.
-  requested_size = kGenericMaxDirectMapped + 1;
+  requested_size = kMaxDirectMapped + 1;
   predicted_size = allocator.root()->ActualSize(requested_size);
   EXPECT_EQ(requested_size, predicted_size);
 }
@@ -1020,11 +1019,11 @@ TEST_F(PartitionAllocTest, Realloc) {
   allocator.root()->Free(ptr);
 
   // Test that shrinking a direct mapped allocation happens in-place.
-  size = kGenericMaxBucketed + 16 * kSystemPageSize;
+  size = kMaxBucketed + 16 * kSystemPageSize;
   ptr = allocator.root()->Alloc(size, type_name);
   size_t actual_size = allocator.root()->GetSize(ptr);
-  ptr2 = allocator.root()->Realloc(
-      ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
+  ptr2 = allocator.root()->Realloc(ptr, kMaxBucketed + 8 * kSystemPageSize,
+                                   type_name);
   EXPECT_EQ(ptr, ptr2);
   EXPECT_EQ(actual_size - 8 * kSystemPageSize, allocator.root()->GetSize(ptr2));
@@ -1510,40 +1509,38 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
 // process, so they won't pollute other tests.
 TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNullDirect) {
   // A direct-mapped allocation size.
-  EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionAllocGenericFlags),
+  EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionAllocFlags),
                "DoReturnNullTest");
 }
 
 // Repeating above test with Realloc
 TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNullDirect) {
-  EXPECT_DEATH(
-      DoReturnNullTest(32 * 1024 * 1024, kPartitionReallocGenericFlags),
-      "DoReturnNullTest");
+  EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionReallocFlags),
+               "DoReturnNullTest");
 }
 
 // Repeating above test with TryRealloc
 TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNullDirect) {
-  EXPECT_DEATH(
-      DoReturnNullTest(32 * 1024 * 1024, kPartitionRootGenericTryRealloc),
-      "DoReturnNullTest");
+  EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024, kPartitionRootTryRealloc),
+               "DoReturnNullTest");
 }
 
 // Test "return null" with a 512 kB block size.
 TEST_F(PartitionAllocDeathTest, RepeatedAllocReturnNull) {
   // A single-slot but non-direct-mapped allocation size.
-  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionAllocGenericFlags),
+  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionAllocFlags),
                "DoReturnNullTest");
 }
 
 // Repeating above test with Realloc.
 TEST_F(PartitionAllocDeathTest, RepeatedReallocReturnNull) {
-  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionReallocGenericFlags),
+  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionReallocFlags),
                "DoReturnNullTest");
 }
 
 // Repeating above test with TryRealloc.
 TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNull) {
-  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionRootGenericTryRealloc),
+  EXPECT_DEATH(DoReturnNullTest(512 * 1024, kPartitionRootTryRealloc),
                "DoReturnNullTest");
 }
@@ -1557,8 +1554,7 @@ TEST_F(PartitionAllocDeathTest, LargeAllocs) {
   // Largest alloc.
   EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
   // And the smallest allocation we expect to die.
-  EXPECT_DEATH(allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
-               "");
+  EXPECT_DEATH(allocator.root()->Alloc(kMaxDirectMapped + 1, type_name), "");
 }
 
 // Check that our immediate double-free detection works.
@@ -1590,7 +1586,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
   // (for metadata), and then rounds that size to kPageAllocationGranularity.
   // To be able to reliably write one past a direct allocation, choose a size
   // that's
-  //  a) larger than kGenericMaxBucketed (to make the allocation direct)
+  //  a) larger than kMaxBucketed (to make the allocation direct)
   //  b) aligned at kPageAllocationGranularity boundaries after
   //     kPartitionPageSize has been added to it.
   // (On 32-bit, PartitionAlloc adds another kSystemPageSize to the
@@ -1599,11 +1595,11 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
   // hand to PartitionAlloc and we don't need to worry about allocation
   // granularities.)
 #define ALIGN(N, A) (((N) + (A)-1) / (A) * (A))
-  const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize,
-                          kPageAllocationGranularity) -
-                    kPartitionPageSize;
+  const int kSize =
+      ALIGN(kMaxBucketed + 1 + kPartitionPageSize, kPageAllocationGranularity) -
+      kPartitionPageSize;
 #undef ALIGN
-  static_assert(kSize > kGenericMaxBucketed,
+  static_assert(kSize > kMaxBucketed,
                 "allocation not large enough for direct allocation");
   size_t size = kSize - kExtraAllocSize;
   void* ptr = allocator.root()->Alloc(size, type_name);
@@ -1619,7 +1615,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
 #endif  // !defined(OS_ANDROID) && !defined(OS_IOS)
 
-// Tests that |PartitionDumpStatsGeneric| and |PartitionDumpStats| run without
+// Tests that |PartitionDumpStats| and |PartitionDumpStats| run without
 // crashing and return non-zero values when memory is allocated.
 TEST_F(PartitionAllocTest, DumpMemoryStats) {
   {
@@ -1738,8 +1734,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
   // This test checks for correct direct mapped accounting.
   {
-    size_t size_smaller = kGenericMaxBucketed + 1;
-    size_t size_bigger = (kGenericMaxBucketed * 2) + 1;
+    size_t size_smaller = kMaxBucketed + 1;
+    size_t size_bigger = (kMaxBucketed * 2) + 1;
     size_t real_size_smaller =
         (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
     size_t real_size_bigger =
@@ -1806,7 +1802,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
     size_t slot_size =
-        requested_size + (requested_size / kGenericNumBucketsPerOrder);
+        requested_size + (requested_size / kNumBucketsPerOrder);
     const PartitionBucketMemoryStats* stats =
         dumper.GetBucketStats(slot_size);
     EXPECT_TRUE(stats);
@@ -1832,7 +1828,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
     EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
     size_t slot_size =
-        requested_size + (requested_size / kGenericNumBucketsPerOrder);
+        requested_size + (requested_size / kNumBucketsPerOrder);
     const PartitionBucketMemoryStats* stats =
         dumper.GetBucketStats(slot_size);
     EXPECT_TRUE(stats);
@@ -1859,7 +1855,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
     EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
     size_t slot_size =
-        requested_size + (requested_size / kGenericNumBucketsPerOrder);
+        requested_size + (requested_size / kNumBucketsPerOrder);
     const PartitionBucketMemoryStats* stats =
         dumper.GetBucketStats(slot_size);
     EXPECT_TRUE(stats);
......
@@ -546,7 +546,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
   PartitionPage<thread_safe>* new_page = nullptr;
   *is_already_zeroed = false;
 
-  // For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
+  // For the PartitionRoot::Alloc() API, we have a bunch of buckets
   // marked as special cases. We bounce them through to the slow path so that
   // we can still have a blazing fast hot path due to lack of corner-case
   // branches.
@@ -557,11 +557,11 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
   // the empty or decommitted lists which affects the subsequent conditional.
   bool return_null = flags & PartitionAllocReturnNull;
   if (UNLIKELY(is_direct_mapped())) {
-    PA_DCHECK(size > kGenericMaxBucketed);
+    PA_DCHECK(size > kMaxBucketed);
     PA_DCHECK(this == get_sentinel_bucket());
     PA_DCHECK(active_pages_head ==
               PartitionPage<thread_safe>::get_sentinel_page());
-    if (size > kGenericMaxDirectMapped) {
+    if (size > kMaxDirectMapped) {
       if (return_null)
         return nullptr;
       PartitionExcessiveAllocationSize(size);
......
@@ -61,10 +61,10 @@ struct PartitionBucket {
   }
 
   static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
-    // Caller must check that the size is not above the kGenericMaxDirectMapped
+    // Caller must check that the size is not above the kMaxDirectMapped
     // limit before calling. This also guards against integer overflow in the
     // calculation here.
-    PA_DCHECK(size <= kGenericMaxDirectMapped);
+    PA_DCHECK(size <= kMaxDirectMapped);
     return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
   }
......
@@ -60,7 +60,7 @@ static constexpr size_t kBytesPerPartitionTagShift = 4;
 static constexpr size_t kBytesPerPartitionTag = 1U
                                                 << kBytesPerPartitionTagShift;
 static_assert(
-    kGenericMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
+    kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
     "MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
 static constexpr size_t kBytesPerPartitionTagRatio =
@@ -144,4 +144,4 @@ static constexpr size_t kReservedTagBitmapSize = 0;
 }  // namespace internal
 }  // namespace base
 
-#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_CONSTANTS_H_
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
@@ -24,7 +24,7 @@ class WTF_EXPORT PartitionAllocator {
   template <typename T>
   static size_t MaxElementCountInBackingStore() {
-    return base::kGenericMaxDirectMapped / sizeof(T);
+    return base::kMaxDirectMapped / sizeof(T);
   }
 
   template <typename T>
......