Commit 88ae1960 authored by Benoit Lize, committed by Commit Bot

base/allocator: Remove now-meaningless "Generic" in PartitionAlloc.

PartitionAlloc used to have a "generic" variant, and a specialized
one. This is no longer the case, so remove needless "Generic" in the
constants, and fix now-incorrect function/class names in comments.

Bug: 998048
Change-Id: Icb72a60fee5fa005de9bd7b40c5cbbb1ce37ecbf
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2310352
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#790770}
parent eb52f981
......@@ -40,7 +40,7 @@ possibility of inlining.
For an example of how to use partitions to get good performance and good safety,
see Blink's usage, as described in `wtf/allocator/Allocator.md`.
Large allocations (> kGenericMaxBucketed == 960KB) are realized by direct
Large allocations (> kMaxBucketed == 960KB) are realized by direct
memory mmapping. This size makes sense because 960KB = 0xF0000. The next larger
bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
a SuperPage meaning it would not be possible to pack even 2 sequential
......
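The 960 KB limit quoted above falls straight out of the bucket constants that this change renames (see the partition_alloc_constants.h hunk further down). A minimal standalone sketch of the arithmetic, with the constants copied so it compiles on its own and assuming the usual 2 MiB super page:

#include <cstddef>

// Local mirrors of the (renamed) constants from partition_alloc_constants.h.
constexpr size_t kMaxBucketedOrder = 20;       // top order covers [512 KiB, 1 MiB)
constexpr size_t kNumBucketsPerOrderBits = 3;  // 8 buckets per order
constexpr size_t kNumBucketsPerOrder = size_t{1} << kNumBucketsPerOrderBits;
constexpr size_t kMaxBucketSpacing =
    size_t{1} << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);  // 64 KiB steps
constexpr size_t kMaxBucketed =
    (size_t{1} << (kMaxBucketedOrder - 1)) +
    ((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);  // 512 KiB + 7 * 64 KiB

static_assert(kMaxBucketed == 983040, "the largest bucketed size is 960 KB");
static_assert(kMaxBucketed == 0xF0000, "i.e. 0xF0000, as stated above");

// The next step in the progression would be 1 MiB, and two 1 MiB slots would
// consume an entire 2 MiB super page, leaving no room for metadata and guard
// pages, hence anything above kMaxBucketed is direct-mapped instead.
constexpr size_t kSuperPageSize = size_t{1} << 21;  // 2 MiB
static_assert(kMaxBucketed + kMaxBucketSpacing == (size_t{1} << 20),
              "the next bucket size would be 1 MiB");
static_assert(2 * (size_t{1} << 20) == kSuperPageSize,
              "two such slots already fill a super page exactly");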
......@@ -69,18 +69,17 @@ static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
kSystemPageSize,
"page metadata fits in hole");
// Limit to prevent callers accidentally overflowing an int size.
static_assert(kGenericMaxDirectMapped <=
(1UL << 31) + kPageAllocationGranularity,
static_assert(kMaxDirectMapped <= (1UL << 31) + kPageAllocationGranularity,
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
#if ENABLE_TAG_FOR_MTE_CHECKED_PTR
static_assert(kGenericSmallestBucket >= alignof(std::max_align_t),
static_assert(kSmallestBucket >= alignof(std::max_align_t),
"generic smallest bucket");
#else
static_assert(kGenericSmallestBucket == alignof(std::max_align_t),
static_assert(kSmallestBucket == alignof(std::max_align_t),
"generic smallest bucket");
#endif
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
"System pages per slot span must be less than 128.");
......@@ -243,19 +242,19 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
size_t order;
for (order = 0; order <= kBitsPerSizeT; ++order) {
size_t order_index_shift;
if (order < kGenericNumBucketsPerOrderBits + 1)
if (order < kNumBucketsPerOrderBits + 1)
order_index_shift = 0;
else
order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
order_index_shift = order - (kNumBucketsPerOrderBits + 1);
order_index_shifts[order] = order_index_shift;
size_t sub_order_index_mask;
if (order == kBitsPerSizeT) {
// This avoids invoking undefined behavior for an excessive shift.
sub_order_index_mask =
static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
} else {
sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
(kGenericNumBucketsPerOrderBits + 1);
(kNumBucketsPerOrderBits + 1);
}
order_sub_index_masks[order] = sub_order_index_mask;
}
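The order == kBitsPerSizeT branch above exists because shifting a value by its full bit width is undefined behavior in C++, so the mask for the top order has to be produced another way. A self-contained sketch of the same trick, with the constants redeclared so it compiles in isolation:

#include <climits>
#include <cstddef>

constexpr size_t kBitsPerSizeT = sizeof(size_t) * CHAR_BIT;  // 64 on LP64
constexpr size_t kNumBucketsPerOrderBits = 3;

// Mask selecting the low bits of a size that sit below the order-index bits.
constexpr size_t SubOrderMask(size_t order) {
  return order == kBitsPerSizeT
             // ((1 << 64) - 1) would shift a 64-bit size_t by its full width,
             // which is UB; starting from the all-ones value and shifting
             // right gives the same mask without the illegal shift.
             ? static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1)
             : ((static_cast<size_t>(1) << order) - 1) >>
                   (kNumBucketsPerOrderBits + 1);
}

// Ordinary orders use the straightforward formula: order 10 keeps the low
// 6 bits (1023 >> 4 == 63).
static_assert(SubOrderMask(10) == 63, "low bits below the order-index bits");
// The top order yields all bits except the top four, with no illegal shift.
static_assert(SubOrderMask(kBitsPerSizeT) ==
                  (static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1)),
              "top order handled without an out-of-range shift");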
......@@ -267,47 +266,46 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
// We avoid them in the bucket lookup map, but we tolerate them to keep the
// code simpler and the structures more generic.
size_t i, j;
size_t current_size = kGenericSmallestBucket;
size_t current_increment =
kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
size_t current_size = kSmallestBucket;
size_t current_increment = kSmallestBucket >> kNumBucketsPerOrderBits;
Bucket* bucket = &buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
for (i = 0; i < kNumBucketedOrders; ++i) {
for (j = 0; j < kNumBucketsPerOrder; ++j) {
bucket->Init(current_size);
// Disable pseudo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
if (current_size % kSmallestBucket)
bucket->active_pages_head = nullptr;
current_size += current_increment;
++bucket;
}
current_increment <<= 1;
}
PA_DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
PA_DCHECK(current_size == 1 << kMaxBucketedOrder);
PA_DCHECK(bucket == &buckets[0] + kNumBuckets);
// Then set up the fast size -> bucket lookup table.
bucket = &buckets[0];
Bucket** bucket_ptr = &bucket_lookups[0];
for (order = 0; order <= kBitsPerSizeT; ++order) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
if (order < kGenericMinBucketedOrder) {
for (j = 0; j < kNumBucketsPerOrder; ++j) {
if (order < kMinBucketedOrder) {
// Use the bucket of the finest granularity for malloc(0) etc.
*bucket_ptr++ = &buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
} else if (order > kMaxBucketedOrder) {
*bucket_ptr++ = Bucket::get_sentinel_bucket();
} else {
Bucket* valid_bucket = bucket;
// Skip over invalid buckets.
while (valid_bucket->slot_size % kGenericSmallestBucket)
while (valid_bucket->slot_size % kSmallestBucket)
valid_bucket++;
*bucket_ptr++ = valid_bucket;
bucket++;
}
}
}
PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
PA_DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) *
kGenericNumBucketsPerOrder));
PA_DCHECK(bucket == &buckets[0] + kNumBuckets);
PA_DCHECK(bucket_ptr ==
&bucket_lookups[0] + ((kBitsPerSizeT + 1) * kNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucket_ptr = Bucket::get_sentinel_bucket();
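That extra trailing entry is needed because the largest possible size produces a lookup index one past the per-order slots. Working the index arithmetic through for size == SIZE_MAX (the malloc(-1) case mentioned above), as a standalone sketch with local copies of the constants:

#include <climits>
#include <cstddef>
#include <cstdint>

constexpr size_t kBitsPerSizeT = sizeof(size_t) * CHAR_BIT;  // 64 on LP64
constexpr size_t kNumBucketsPerOrderBits = 3;
constexpr size_t kNumBucketsPerOrder = size_t{1} << kNumBucketsPerOrderBits;

// The index SizeToBucket() computes for size == SIZE_MAX.
constexpr size_t kOrder = kBitsPerSizeT;  // most significant bit is the top bit
constexpr size_t kOrderIndexShift = kOrder - (kNumBucketsPerOrderBits + 1);
constexpr size_t kOrderIndex =
    (SIZE_MAX >> kOrderIndexShift) & (kNumBucketsPerOrder - 1);        // 7
constexpr size_t kSubOrderMask =
    static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
constexpr size_t kIndex = (kOrder << kNumBucketsPerOrderBits) +
                          kOrderIndex + !!(SIZE_MAX & kSubOrderMask);  // +1 bump

// bucket_lookups[] has (kBitsPerSizeT + 1) * kNumBucketsPerOrder + 1 entries
// (521 with a 64-bit size_t), and SIZE_MAX lands exactly on the extra last
// one, which is why it is pointed at the sentinel bucket.
static_assert(kIndex == (kBitsPerSizeT + 1) * kNumBucketsPerOrder,
              "malloc(-1) hits the one extra, sentinel slot");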
......@@ -325,7 +323,7 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
// Note that the new size might be a bucketed size; this function is called
// whenever we're reallocating a direct mapped allocation.
size_t new_size = Bucket::get_direct_map_size(raw_size);
if (new_size < kGenericMinDirectMappedDownsize)
if (new_size < kMinDirectMappedDownsize)
return false;
// bucket->slot_size is the current size of the allocation.
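The early return encodes the downsizing policy from the constants file: kMinDirectMappedDownsize == kMaxBucketed + 1, so a direct mapping is only resized in place while the new size is still too big for any bucket; otherwise the caller falls back to the ordinary allocate-copy-free realloc path. A simplified sketch of that threshold (the CanShrinkInPlace() helper below is hypothetical and ignores the page rounding that get_direct_map_size() applies first):

#include <cstddef>

constexpr size_t kMaxBucketed = 983040;  // 960 KB, the largest bucketed size
constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;

// Hypothetical helper: keep the direct mapping only if no bucket could serve
// the new size.
constexpr bool CanShrinkInPlace(size_t new_size) {
  return new_size >= kMinDirectMappedDownsize;
}

static_assert(!CanShrinkInPlace(512 * 1024),
              "512 KiB fits a bucket, so realloc moves it out of the direct map");
static_assert(CanShrinkInPlace(3 * 1024 * 1024 / 2),
              "1.5 MiB is still too large for any bucket, so shrink in place");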
......@@ -394,7 +392,7 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
return nullptr;
}
if (new_size > kGenericMaxDirectMapped) {
if (new_size > kMaxDirectMapped) {
if (flags & PartitionAllocReturnNull)
return nullptr;
internal::PartitionExcessiveAllocationSize(new_size);
......@@ -643,7 +641,7 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
if (flags & PartitionPurgeDecommitEmptyPages)
DecommitEmptyPages();
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
for (size_t i = 0; i < kNumBuckets; ++i) {
Bucket* bucket = &buckets[i];
if (bucket->slot_size >= kSystemPageSize)
PartitionPurgeBucket(bucket);
......@@ -758,10 +756,10 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
}
PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
PartitionBucketMemoryStats bucket_stats[kNumBuckets];
size_t num_direct_mapped_allocations = 0;
{
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
for (size_t i = 0; i < kNumBuckets; ++i) {
const Bucket* bucket = &buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
......@@ -795,7 +793,7 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
// Call |PartitionsDumpBucketStats| after collecting stats because it can
// try to allocate using |PartitionRoot::Alloc()| and it can't
// obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
for (size_t i = 0; i < kNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
}
......
......@@ -88,7 +88,7 @@
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
if (size > kGenericMaxDirectMapped) { \
if (size > kMaxDirectMapped) { \
if (flags & PartitionAllocReturnNull) { \
return nullptr; \
} \
......@@ -372,7 +372,7 @@ struct PartitionBucketMemoryStats {
};
// Interface that is passed to PartitionDumpStats and
// PartitionDumpStatsGeneric for using the memory statistics.
// PartitionDumpStats for using the memory statistics.
class BASE_EXPORT PartitionStatsDumper {
public:
// Called to dump total memory used by partition, once per partition.
......@@ -431,9 +431,8 @@ struct BASE_EXPORT PartitionRoot {
// sizes. It is one flat array instead of a 2D array because in the 2D
// world, we'd need to index array[blah][max+1] which risks undefined
// behavior.
Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
1] = {};
Bucket buckets[kGenericNumBuckets] = {};
Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1] = {};
Bucket buckets[kNumBuckets] = {};
PartitionRoot() = default;
explicit PartitionRoot(bool enable_tag_pointers) {
......@@ -795,14 +794,14 @@ PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
// The order index is simply the next few bits after the most significant bit.
size_t order_index =
(size >> order_index_shifts[order]) & (kGenericNumBucketsPerOrder - 1);
(size >> order_index_shifts[order]) & (kNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
size_t sub_order_index = size & order_sub_index_masks[order];
Bucket* bucket = bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
Bucket* bucket = bucket_lookups[(order << kNumBucketsPerOrderBits) +
order_index + !!sub_order_index];
PA_CHECK(bucket);
PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
PA_DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
PA_DCHECK(!(bucket->slot_size % kSmallestBucket));
return bucket;
}
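To make the shift-and-mask arithmetic concrete, here is the computation for a 1000-byte request, written out as a standalone sketch with the usual 64-bit constants substituted (it reproduces only the index math, not the lookup table itself):

#include <cstddef>

constexpr size_t kNumBucketsPerOrderBits = 3;
constexpr size_t kNumBucketsPerOrder = size_t{1} << kNumBucketsPerOrderBits;  // 8

// size == 1000: the most significant bit is bit 9, so the order is 10 and the
// request falls in [512, 1024), whose buckets are 512, 576, ..., 960
// (eight buckets, 64 bytes apart).
constexpr size_t kSize = 1000;
constexpr size_t kOrder = 10;
constexpr size_t kOrderIndexShift = kOrder - (kNumBucketsPerOrderBits + 1);   // 6
constexpr size_t kOrderIndex =
    (kSize >> kOrderIndexShift) & (kNumBucketsPerOrder - 1);
constexpr size_t kSubOrderMask =
    ((size_t{1} << kOrder) - 1) >> (kNumBucketsPerOrderBits + 1);
constexpr size_t kSubOrderIndex = kSize & kSubOrderMask;

static_assert(kOrderIndex == 7, "1000 lands on the order's last bucket, 960");
static_assert(kSubOrderIndex == 40, "but 40 bytes past 960, so bump up by one");

// The final lookup index is one slot past the 960 bucket, i.e. the first
// bucket of the next order, whose slot size is 1024.
constexpr size_t kLookupIndex =
    (kOrder << kNumBucketsPerOrderBits) + kOrderIndex + !!kSubOrderIndex;
static_assert(kLookupIndex == 11 * kNumBucketsPerOrder,
              "a 1000-byte request is served from the 1024-byte bucket");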
......@@ -914,7 +913,7 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
auto* bucket = SizeToBucket(size);
if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
} else if (size > kMaxDirectMapped) {
// Too large to allocate => return the size unchanged.
} else {
size = Bucket::get_direct_map_size(size);
......
......@@ -28,14 +28,14 @@ namespace base {
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current maximum slot span size of 64 KiB and
// other constant values, we pack _all_ `PartitionRootGeneric::Alloc` sizes
// perfectly up against the end of a system page.
// other constant values, we pack _all_ `PartitionRoot::Alloc` sizes perfectly
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
static const size_t kPartitionPageShift = 16; // 64 KiB
#elif defined(ARCH_CPU_PPC64)
static const size_t kPartitionPageShift = 18; // 256 KiB
#elif defined (OS_MACOSX) && defined(ARCH_CPU_ARM64)
#elif defined(OS_MACOSX) && defined(ARCH_CPU_ARM64)
static const size_t kPartitionPageShift = 16; // 64 KiB
#else
static const size_t kPartitionPageShift = 14; // 16 KiB
......@@ -134,7 +134,6 @@ static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage =
kSuperPageSize / kPartitionPageSize;
// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-2 size of the
// allocation. More precisely, the order is the bit index of the
// most-significant-bit in the allocation size, where the bit numbering starts at
......@@ -155,32 +154,29 @@ static_assert(alignof(std::max_align_t) <= 16,
// bytes on 64 bit ones.
#if ENABLE_TAG_FOR_MTE_CHECKED_PTR
// MTECheckedPtr requires 16B-alignment because kBytesPerPartitionTag is 16.
static const size_t kGenericMinBucketedOrder = 5;
static const size_t kMinBucketedOrder = 5;
#else
static const size_t kGenericMinBucketedOrder =
static const size_t kMinBucketedOrder =
alignof(std::max_align_t) == 16 ? 5 : 4; // 2^(order - 1), that is 16 or 8.
#endif
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
static const size_t kGenericMaxBucketedOrder = 20;
static const size_t kGenericNumBucketedOrders =
(kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
static const size_t kMaxBucketedOrder = 20;
static const size_t kNumBucketedOrders =
(kMaxBucketedOrder - kMinBucketedOrder) + 1;
// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
// 160, ..., 240:
static const size_t kGenericNumBucketsPerOrderBits = 3;
static const size_t kGenericNumBucketsPerOrder =
1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericNumBuckets =
kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
static const size_t kGenericSmallestBucket = 1
<< (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing =
1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed =
(1 << (kGenericMaxBucketedOrder - 1)) +
((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kNumBucketsPerOrderBits = 3;
static const size_t kNumBucketsPerOrder = 1 << kNumBucketsPerOrderBits;
static const size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;
static const size_t kSmallestBucket = 1 << (kMinBucketedOrder - 1);
static const size_t kMaxBucketSpacing =
1 << ((kMaxBucketedOrder - 1) - kNumBucketsPerOrderBits);
static const size_t kMaxBucketed =
(1 << (kMaxBucketedOrder - 1)) +
((kNumBucketsPerOrder - 1) * kMaxBucketSpacing);
// Limit when downsizing a direct mapping using `realloc`:
static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1;
static const size_t kGenericMaxDirectMapped =
static const size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
static const size_t kMaxDirectMapped =
(1UL << 31) + kPageAllocationGranularity; // 2 GiB plus 1 more page.
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
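The "order 8 is 128, 144, 160, ..., 240" example in the comment can be checked mechanically against the renamed constants. A standalone sketch, assuming 16-byte alignof(std::max_align_t) so that kMinBucketedOrder == 5:

#include <cstddef>

constexpr size_t kMinBucketedOrder = 5;   // smallest bucket is 1 << 4 == 16
constexpr size_t kMaxBucketedOrder = 20;  // largest buckets cover [512 KiB, 1 MiB)
constexpr size_t kNumBucketedOrders = (kMaxBucketedOrder - kMinBucketedOrder) + 1;
constexpr size_t kNumBucketsPerOrderBits = 3;
constexpr size_t kNumBucketsPerOrder = size_t{1} << kNumBucketsPerOrderBits;
constexpr size_t kNumBuckets = kNumBucketedOrders * kNumBucketsPerOrder;

// Within order N the eight bucket sizes are 2^(N-1) plus j steps of 2^(N-4),
// for j = 0..7.
constexpr size_t BucketSize(size_t order, size_t j) {
  return (size_t{1} << (order - 1)) +
         j * (size_t{1} << (order - 1 - kNumBucketsPerOrderBits));
}

static_assert(BucketSize(8, 0) == 128 && BucketSize(8, 1) == 144 &&
                  BucketSize(8, 7) == 240,
              "order 8 is 128, 144, ..., 240, as the comment says");
static_assert(BucketSize(kMinBucketedOrder, 0) == 16, "== kSmallestBucket");
static_assert(BucketSize(kMaxBucketedOrder, 7) == 983040, "== kMaxBucketed");
static_assert(kNumBuckets == 128, "16 orders x 8 buckets per order");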
......@@ -198,7 +194,7 @@ static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1 GiB
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
// Flags for `PartitionAllocGenericFlags`.
// Flags for `PartitionAllocFlags`.
enum PartitionAllocFlags {
PartitionAllocReturnNull = 1 << 0,
PartitionAllocZeroFill = 1 << 1,
......
......@@ -546,7 +546,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
PartitionPage<thread_safe>* new_page = nullptr;
*is_already_zeroed = false;
// For the PartitionRootGeneric::Alloc() API, we have a bunch of buckets
// For the PartitionRoot::Alloc() API, we have a bunch of buckets
// marked as special cases. We bounce them through to the slow path so that
// we can still have a blazing fast hot path due to lack of corner-case
// branches.
......@@ -557,11 +557,11 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// the empty or decommitted lists which affects the subsequent conditional.
bool return_null = flags & PartitionAllocReturnNull;
if (UNLIKELY(is_direct_mapped())) {
PA_DCHECK(size > kGenericMaxBucketed);
PA_DCHECK(size > kMaxBucketed);
PA_DCHECK(this == get_sentinel_bucket());
PA_DCHECK(active_pages_head ==
PartitionPage<thread_safe>::get_sentinel_page());
if (size > kGenericMaxDirectMapped) {
if (size > kMaxDirectMapped) {
if (return_null)
return nullptr;
PartitionExcessiveAllocationSize(size);
......
......@@ -61,10 +61,10 @@ struct PartitionBucket {
}
static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
// Caller must check that the size is not above the kGenericMaxDirectMapped
// Caller must check that the size is not above the kMaxDirectMapped
// limit before calling. This also guards against integer overflow in the
// calculation here.
PA_DCHECK(size <= kGenericMaxDirectMapped);
PA_DCHECK(size <= kMaxDirectMapped);
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
......
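get_direct_map_size() rounds the request up to a whole number of system pages; with 4 KiB system pages, for example, a 100000-byte request reserves 102400 bytes (25 pages). A standalone sketch of the rounding, assuming kSystemPageSize == 4096 (DirectMapSize() is a local stand-in for the real method):

#include <cstddef>

constexpr size_t kSystemPageSize = 4096;
constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1;
constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;

// Round up to a whole number of system pages. The caller has already checked
// size <= kMaxDirectMapped, so adding the mask cannot overflow.
constexpr size_t DirectMapSize(size_t size) {
  return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}

static_assert(DirectMapSize(100000) == 102400, "25 pages of 4 KiB");
static_assert(DirectMapSize(4096) == 4096, "page-aligned sizes are unchanged");
static_assert(DirectMapSize(4097) == 8192, "one byte over costs another page");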
......@@ -60,7 +60,7 @@ static constexpr size_t kBytesPerPartitionTagShift = 4;
static constexpr size_t kBytesPerPartitionTag = 1U
<< kBytesPerPartitionTagShift;
static_assert(
kGenericMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
"MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
static constexpr size_t kBytesPerPartitionTagRatio =
......@@ -144,4 +144,4 @@ static constexpr size_t kReservedTagBitmapSize = 0;
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_CONSTANTS_H_
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
......@@ -24,7 +24,7 @@ class WTF_EXPORT PartitionAllocator {
template <typename T>
static size_t MaxElementCountInBackingStore() {
return base::kGenericMaxDirectMapped / sizeof(T);
return base::kMaxDirectMapped / sizeof(T);
}
template <typename T>
......