Commit d6688307 authored by Bartek Nowierski, committed by Chromium LUCI CQ

[PA] Rename GetSize and ActualSize

The names are highly confusing and misleading. These functions don't
return an actual size, but an underlying capacity, which may or may not
be readily available to the app. AllocationCapacity* names reflect the
behavior better.
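
For illustration, a minimal test-style sketch of the distinction the new names
capture (hypothetical, not part of this change; |root| stands for some
PartitionRoot<>*, and the numbers are made up rather than real bucket sizes):

  // How much underlying memory would a 500-byte request consume? The answer
  // is the slot/bucket capacity (e.g. 512), not the 500 bytes asked for.
  size_t capacity = root->AllocationCapacityFromRequestedSize(500);

  // The same capacity can be derived from a live allocation. The bytes beyond
  // the requested 500 exist in the slot, but aren't necessarily usable by the
  // app.
  void* ptr = root->Alloc(500, "example");
  EXPECT_EQ(capacity, root->AllocationCapacityFromPtr(ptr));
  root->Free(ptr);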

Change-Id: I4c59dbf75397b31ed58c1467c019ca23057031c7
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2636748
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#845540}
parent 4a0a39d0
@@ -859,17 +859,18 @@ TEST_F(PartitionAllocTest, AllocSizes) {
 TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   void* ptr;
   void* slot_start;
-  size_t requested_size, actual_size, predicted_size;
+  size_t requested_size, actual_capacity, predicted_capacity;
   // Allocate something small.
   requested_size = 511 - kExtraAllocSize;
-  predicted_size = allocator.root()->ActualSize(requested_size);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
   slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
-  actual_size = allocator.root()->GetSize(ptr);
-  EXPECT_EQ(predicted_size, actual_size);
-  EXPECT_LT(requested_size, actual_size);
+  actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_LT(requested_size, actual_capacity);
 #if defined(PA_HAS_64_BITS_POINTERS)
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; ++offset) {
@@ -885,13 +886,14 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   // Allocate a size that should be a perfect match for a bucket, because it
   // is an exact power of 2.
   requested_size = (256 * 1024) - kExtraAllocSize;
-  predicted_size = allocator.root()->ActualSize(requested_size);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
   slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
-  actual_size = allocator.root()->GetSize(ptr);
-  EXPECT_EQ(predicted_size, actual_size);
-  EXPECT_EQ(requested_size, actual_size);
+  actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size, actual_capacity);
 #if defined(PA_HAS_64_BITS_POINTERS)
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 877) {
@@ -904,20 +906,22 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
 #endif
   allocator.root()->Free(ptr);
-  // Allocate a size that is a system page smaller than a bucket. GetSize()
-  // should return a larger size than we asked for now.
+  // Allocate a size that is a system page smaller than a bucket.
+  // AllocationCapacityFromPtr() should return a larger size than we asked for
+  // now.
   size_t num = 64;
   while (num * SystemPageSize() >= 1024 * 1024) {
     num /= 2;
   }
   requested_size = num * SystemPageSize() - SystemPageSize() - kExtraAllocSize;
-  predicted_size = allocator.root()->ActualSize(requested_size);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
   slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
-  actual_size = allocator.root()->GetSize(ptr);
-  EXPECT_EQ(predicted_size, actual_size);
-  EXPECT_EQ(requested_size + SystemPageSize(), actual_size);
+  actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size + SystemPageSize(), actual_capacity);
 #if defined(PA_HAS_64_BITS_POINTERS)
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
@@ -931,13 +935,14 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   // Allocate the maximum allowed bucketed size.
   requested_size = kMaxBucketed - kExtraAllocSize;
-  predicted_size = allocator.root()->ActualSize(requested_size);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
   slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
-  actual_size = allocator.root()->GetSize(ptr);
-  EXPECT_EQ(predicted_size, actual_size);
-  EXPECT_EQ(requested_size, actual_size);
+  actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
+  EXPECT_EQ(predicted_capacity, actual_capacity);
+  EXPECT_EQ(requested_size, actual_capacity);
 #if defined(PA_HAS_64_BITS_POINTERS)
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
@@ -951,18 +956,19 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   // Check that we can write at the end of the reported size too.
   char* char_ptr = reinterpret_cast<char*>(ptr);
-  *(char_ptr + (actual_size - 1)) = 'A';
+  *(char_ptr + (actual_capacity - 1)) = 'A';
   allocator.root()->Free(ptr);
   // Allocate something very large, and uneven.
   if (IsLargeMemoryDevice()) {
     requested_size = 512 * 1024 * 1024 - 1;
-    predicted_size = allocator.root()->ActualSize(requested_size);
+    predicted_capacity =
+        allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
     ptr = allocator.root()->Alloc(requested_size, type_name);
     EXPECT_TRUE(ptr);
-    actual_size = allocator.root()->GetSize(ptr);
-    EXPECT_EQ(predicted_size, actual_size);
-    EXPECT_LT(requested_size, actual_size);
+    actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
+    EXPECT_EQ(predicted_capacity, actual_capacity);
+    EXPECT_LT(requested_size, actual_capacity);
     // Unlike above, don't test for PartitionAllocGetSlotOffset. Such large
     // allocations are direct-mapped, for which one can't easily obtain the
     // offset.
@@ -971,8 +977,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   // Too large allocation.
   requested_size = MaxDirectMapped() + 1;
-  predicted_size = allocator.root()->ActualSize(requested_size);
-  EXPECT_EQ(requested_size, predicted_size);
+  predicted_capacity =
+      allocator.root()->AllocationCapacityFromRequestedSize(requested_size);
+  EXPECT_EQ(requested_size, predicted_capacity);
 }
 #if defined(PA_HAS_64_BITS_POINTERS)
@@ -999,7 +1006,8 @@ TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
   for (size_t i = 0; i < num_slots; ++i) {
     char* ptr = static_cast<char*>(ptrs[i]);
     for (size_t offset = 0; offset < requested_size; offset += 13) {
-      EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size);
+      EXPECT_EQ(allocator.root()->AllocationCapacityFromPtr(ptr),
+                requested_size);
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
                 offset + allocator.root()->extras_offset);
     }
@@ -1025,7 +1033,7 @@ TEST_F(PartitionAllocTest, Realloc) {
   // old allocation.
   size_t size = SystemPageSize() - kExtraAllocSize;
   // Confirm size fills the entire slot.
-  ASSERT_EQ(size, allocator.root()->ActualSize(size));
+  ASSERT_EQ(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
   ptr = allocator.root()->Alloc(size, type_name);
   memset(ptr, 'A', size);
   ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
@@ -1056,7 +1064,7 @@ TEST_F(PartitionAllocTest, Realloc) {
   // old allocation.
   size = 200000;
   // Confirm size doesn't fill the entire slot.
-  ASSERT_LT(size, allocator.root()->ActualSize(size));
+  ASSERT_LT(size, allocator.root()->AllocationCapacityFromRequestedSize(size));
   ptr = allocator.root()->Alloc(size, type_name);
   memset(ptr, 'A', size);
   ptr2 = allocator.root()->Realloc(ptr, size * 2, type_name);
@@ -1086,18 +1094,19 @@ TEST_F(PartitionAllocTest, Realloc) {
   // Test that shrinking a direct mapped allocation happens in-place.
   size = kMaxBucketed + 16 * SystemPageSize();
   ptr = allocator.root()->Alloc(size, type_name);
-  size_t actual_size = allocator.root()->GetSize(ptr);
+  size_t actual_capacity = allocator.root()->AllocationCapacityFromPtr(ptr);
   ptr2 = allocator.root()->Realloc(ptr, kMaxBucketed + 8 * SystemPageSize(),
                                    type_name);
   EXPECT_EQ(ptr, ptr2);
-  EXPECT_EQ(actual_size - 8 * SystemPageSize(),
-            allocator.root()->GetSize(ptr2));
+  EXPECT_EQ(actual_capacity - 8 * SystemPageSize(),
+            allocator.root()->AllocationCapacityFromPtr(ptr2));
   // Test that a previously in-place shrunk direct mapped allocation can be
   // expanded up again within its original size.
   ptr = allocator.root()->Realloc(ptr2, size - SystemPageSize(), type_name);
   EXPECT_EQ(ptr2, ptr);
-  EXPECT_EQ(actual_size - SystemPageSize(), allocator.root()->GetSize(ptr));
+  EXPECT_EQ(actual_capacity - SystemPageSize(),
+            allocator.root()->AllocationCapacityFromPtr(ptr));
   // Test that a direct mapped allocation is performed not in-place when the
   // new size is small enough.
@@ -2560,7 +2569,8 @@ TEST_F(PartitionAllocTest, FundamentalAlignment) {
     EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment,
               static_cast<uintptr_t>(0));
-    EXPECT_EQ(allocator.root()->GetSize(ptr) % fundamental_alignment,
+    EXPECT_EQ(allocator.root()->AllocationCapacityFromPtr(ptr) %
+                  fundamental_alignment,
               static_cast<uintptr_t>(0));
     allocator.root()->Free(ptr);
......
@@ -601,16 +601,14 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
     return ptr;
   }
-  const size_t actual_new_size = ActualSize(new_size);
-  const size_t actual_old_size = GetSize(ptr);
   // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
   // new size is a significant percentage smaller. We could do the same if we
   // determine it is a win.
-  if (actual_new_size == actual_old_size) {
-    // Trying to allocate a block of size |new_size| would give us a block of
-    // the same size as the one we've already got, so re-use the allocation
-    // after updating statistics (and cookies, if present).
+  if (AllocationCapacityFromRequestedSize(new_size) ==
+      AllocationCapacityFromPtr(ptr)) {
+    // Trying to allocate |new_size| would use the same amount of underlying
+    // memory as we're already using, so re-use the allocation after updating
+    // statistics (and cookies, if present).
     if (slot_span->CanStoreRawSize()) {
       size_t new_raw_size = AdjustSizeForExtrasAdd(new_size);
       slot_span->SetRawSize(new_raw_size);
......
@@ -298,8 +298,9 @@ struct BASE_EXPORT PartitionRoot {
   ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, SlotSpan* slot_span);
   ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
-  ALWAYS_INLINE size_t GetSize(void* ptr) const;
-  ALWAYS_INLINE size_t ActualSize(size_t size);
+  ALWAYS_INLINE size_t AllocationCapacityFromPtr(void* ptr) const;
+  ALWAYS_INLINE size_t AllocationCapacityFromRequestedSize(size_t size) const;
   // Frees memory from this partition, if possible, by decommitting pages or
   // even etnire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
@@ -423,7 +424,8 @@ struct BASE_EXPORT PartitionRoot {
   //
   // See crbug.com/1150772 for an instance of Clusterfuzz / UBSAN detecting
   // this.
-  ALWAYS_INLINE Bucket& NO_SANITIZE("undefined") bucket_at(size_t i) {
+  ALWAYS_INLINE const Bucket& NO_SANITIZE("undefined")
+  bucket_at(size_t i) const {
     PA_DCHECK(i <= kNumBuckets);
     return buckets[i];
   }
@@ -1067,11 +1069,15 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
   return size;
 }
-// Gets the size of the allocated slot that contains |ptr|, adjusted for the
-// cookie and ref-count (if any). CAUTION! For direct-mapped allocation, |ptr|
-// has to be within the first partition page.
+// Return the capacity of the underlying slot (adjusted for extras). This
+// doesn't mean this capacity is readily available. It merely means that if
+// a new allocation (or realloc) happened with that returned value, it'd use
+// the same amount of underlying memory.
+// CAUTION! For direct-mapped allocation, |ptr| has to be within the first
+// partition page.
 template <bool thread_safe>
-ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetSize(void* ptr) const {
+ALWAYS_INLINE size_t
+PartitionRoot<thread_safe>::AllocationCapacityFromPtr(void* ptr) const {
   ptr = AdjustPointerForExtrasSubtract(ptr);
   auto* slot_span =
       internal::PartitionAllocGetSlotSpanForSizeQuery<thread_safe>(ptr);
@@ -1369,8 +1375,15 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::TryRealloc(
   return ReallocFlags(PartitionAllocReturnNull, ptr, new_size, type_name);
 }
+// Return the capacity of the underlying slot (adjusted for extras) that'd be
+// used to satisfy a request of |size|. This doesn't mean this capacity would be
+// readily available. It merely means that if an allocation happened with that
+// returned value, it'd use the same amount of underlying memory as the
+// allocation with |size|.
 template <bool thread_safe>
-ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
+ALWAYS_INLINE size_t
+PartitionRoot<thread_safe>::AllocationCapacityFromRequestedSize(
+    size_t size) const {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   return size;
 #else
......
@@ -783,7 +783,7 @@ v8::Maybe<uint32_t> V8ScriptValueSerializer::GetWasmModuleTransferId(
 void* V8ScriptValueSerializer::ReallocateBufferMemory(void* old_buffer,
                                                       size_t size,
                                                       size_t* actual_size) {
-  *actual_size = WTF::Partitions::BufferActualSize(size);
+  *actual_size = WTF::Partitions::BufferPotentialCapacity(size);
   return WTF::Partitions::BufferTryRealloc(old_buffer, *actual_size,
                                            "SerializedScriptValue buffer");
 }
......
@@ -73,7 +73,7 @@ class PLATFORM_EXPORT ContiguousContainerBase {
  public:
   Buffer(wtf_size_t buffer_size, const char* type_name)
       : capacity_(static_cast<wtf_size_t>(
-            WTF::Partitions::BufferActualSize(buffer_size))),
+            WTF::Partitions::BufferPotentialCapacity(buffer_size))),
         begin_(static_cast<uint8_t*>(
             WTF::Partitions::BufferMalloc(capacity_, type_name))),
         end_(begin_) {
......
@@ -30,7 +30,7 @@ class WTF_EXPORT PartitionAllocator {
   template <typename T>
   static size_t QuantizedSize(size_t count) {
     CHECK_LE(count, MaxElementCountInBackingStore<T>());
-    return WTF::Partitions::BufferActualSize(count * sizeof(T));
+    return WTF::Partitions::BufferPotentialCapacity(count * sizeof(T));
   }
   template <typename T>
   static T* AllocateVectorBacking(size_t size) {
......
@@ -270,8 +270,8 @@ void Partitions::BufferFree(void* p) {
 }
 // static
-size_t Partitions::BufferActualSize(size_t n) {
-  return BufferPartition()->ActualSize(n);
+size_t Partitions::BufferPotentialCapacity(size_t n) {
+  return BufferPartition()->AllocationCapacityFromRequestedSize(n);
 }
 // Ideally this would be removed when PartitionAlloc is malloc(), but there are
......
@@ -80,7 +80,7 @@ class WTF_EXPORT Partitions {
   static void* BufferMalloc(size_t n, const char* type_name);
   static void* BufferTryRealloc(void* p, size_t n, const char* type_name);
   static void BufferFree(void* p);
-  static size_t BufferActualSize(size_t n);
+  static size_t BufferPotentialCapacity(size_t n);
   static void* FastMalloc(size_t n, const char* type_name);
   static void* FastZeroedMalloc(size_t n, const char* type_name);
......