Commit 874bd97e authored by Bartek Nowierski, committed by Commit Bot

Even more size-related disambiguation (heavy duty)

Continuation of crrev.com/c/2455947 and crrev.com/c/2463043.

The remaining GetAllocatedSize() was still ambiguous, returning two
different things, only one of which could be considered an "allocated
size". Furthermore, the naming of function parameters and local
variables wasn't always clear.

This is an attempt to make the terminology consistent (see the sketch
below the list):
- requested_size - what the app requested
- extras - PA-internal data surrounding the allocated data, like
  tag/ref-count (for CheckedPtr) or cookies (debug only)
- raw_size - requested_size + extras
- usable_size (>=requested_size) - in case of over-allocation, this is
  what's available for the app (no risk of overwriting extras, etc.)
- utilized_slot_size - contiguous region of the slot that accommodates
  all of the above (requested allocation, additional usable space and
  extras); equal to raw_size if it's possible to store it in metadata
  (for large allocations only), slot_size otherwise
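
For illustration, here is a minimal sketch of how these sizes relate for
a single allocation. This is not PartitionAlloc code: kExtrasSize,
SizeBreakdown and Describe() are hypothetical names, and the extras size
is assumed to be a constant 16 bytes.

  #include <stddef.h>

  // Assumed combined size of the tag/ref-count and cookies (hypothetical).
  constexpr size_t kExtrasSize = 16;

  struct SizeBreakdown {
    size_t requested_size;      // what the app requested
    size_t raw_size;            // requested_size + extras
    size_t utilized_slot_size;  // raw_size if storable in metadata, else
                                // slot_size
    size_t usable_size;         // what the app may safely use
                                // (>= requested_size)
  };

  SizeBreakdown Describe(size_t requested_size, size_t slot_size,
                         bool raw_size_fits_in_metadata) {
    SizeBreakdown b;
    b.requested_size = requested_size;
    b.raw_size = requested_size + kExtrasSize;
    // Large allocations can store raw_size in metadata; small ones fall
    // back to the full slot_size.
    b.utilized_slot_size =
        raw_size_fits_in_metadata ? b.raw_size : slot_size;
    b.usable_size = b.utilized_slot_size - kExtrasSize;
    return b;
  }

E.g. under these assumptions, a 40-byte request landing in a 64-byte
slot of a small bucket yields raw_size 56, utilized_slot_size 64 and
usable_size 48, whereas a large allocation that stores its raw size in
metadata yields utilized_slot_size 56 and usable_size 40.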

Change-Id: Ia2a771af6d29261b049b0dc98fede6395dc7a35f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2462909
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Auto-Submit: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#816469}
parent ea95f1f2
......@@ -375,8 +375,8 @@ template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
Bucket* bucket,
int flags,
size_t size,
size_t* allocated_size,
size_t raw_size,
size_t* utilized_slot_size,
bool* is_already_zeroed) {
*is_already_zeroed = false;
......@@ -384,7 +384,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
// Check that this page is neither full nor freed.
PA_DCHECK(page);
PA_DCHECK(page->num_allocated_slots >= 0);
*allocated_size = bucket->slot_size;
*utilized_slot_size = bucket->slot_size;
void* ret = page->freelist_head;
if (LIKELY(ret)) {
......@@ -394,7 +394,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
// All large allocations must go through the slow path to correctly update
// the size metadata.
PA_DCHECK(page->get_raw_size() == 0);
PA_DCHECK(!page->get_raw_size_ptr()); // doesn't have raw size
internal::PartitionFreelistEntry* new_head =
internal::EncodedPartitionFreelistEntry::Decode(
page->freelist_head->next);
......@@ -403,7 +403,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
PA_DCHECK(page->bucket == bucket);
} else {
ret = bucket->SlowPathAlloc(this, flags, size, is_already_zeroed);
ret = bucket->SlowPathAlloc(this, flags, raw_size, is_already_zeroed);
// TODO(palmer): See if we can afford to make this a CHECK.
PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
......@@ -415,7 +415,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
PA_DCHECK((page->bucket == bucket) || (page->bucket->is_direct_mapped() &&
(bucket == &sentinel_bucket)));
*allocated_size = page->GetAllocatedSize();
*utilized_slot_size = page->GetUtilizedSlotSize();
}
return ret;
......@@ -479,23 +479,32 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
PA_DCHECK(page);
PA_DCHECK(IsValidPage(page));
#if DCHECK_IS_ON()
size_t utilized_slot_size = page->GetUtilizedSlotSize();
#endif
if (allow_extras) {
// |ptr| points after the tag and the cookie.
// The layout is | tag or ref count | cookie | data | cookie |
//
// Layout inside the slot:
// <--------extras-------> <-extras->
// <----------------utilized_slot_size--------------->
// |[tag/refcnt]|[cookie]|...data...|[empty]|[cookie]|[unused]|
// ^ ^
// | |
// allocation_start_ptr ptr
//
// Note: tag, reference count and cookie can be 0-sized.
// Note: tag, ref-count and cookie can be 0-sized.
//
// For more context, see the other "Layout inside the slot" comment below.
void* allocation_start_ptr =
internal::PartitionPointerAdjustSubtract(true /* allow_extras */, ptr);
#if DCHECK_IS_ON()
size_t allocated_size = page->GetAllocatedSize();
void* start_cookie_ptr =
internal::PartitionCookiePointerAdjustSubtract(ptr);
void* end_cookie_ptr = internal::PartitionCookiePointerAdjustSubtract(
reinterpret_cast<char*>(allocation_start_ptr) + allocated_size);
reinterpret_cast<char*>(allocation_start_ptr) + utilized_slot_size);
// If these asserts fire, you probably corrupted memory.
internal::PartitionCookieCheckValue(start_cookie_ptr);
......@@ -505,9 +514,9 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
if (!page->bucket->is_direct_mapped()) {
// PartitionTagIncrementValue and PartitionTagClearValue require that the
// size is tag_bitmap::kBytesPerPartitionTag-aligned (currently 16
// bytes-aligned) when MTECheckedPtr is enabled. However, allocated_size
// may not be aligned for single-slot slot spans. So we need the bucket's
// slot_size.
// bytes-aligned) when MTECheckedPtr is enabled. However,
// utilized_slot_size may not be aligned for single-slot slot spans. So we
// need the bucket's slot_size.
size_t slot_size_with_no_extras =
internal::PartitionSizeAdjustSubtract(true, page->bucket->slot_size);
#if ENABLE_TAG_FOR_MTE_CHECKED_PTR && MTE_CHECKED_PTR_SET_TAG_AT_FREE
......@@ -524,9 +533,9 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// potential use-after-free issues into unexploitable crashes.
if (UNLIKELY(!ref_count->HasOneRef())) {
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr, size_with_no_extras);
ASAN_POISON_MEMORY_REGION(ptr, usable_size);
#else
memset(ptr, kFreedByte, size_with_no_extras);
memset(ptr, kFreedByte, usable_size);
#endif
ref_count->Release();
return;
......@@ -538,7 +547,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
}
#if DCHECK_IS_ON()
memset(ptr, kFreedByte, page->GetAllocatedSize());
memset(ptr, kFreedByte, utilized_slot_size);
#endif
// TLS access can be expensive, do a cheap local check first.
......@@ -654,7 +663,7 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
Page* page = Page::FromPointerNoAlignmentCheck(ptr);
auto* root = PartitionRoot<thread_safe>::FromPage(page);
size_t size = page->GetAllocatedSize();
size_t size = page->GetUtilizedSlotSize();
// Adjust back by subtracting extras (if any).
size = internal::PartitionSizeAdjustSubtract(root->allow_extras, size);
return size;
......@@ -707,16 +716,16 @@ PartitionRoot<thread_safe>::SizeToBucketIndex(size_t size) {
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
int flags,
size_t size,
size_t requested_size,
const char* type_name) {
PA_DCHECK(flags < PartitionAllocLastFlag << 1);
PA_DCHECK((flags & PartitionAllocNoHooks) == 0); // Internal only.
PA_DCHECK(initialized);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(requested_size, flags);
const bool zero_fill = flags & PartitionAllocZeroFill;
void* result = zero_fill ? calloc(1, size) : malloc(size);
void* result = zero_fill ? calloc(1, requested_size) : malloc(requested_size);
PA_CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
......@@ -724,18 +733,19 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
void* ret = nullptr;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (UNLIKELY(hooks_enabled)) {
if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&ret, flags, size,
type_name)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(ret, size,
if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
&ret, flags, requested_size, type_name)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(ret, requested_size,
type_name);
return ret;
}
}
ret = AllocFlagsNoHooks(flags, size);
ret = AllocFlagsNoHooks(flags, requested_size);
if (UNLIKELY(hooks_enabled)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(ret, size, type_name);
PartitionAllocHooks::AllocationObserverHookIfEnabled(ret, requested_size,
type_name);
}
return ret;
......@@ -743,24 +753,25 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
}
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
size_t size) {
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
int flags,
size_t requested_size) {
// The thread cache is added "in the middle" of the main allocator, that is:
// - After all the cookie/tag management
// - Before the "raw" allocator.
//
// That is, the general allocation flow is:
// 1. Adjustment of requested size to make room for tags / cookies
// 1. Adjustment of requested size to make room for extras
// 2. Allocation:
// a. Call to the thread cache, if it succeeds, go to step 3.
// b. Otherwise, call the "raw" allocator <-- Locking
// 3. Handle cookies/tags, zero allocation if required
size_t requested_size = size;
size = internal::PartitionSizeAdjustAdd(allow_extras, size);
PA_CHECK(size >= requested_size); // check for overflows
size_t raw_size =
internal::PartitionSizeAdjustAdd(allow_extras, requested_size);
PA_CHECK(raw_size >= requested_size); // check for overflows
uint16_t bucket_index = SizeToBucketIndex(size);
size_t allocated_size;
uint16_t bucket_index = SizeToBucketIndex(raw_size);
size_t utilized_slot_size;
bool is_already_zeroed;
void* ret = nullptr;
......@@ -796,13 +807,16 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
// the thread cache allocation will return nullptr.
ret = tcache->GetFromCache(bucket_index);
is_already_zeroed = false;
allocated_size = buckets[bucket_index].slot_size;
utilized_slot_size = buckets[bucket_index].slot_size;
#if DCHECK_IS_ON()
// Make sure that the allocated pointer comes from the same place it would
// for a non-thread cache allocation.
if (ret) {
Page* page = Page::FromPointerNoAlignmentCheck(ret);
// All large allocations must go through the RawAlloc path to correctly
// set |utilized_slot_size|.
PA_DCHECK(!page->get_raw_size_ptr()); // doesn't have raw size
PA_DCHECK(IsValidPage(page));
PA_DCHECK(page->bucket == &buckets[bucket_index]);
}
......@@ -810,26 +824,44 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
}
if (!ret)
ret = RawAlloc(buckets + bucket_index, flags, size, &allocated_size,
ret = RawAlloc(buckets + bucket_index, flags, raw_size, &utilized_slot_size,
&is_already_zeroed);
if (UNLIKELY(!ret))
return nullptr;
// Layout inside the slot: |[tag]|cookie|object|[empty]|cookie|
// <--a--->
// <------b------->
// <-----------------c---------------->
// a: allocated_size
// b: size_with_no_extras
// c: new_slot_size
// Note, empty space occurs if the slot size is larger than needed to
// accommodate the request. This doesn't apply to direct-mapped allocations
// and single-slot spans.
// The tag may or may not exist in the slot, depending on CheckedPtr
// implementation.
size_t size_with_no_extras =
internal::PartitionSizeAdjustSubtract(allow_extras, allocated_size);
// Layout inside the slot:
// |[tag/refcnt]|[cookie]|...data...|[empty]|[cookie]|[unused]|
// <----a----->
// <--------b--------->
// <----------c----------> <---c---->
// <---------------d---------------> + <---d---->
// <------------------------e------------------------>
// <----------------------------f----------------------------->
// a: requested_size
// b: usable_size
// c: extras
// d: raw_size
// e: utilized_slot_size
// f: slot_size
//
// - The tag/ref-count may or may not exist in the slot, depending on
// CheckedPtr implementation.
// - Cookies exist only when DCHECK is on.
// - Think of raw_size as the minimum size required internally to satisfy
// the allocation request (i.e. requested_size + extras)
// - Note, at most one "empty" or "unused" space can occur at a time. It
// occurs when slot_size is larger than raw_size. "unused" applies only to
// large allocations (direct-mapped and single-slot slot spans) and "empty"
// only to small allocations.
// Why either-or, one might ask? We make an effort to put the trailing
// cookie as close to data as possible to catch overflows (often
// off-by-one), but that's possible only if we have enough space in metadata
// to save raw_size, i.e. only for large allocations. For small allocations,
// we have no other choice than putting the cookie at the very end of the
// slot, thus creating the "empty" space.
size_t usable_size =
internal::PartitionSizeAdjustSubtract(allow_extras, utilized_slot_size);
// The value given to the application is just after the tag and cookie.
ret = internal::PartitionPointerAdjustAdd(allow_extras, ret);
......@@ -838,7 +870,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
if (allow_extras) {
char* char_ret = static_cast<char*>(ret);
internal::PartitionCookieWriteValue(char_ret - internal::kCookieSize);
internal::PartitionCookieWriteValue(char_ret + size_with_no_extras);
internal::PartitionCookieWriteValue(char_ret + usable_size);
}
#endif
......@@ -847,21 +879,21 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
bool zero_fill = flags & PartitionAllocZeroFill;
if (!zero_fill) {
#if DCHECK_IS_ON()
memset(ret, kUninitializedByte, size_with_no_extras);
memset(ret, kUninitializedByte, usable_size);
#endif
} else if (!is_already_zeroed) {
memset(ret, 0, size_with_no_extras);
memset(ret, 0, usable_size);
}
bool is_direct_mapped = size > kMaxBucketed;
bool is_direct_mapped = raw_size > kMaxBucketed;
if (allow_extras && !is_direct_mapped) {
// Do not set tag for MTECheckedPtr in the set-tag-at-free case.
// It is set only at Free() time and at slot span allocation time.
#if !ENABLE_TAG_FOR_MTE_CHECKED_PTR || !MTE_CHECKED_PTR_SET_TAG_AT_FREE
// PartitionTagSetValue requires that the size is
// tag_bitmap::kBytesPerPartitionTag-aligned (currently 16 bytes-aligned)
// when MTECheckedPtr is enabled. However, allocated_size may not be aligned
// for single-slot slot spans. So we need the bucket's slot_size.
// when MTECheckedPtr is enabled. However, utilized_slot_size may not be
// aligned for single-slot slot spans. So we need the bucket's slot_size.
size_t slot_size_with_no_extras = internal::PartitionSizeAdjustSubtract(
allow_extras, buckets[bucket_index].slot_size);
internal::PartitionTagSetValue(ret, slot_size_with_no_extras,
......@@ -879,11 +911,11 @@ template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::RawAlloc(
Bucket* bucket,
int flags,
size_t size,
size_t* allocated_size,
size_t raw_size,
size_t* utilized_slot_size,
bool* is_already_zeroed) {
internal::ScopedGuard<thread_safe> guard{lock_};
return AllocFromBucket(bucket, flags, size, allocated_size,
return AllocFromBucket(bucket, flags, raw_size, utilized_slot_size,
is_already_zeroed);
}
......@@ -939,9 +971,9 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocFlags(
}
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::Alloc(size_t size,
ALWAYS_INLINE void* PartitionRoot<thread_safe>::Alloc(size_t requested_size,
const char* type_name) {
return AllocFlags(0, size, type_name);
return AllocFlags(0, requested_size, type_name);
}
template <bool thread_safe>
......
......@@ -543,7 +543,7 @@ template <bool thread_safe>
void* PartitionBucket<thread_safe>::SlowPathAlloc(
PartitionRoot<thread_safe>* root,
int flags,
size_t size,
size_t raw_size,
bool* is_already_zeroed) {
// The slow path is called when the freelist is empty.
PA_DCHECK(!active_pages_head->freelist_head);
......@@ -567,11 +567,11 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// the empty or decommitted lists which affects the subsequent conditional.
bool return_null = flags & PartitionAllocReturnNull;
if (UNLIKELY(is_direct_mapped())) {
PA_DCHECK(size > kMaxBucketed);
PA_DCHECK(raw_size > kMaxBucketed);
PA_DCHECK(this == &root->sentinel_bucket);
PA_DCHECK(active_pages_head ==
PartitionPage<thread_safe>::get_sentinel_page());
if (size > MaxDirectMapped()) {
if (raw_size > MaxDirectMapped()) {
if (return_null)
return nullptr;
// The lock is here to protect PA from:
......@@ -596,10 +596,10 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// equivalent, but that's violating the contract of
// base::OnNoMemoryInternal().
ScopedUnlockGuard<thread_safe> unlock{root->lock_};
PartitionExcessiveAllocationSize(size);
PartitionExcessiveAllocationSize(raw_size);
IMMEDIATE_CRASH(); // Not required, kept as documentation.
}
new_page = PartitionDirectMap(root, flags, size);
new_page = PartitionDirectMap(root, flags, raw_size);
if (new_page)
new_page_bucket = new_page->bucket;
// New pages from PageAllocator are always zeroed.
......@@ -659,13 +659,13 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
return nullptr;
// See comment above.
ScopedUnlockGuard<thread_safe> unlock{root->lock_};
root->OutOfMemory(size);
root->OutOfMemory(raw_size);
IMMEDIATE_CRASH(); // Not required, kept as documentation.
}
PA_DCHECK(new_page_bucket != &root->sentinel_bucket);
new_page_bucket->active_pages_head = new_page;
new_page->set_raw_size(size);
new_page->set_raw_size(raw_size);
// If we found an active page with free slots, or an empty page, we have a
// usable freelist head.
......
......@@ -60,7 +60,7 @@ struct PartitionBucket {
// Note the matching Free() functions are in PartitionPage.
BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t size,
size_t raw_size,
bool* is_already_zeroed)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
......
......@@ -123,14 +123,14 @@ struct PartitionPage {
ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
// Returns either the exact allocated size for direct-mapped and single-slot
// buckets, or the slot size. The second one is an overestimate of the real
// allocated size.
ALWAYS_INLINE size_t GetAllocatedSize() const {
// Allocated size can be:
// Returns the size of the region used within a slot. The used region
// consists of the actual allocated data, extras and possibly empty space.
ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
// The returned size can be:
// - The slot size for small buckets.
// - Stored exactly, for large buckets and direct-mapped allocations (see
// the comment in get_raw_size_ptr()).
// - The exact size needed to satisfy the allocation (incl. extras), for
// large buckets and direct-mapped allocations (see the comment in
// get_raw_size_ptr() for more info).
size_t result = bucket->slot_size;
if (UNLIKELY(get_raw_size_ptr())) // has raw size.
result = get_raw_size();
......@@ -274,8 +274,8 @@ ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr()
const {
// For direct-map as well as single-slot buckets which span more than
// |kMaxPartitionPagesPerSlotSpan| partition pages, we have some spare
// metadata space to store the raw allocation size. We can use this to report
// better statistics.
// metadata space to store the raw size needed to satisfy the allocation
// (requested size + extras). We can use this to report better statistics.
if (LIKELY(bucket->slot_size <=
MaxSystemPagesPerSlotSpan() * SystemPageSize()))
return nullptr;
......
......@@ -21,13 +21,13 @@ void PartitionRefCount::Free() {
auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
#ifdef ADDRESS_SANITIZER
size_t allocated_size = page->GetAllocatedSize();
size_t utilized_slot_size = page->GetUtilizedSlotSize();
// PartitionRefCount is required to be allocated inside a `PartitionRoot` that
// supports extras.
PA_DCHECK(root->allow_extras);
size_t size_with_no_extras = internal::PartitionSizeAdjustSubtract(
/* allow_extras= */ true, allocated_size);
ASAN_UNPOISON_MEMORY_REGION(this, size_with_no_extras);
size_t usable_size = internal::PartitionSizeAdjustSubtract(
/* allow_extras= */ true, utilized_slot_size);
ASAN_UNPOISON_MEMORY_REGION(this, usable_size);
#endif
if (root->is_thread_safe) {
......
......@@ -152,8 +152,10 @@ struct BASE_EXPORT PartitionRoot {
size_t alignment,
size_t size);
ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
ALWAYS_INLINE void* Alloc(size_t requested_size, const char* type_name);
ALWAYS_INLINE void* AllocFlags(int flags,
size_t requested_size,
const char* type_name);
// Same as |AllocFlags()|, but bypasses the allocator hooks.
//
// This is separate from AllocFlags() because other callers of AllocFlags()
......@@ -163,7 +165,7 @@ struct BASE_EXPORT PartitionRoot {
// taking the extra branch in the non-malloc() case doesn't hurt. In addition,
// for the malloc() case, the compiler correctly removes the branch, since
// this is marked |ALWAYS_INLINE|.
ALWAYS_INLINE void* AllocFlagsNoHooks(int flags, size_t size);
ALWAYS_INLINE void* AllocFlagsNoHooks(int flags, size_t requested_size);
ALWAYS_INLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
// Overload that may return nullptr if reallocation isn't possible. In this
......@@ -219,20 +221,23 @@ struct BASE_EXPORT PartitionRoot {
}
private:
// Allocates memory, without any cookies / tags.
// Allocates memory, without initializing extras.
//
// |flags| and |size| are as in AllocFlags(). |allocated_size| and
// is_already_zeroed| are output only. |allocated_size| is guaranteed to be
// larger or equal to |size|.
// - |flags| are as in AllocFlags().
// - |raw_size| should accommodate extras on top of AllocFlags()'s
// |requested_size|.
// - |utilized_slot_size| and |is_already_zeroed| are output only.
// |utilized_slot_size| is guaranteed to be larger or equal to
// |raw_size|.
ALWAYS_INLINE void* RawAlloc(Bucket* bucket,
int flags,
size_t size,
size_t* allocated_size,
size_t raw_size,
size_t* utilized_slot_size,
bool* is_already_zeroed);
ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket,
int flags,
size_t size,
size_t* allocated_size,
size_t raw_size,
size_t* utilized_slot_size,
bool* is_already_zeroed)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
......
......@@ -180,10 +180,10 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));
const size_t object_size = PartitionSizeAdjustSubtract(
root_.allow_extras, target_page->GetAllocatedSize());
const size_t usable_size = PartitionSizeAdjustSubtract(
root_.allow_extras, target_page->GetUtilizedSlotSize());
// Range check for inner pointers.
if (maybe_ptr >= base + object_size)
if (maybe_ptr >= base + usable_size)
return 0;
// Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
......@@ -208,9 +208,9 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
auto* page = Page::FromPointerNoAlignmentCheck(object);
// Use zero as a zapping value to speed up the fast bailout check in
// ScanPartition.
memset(
object, 0,
PartitionSizeAdjustSubtract(allow_extras, page->GetAllocatedSize()));
memset(object, 0,
PartitionSizeAdjustSubtract(allow_extras,
page->GetUtilizedSlotSize()));
});
}
}
......
......@@ -130,7 +130,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
//
// This also means that deallocation must use RawFreeStatic(), hence the
// operator delete() implementation below.
size_t allocated_size;
size_t utilized_slot_size;
bool already_zeroed;
auto* bucket =
......@@ -138,7 +138,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
sizeof(ThreadCache));
void* buffer =
root->RawAlloc(bucket, PartitionAllocZeroFill, sizeof(ThreadCache),
&allocated_size, &already_zeroed);
&utilized_slot_size, &already_zeroed);
ThreadCache* tcache = new (buffer) ThreadCache(root);
// This may allocate.
......
......@@ -312,10 +312,10 @@ TEST_F(ThreadCacheTest, RecordStats) {
EXPECT_EQ(10u, cache_fill_misses_counter.Delta());
// Memory footprint.
size_t allocated_size = g_root->buckets[bucket_index].slot_size;
ThreadCacheStats stats;
ThreadCacheRegistry::Instance().DumpStats(true, &stats);
EXPECT_EQ(allocated_size * ThreadCache::kMaxCountPerBucket,
EXPECT_EQ(
g_root->buckets[bucket_index].slot_size * ThreadCache::kMaxCountPerBucket,
stats.bucket_total_memory);
EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
}
......@@ -332,9 +332,9 @@ TEST_F(ThreadCacheTest, MultipleThreadCachesAccounting) {
ThreadCacheStats stats;
ThreadCacheRegistry::Instance().DumpStats(false, &stats);
size_t allocated_size = g_root->buckets[bucket_index].slot_size;
// 2* for this thread and the parent one.
EXPECT_EQ(2 * allocated_size, stats.bucket_total_memory);
EXPECT_EQ(2 * g_root->buckets[bucket_index].slot_size,
stats.bucket_total_memory);
EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);
uint64_t this_thread_alloc_count =
......