Commit 874bd97e authored by Bartek Nowierski, committed by Commit Bot

Even more size-related disambiguation (heavy duty)

Continuation of crrev.com/c/2455947 and crrev.com/c/2463043.

The remaining GetAllocatedSize() was still ambiguous: it returned two
different things, only one of which could be considered an "allocated
size". Furthermore, the naming of function parameters and local
variables wasn't always clear.

This is an attempt to make the terminology consistent (see the sketch
after the list):
- requested_size - what the app requested
- extras - PA-internal data surrounding the allocated data, like
  tag/ref-count (for CheckedPtr) or cookies (debug only)
- raw_size - requested_size + extras
- usable_size (>=requested_size) - in case of over-allocation, this is
  what's available for the app (no risk of overwriting extras, etc.)
- utilized_slot_size - contiguous region of the slot that accommodates
  all of the above (requested allocation, additional usable space and
  extras); equal to raw_size if it's possible to store it in metadata
  (for large allocations only), slot_size otherwise
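
To make the relationships concrete, here is a minimal sketch
(illustrative only: "extras", "can_store_raw_size" and "slot_size" are
stand-in names, and the real code goes through helpers such as
PartitionSizeAdjustSubtract rather than bare arithmetic):

    // All quantities are in bytes; names are illustrative, not PA's.
    size_t raw_size = requested_size + extras;
    size_t utilized_slot_size =
        can_store_raw_size ? raw_size    // large / direct-mapped slots
                           : slot_size;  // small buckets
    size_t usable_size = utilized_slot_size - extras;  // >= requested_size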

Change-Id: Ia2a771af6d29261b049b0dc98fede6395dc7a35f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2462909
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Auto-Submit: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#816469}
parent ea95f1f2
@@ -543,7 +543,7 @@ template <bool thread_safe>
 void* PartitionBucket<thread_safe>::SlowPathAlloc(
     PartitionRoot<thread_safe>* root,
     int flags,
-    size_t size,
+    size_t raw_size,
     bool* is_already_zeroed) {
   // The slow path is called when the freelist is empty.
   PA_DCHECK(!active_pages_head->freelist_head);
@@ -567,11 +567,11 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
   // the empty or decommitted lists which affects the subsequent conditional.
   bool return_null = flags & PartitionAllocReturnNull;
   if (UNLIKELY(is_direct_mapped())) {
-    PA_DCHECK(size > kMaxBucketed);
+    PA_DCHECK(raw_size > kMaxBucketed);
     PA_DCHECK(this == &root->sentinel_bucket);
     PA_DCHECK(active_pages_head ==
               PartitionPage<thread_safe>::get_sentinel_page());
-    if (size > MaxDirectMapped()) {
+    if (raw_size > MaxDirectMapped()) {
       if (return_null)
         return nullptr;
       // The lock is here to protect PA from:
@@ -596,10 +596,10 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
       // equivalent, but that's violating the contract of
       // base::OnNoMemoryInternal().
       ScopedUnlockGuard<thread_safe> unlock{root->lock_};
-      PartitionExcessiveAllocationSize(size);
+      PartitionExcessiveAllocationSize(raw_size);
       IMMEDIATE_CRASH();  // Not required, kept as documentation.
     }
-    new_page = PartitionDirectMap(root, flags, size);
+    new_page = PartitionDirectMap(root, flags, raw_size);
     if (new_page)
       new_page_bucket = new_page->bucket;
     // New pages from PageAllocator are always zeroed.
@@ -659,13 +659,13 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
       return nullptr;
     // See comment above.
     ScopedUnlockGuard<thread_safe> unlock{root->lock_};
-    root->OutOfMemory(size);
+    root->OutOfMemory(raw_size);
     IMMEDIATE_CRASH();  // Not required, kept as documentation.
   }

   PA_DCHECK(new_page_bucket != &root->sentinel_bucket);
   new_page_bucket->active_pages_head = new_page;
-  new_page->set_raw_size(size);
+  new_page->set_raw_size(raw_size);

   // If we found an active page with free slots, or an empty page, we have a
   // usable freelist head.
......
@@ -60,7 +60,7 @@ struct PartitionBucket {
   // Note the matching Free() functions are in PartitionPage.
   BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
                                            int flags,
-                                           size_t size,
+                                           size_t raw_size,
                                            bool* is_already_zeroed)
       EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
......
@@ -123,14 +123,14 @@ struct PartitionPage {
   ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
   ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);

-  // Returns either the exact allocated size for direct-mapped and single-slot
-  // buckets, or the slot size. The second one is an overestimate of the real
-  // allocated size.
-  ALWAYS_INLINE size_t GetAllocatedSize() const {
-    // Allocated size can be:
+  // Returns the size of the region used within a slot. The used region
+  // comprises the actual allocated data, extras, and possibly empty space.
+  ALWAYS_INLINE size_t GetUtilizedSlotSize() const {
+    // The returned size can be:
     // - The slot size for small buckets.
-    // - Stored exactly, for large buckets and direct-mapped allocations (see
-    //   the comment in get_raw_size_ptr()).
+    // - The exact size needed to satisfy the allocation (incl. extras), for
+    //   large buckets and direct-mapped allocations (see the comment in
+    //   get_raw_size_ptr() for more info).
     size_t result = bucket->slot_size;
     if (UNLIKELY(get_raw_size_ptr()))  // has raw size.
       result = get_raw_size();
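
For intuition, a hedged worked example (all figures invented; real
bucket and extras sizes depend on configuration):

    // Small bucket: raw_size = 48 fits in slot_size = 64; no raw size is
    // stored, so GetUtilizedSlotSize() returns 64 (the slot size).
    // Direct map / large bucket: the raw size is stored in spare metadata,
    // so GetUtilizedSlotSize() returns it exactly.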
@@ -274,8 +274,8 @@ ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr()
     const {
   // For direct-map as well as single-slot buckets which span more than
   // |kMaxPartitionPagesPerSlotSpan| partition pages, we have some spare
-  // metadata space to store the raw allocation size. We can use this to report
-  // better statistics.
+  // metadata space to store the raw size needed to satisfy the allocation
+  // (requested size + extras). We can use this to report better statistics.
   if (LIKELY(bucket->slot_size <=
              MaxSystemPagesPerSlotSpan() * SystemPageSize()))
     return nullptr;
......
@@ -21,13 +21,13 @@ void PartitionRefCount::Free() {
   auto* root = PartitionRoot<ThreadSafe>::FromPage(page);

 #ifdef ADDRESS_SANITIZER
-  size_t allocated_size = page->GetAllocatedSize();
+  size_t utilized_slot_size = page->GetUtilizedSlotSize();
   // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
   // supports extras.
   PA_DCHECK(root->allow_extras);
-  size_t size_with_no_extras = internal::PartitionSizeAdjustSubtract(
-      /* allow_extras= */ true, allocated_size);
-  ASAN_UNPOISON_MEMORY_REGION(this, size_with_no_extras);
+  size_t usable_size = internal::PartitionSizeAdjustSubtract(
+      /* allow_extras= */ true, utilized_slot_size);
+  ASAN_UNPOISON_MEMORY_REGION(this, usable_size);
 #endif

   if (root->is_thread_safe) {
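
A hedged numeric illustration of the adjustment above (the extras total
is made up; actual overhead depends on build flags):

    // Suppose utilized_slot_size == 64 and the extras total 8 bytes. Then
    //   usable_size = PartitionSizeAdjustSubtract(true, 64) == 56,
    // so ASAN_UNPOISON_MEMORY_REGION() exposes only the app-usable bytes,
    // never the extras themselves.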
......
@@ -152,8 +152,10 @@ struct BASE_EXPORT PartitionRoot {
                                        size_t alignment,
                                        size_t size);
-  ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
-  ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+  ALWAYS_INLINE void* Alloc(size_t requested_size, const char* type_name);
+  ALWAYS_INLINE void* AllocFlags(int flags,
+                                 size_t requested_size,
+                                 const char* type_name);

   // Same as |AllocFlags()|, but bypasses the allocator hooks.
   //
   // This is separate from AllocFlags() because other callers of AllocFlags()
@@ -163,7 +165,7 @@
   // taking the extra branch in the non-malloc() case doesn't hurt. In addition,
   // for the malloc() case, the compiler correctly removes the branch, since
   // this is marked |ALWAYS_INLINE|.
-  ALWAYS_INLINE void* AllocFlagsNoHooks(int flags, size_t size);
+  ALWAYS_INLINE void* AllocFlagsNoHooks(int flags, size_t requested_size);

   ALWAYS_INLINE void* Realloc(void* ptr, size_t newize, const char* type_name);
   // Overload that may return nullptr if reallocation isn't possible. In this
@@ -219,20 +221,23 @@ struct BASE_EXPORT PartitionRoot {
   }

  private:
-  // Allocates memory, without any cookies / tags.
+  // Allocates memory, without initializing extras.
   //
-  // |flags| and |size| are as in AllocFlags(). |allocated_size| and
-  // |is_already_zeroed| are output only. |allocated_size| is guaranteed to be
-  // larger or equal to |size|.
+  // - |flags| are as in AllocFlags().
+  // - |raw_size| should accommodate extras on top of AllocFlags()'s
+  //   |requested_size|.
+  // - |utilized_slot_size| and |is_already_zeroed| are output only.
+  //   |utilized_slot_size| is guaranteed to be larger or equal to
+  //   |raw_size|.
   ALWAYS_INLINE void* RawAlloc(Bucket* bucket,
                                int flags,
-                               size_t size,
-                               size_t* allocated_size,
+                               size_t raw_size,
+                               size_t* utilized_slot_size,
                                bool* is_already_zeroed);

   ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket,
                                       int flags,
-                                      size_t size,
-                                      size_t* allocated_size,
+                                      size_t raw_size,
+                                      size_t* utilized_slot_size,
                                       bool* is_already_zeroed)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
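
A simplified sketch of how these sizes flow through the allocation path
after the rename (not the literal implementation: SizeToBucketSketch is
a hypothetical stand-in for PA's real bucket lookup, and
PartitionSizeAdjustAdd is assumed to mirror the Subtract helper seen
above):

    // Sketch only, using the terminology from the commit message.
    void* AllocFlagsSketch(PartitionRoot<internal::ThreadSafe>* root,
                           int flags,
                           size_t requested_size) {
      // Grow the request so the slot can also hold PA-internal extras.
      size_t raw_size = internal::PartitionSizeAdjustAdd(root->allow_extras,
                                                         requested_size);
      size_t utilized_slot_size;  // out: guaranteed >= raw_size
      bool is_already_zeroed;     // out
      return root->RawAlloc(SizeToBucketSketch(root, raw_size), flags,
                            raw_size, &utilized_slot_size,
                            &is_already_zeroed);
    }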
......
@@ -180,10 +180,10 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
       Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
   PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));

-  const size_t object_size = PartitionSizeAdjustSubtract(
-      root_.allow_extras, target_page->GetAllocatedSize());
+  const size_t usable_size = PartitionSizeAdjustSubtract(
+      root_.allow_extras, target_page->GetUtilizedSlotSize());
   // Range check for inner pointers.
-  if (maybe_ptr >= base + object_size)
+  if (maybe_ptr >= base + usable_size)
     return 0;

   // Now we are certain that |maybe_ptr| is a dangling pointer. Mark it again in
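
A numeric illustration of the range check above (addresses invented):

    // Suppose base == 0x1000 and usable_size == 40 (0x28). Candidates in
    // [0x1000, 0x1028) may be valid inner pointers and pass the check;
    // maybe_ptr >= 0x1028 points past the usable region, so the function
    // bails out and returns 0.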
@@ -208,9 +208,9 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
       auto* page = Page::FromPointerNoAlignmentCheck(object);
       // Use zero as a zapping value to speed up the fast bailout check in
       // ScanPartition.
-      memset(
-          object, 0,
-          PartitionSizeAdjustSubtract(allow_extras, page->GetAllocatedSize()));
+      memset(object, 0,
+             PartitionSizeAdjustSubtract(allow_extras,
+                                         page->GetUtilizedSlotSize()));
     });
   }
 }
......
@@ -130,7 +130,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
   //
   // This also means that deallocation must use RawFreeStatic(), hence the
   // operator delete() implementation below.
-  size_t allocated_size;
+  size_t utilized_slot_size;
   bool already_zeroed;

   auto* bucket =
@@ -138,7 +138,7 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
                               sizeof(ThreadCache));
   void* buffer =
       root->RawAlloc(bucket, PartitionAllocZeroFill, sizeof(ThreadCache),
-                     &allocated_size, &already_zeroed);
+                     &utilized_slot_size, &already_zeroed);
   ThreadCache* tcache = new (buffer) ThreadCache(root);

   // This may allocate.
......
@@ -312,11 +312,11 @@ TEST_F(ThreadCacheTest, RecordStats) {
   EXPECT_EQ(10u, cache_fill_misses_counter.Delta());

   // Memory footprint.
-  size_t allocated_size = g_root->buckets[bucket_index].slot_size;
   ThreadCacheStats stats;
   ThreadCacheRegistry::Instance().DumpStats(true, &stats);
-  EXPECT_EQ(allocated_size * ThreadCache::kMaxCountPerBucket,
-            stats.bucket_total_memory);
+  EXPECT_EQ(
+      g_root->buckets[bucket_index].slot_size * ThreadCache::kMaxCountPerBucket,
+      stats.bucket_total_memory);
   EXPECT_EQ(sizeof(ThreadCache), stats.metadata_overhead);
 }
@@ -332,9 +332,9 @@ TEST_F(ThreadCacheTest, MultipleThreadCachesAccounting) {
   ThreadCacheStats stats;
   ThreadCacheRegistry::Instance().DumpStats(false, &stats);
-  size_t allocated_size = g_root->buckets[bucket_index].slot_size;
   // 2* for this thread and the parent one.
-  EXPECT_EQ(2 * allocated_size, stats.bucket_total_memory);
+  EXPECT_EQ(2 * g_root->buckets[bucket_index].slot_size,
+            stats.bucket_total_memory);
   EXPECT_EQ(2 * sizeof(ThreadCache), stats.metadata_overhead);

   uint64_t this_thread_alloc_count =
......