Commit 861c2f7b authored by Benoit Lize, committed by Commit Bot

[PartitionAlloc] Make the sentinel bucket per-PartitionRoot.

This simplifies the code, and removes a branch from the allocation path.

Bug: 998048
Change-Id: Icba80de3755a00164f40ee23b7fb4deb0f9d8ffa
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2418656
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#808820}
parent 3602cd9d
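
For context, the core of the change is a layout trick: each PartitionRoot now owns its own sentinel bucket, placed immediately after the regular buckets array, so a direct-mapped allocation's bucket index is computed exactly like any other and no sentinel-specific branch is needed on the allocation path. The sketch below illustrates the idea only; Root, Bucket, and kNumBuckets are simplified stand-ins, not the actual PartitionAlloc declarations.

// Illustrative sketch only -- simplified stand-ins for the real types.
#include <cstddef>
#include <cstdint>

constexpr size_t kNumBuckets = 128;  // placeholder value

struct Bucket {
  void* active_pages_head = nullptr;
  uint32_t slot_size = 0;  // 0 for the sentinel (direct-mapped allocations)
};

struct Root {
  Bucket buckets[kNumBuckets] = {};
  // The sentinel lives directly after the regular buckets, so its computed
  // index (bucket - buckets) comes out to kNumBuckets, one entry past the
  // regular ones, and the fast path never has to ask "is this the sentinel?".
  Bucket sentinel_bucket = {};
};

static_assert(offsetof(Root, sentinel_bucket) ==
                  offsetof(Root, buckets) + kNumBuckets * sizeof(Bucket),
              "sentinel_bucket must immediately follow buckets");

The static_assert added in the real diff below plays the same role: it pins the sentinel's position so the index arithmetic stays valid.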
@@ -241,10 +241,8 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment,
// We mark the sentinel bucket/page as free to make sure it is skipped by our
// logic to find a new active page.
//
// This may be executed several times, once per PartitionRoot. This is not an
// issue, as the operation is atomic and idempotent.
Bucket::get_sentinel_bucket()->active_pages_head = Page::get_sentinel_page();
memset(&sentinel_bucket, 0, sizeof(sentinel_bucket));
sentinel_bucket.active_pages_head = Page::get_sentinel_page();
// This is a "magic" value so we can test if a root pointer is valid.
inverted_self = ~reinterpret_cast<uintptr_t>(this);
@@ -282,7 +280,7 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment,
// Use the bucket of the finest granularity for malloc(0) etc.
*bucket_ptr++ = &buckets[0];
} else if (order > kMaxBucketedOrder) {
*bucket_ptr++ = Bucket::get_sentinel_bucket();
*bucket_ptr++ = &sentinel_bucket;
} else {
Bucket* valid_bucket = bucket;
// Skip over invalid buckets.
@@ -298,7 +296,7 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment,
&bucket_lookups[0] + ((kBitsPerSizeT + 1) * kNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucket_ptr = Bucket::get_sentinel_bucket();
*bucket_ptr = &sentinel_bucket;
initialized = true;
}
......
@@ -412,6 +412,7 @@ struct BASE_EXPORT PartitionRoot {
// behavior.
Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kNumBucketsPerOrder) + 1] = {};
Bucket buckets[kNumBuckets] = {};
Bucket sentinel_bucket;
PartitionRoot() = default;
PartitionRoot(bool enable_tag_pointers, bool enable_thread_cache) {
@@ -546,6 +547,11 @@ static_assert(sizeof(PartitionRoot<internal::ThreadSafe>) ==
static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, buckets) ==
offsetof(PartitionRoot<internal::NotThreadSafe>, buckets),
"Layouts should match");
static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
offsetof(PartitionRoot<internal::ThreadSafe>, buckets) +
kNumBuckets *
sizeof(PartitionRoot<internal::ThreadSafe>::Bucket),
"sentinel_bucket must be just after the regular buckets.");
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
@@ -588,9 +594,8 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
page = Page::FromPointer(ret);
// For direct mapped allocations, |bucket| is the sentinel.
PA_DCHECK((page->bucket == bucket) ||
(page->bucket->is_direct_mapped() &&
(bucket == Bucket::get_sentinel_bucket())));
PA_DCHECK((page->bucket == bucket) || (page->bucket->is_direct_mapped() &&
(bucket == &sentinel_bucket)));
*allocated_size = page->GetAllocatedSize();
}
@@ -700,8 +705,9 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
// Also the thread-unsafe variant doesn't have a use for a thread cache, so
// make it statically known to the compiler.
if (thread_safe && root->with_thread_cache &&
LIKELY(!page->bucket->is_direct_mapped())) {
PA_DCHECK(page->bucket >= root->buckets);
!page->bucket->is_direct_mapped()) {
PA_DCHECK(page->bucket >= root->buckets &&
page->bucket <= &root->sentinel_bucket);
size_t bucket_index = page->bucket - root->buckets;
auto* thread_cache = internal::ThreadCache::Get();
if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
@@ -942,33 +948,23 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
with_thread_cache = true;
}
// bucket->slot_size is 0 for direct-mapped allocations, as their bucket is
// the sentinel one. However, since we are touching *bucket, we may as well
// check it directly, rather than fetching the sentinel one, and comparing
// the addresses. Since the sentinel bucket is *not* part of the buckets
// array, |bucket_index| is not valid for the sentinel one.
//
// TODO(lizeb): Consider making Bucket::sentinel per-PartitionRoot, at the
// end of the |buckets| array. This would remove this branch.
if (LIKELY(bucket->slot_size)) {
PA_DCHECK(bucket != Bucket::get_sentinel_bucket());
PA_DCHECK(bucket >= buckets && bucket < (buckets + kNumBuckets));
size_t bucket_index = bucket - buckets;
ret = tcache->GetFromCache(bucket_index);
is_already_zeroed = false;
allocated_size = bucket->slot_size;
// the sentinel one. Since |bucket_index| is going to be kNumBuckets + 1,
// the thread cache allocation will return nullptr.
PA_DCHECK(bucket >= buckets && bucket <= &sentinel_bucket);
size_t bucket_index = bucket - buckets;
ret = tcache->GetFromCache(bucket_index);
is_already_zeroed = false;
allocated_size = bucket->slot_size;
#if DCHECK_IS_ON()
// Make sure that the allocated pointer comes from the same place it would
// for a non-thread cache allocation.
if (ret) {
Page* page = Page::FromPointerNoAlignmentCheck(ret);
PA_DCHECK(IsValidPage(page));
PA_DCHECK(page->bucket == &buckets[bucket_index]);
}
#endif
} else {
PA_DCHECK(bucket == Bucket::get_sentinel_bucket());
// Make sure that the allocated pointer comes from the same place it would
// for a non-thread cache allocation.
if (ret) {
Page* page = Page::FromPointerNoAlignmentCheck(ret);
PA_DCHECK(IsValidPage(page));
PA_DCHECK(page->bucket == &buckets[bucket_index]);
}
#endif
}
if (!ret)
......
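
The comment in AllocFlagsNoHooks above leans on the same layout: the sentinel's computed bucket index lands just past the range the thread cache serves, so the cache lookup simply misses and the allocation falls through to the slow (direct-map) path, with no explicit sentinel check. A minimal sketch of that behavior follows; ThreadCacheSketch and its one-slot-per-bucket freelist are hypothetical simplifications, not the real ThreadCache interface.

// Hypothetical, simplified sketch -- not the actual ThreadCache code.
#include <cstddef>

constexpr size_t kNumBuckets = 128;  // placeholder value

struct ThreadCacheSketch {
  // One cached entry per regular bucket; the sentinel has no entry here.
  void* freelists[kNumBuckets] = {};

  void* GetFromCache(size_t bucket_index) {
    // A direct-mapped allocation's bucket is the per-root sentinel, whose
    // index falls outside [0, kNumBuckets), so the lookup misses and the
    // caller proceeds to the slow path -- no dedicated branch required.
    if (bucket_index >= kNumBuckets)
      return nullptr;
    void* entry = freelists[bucket_index];
    if (entry)
      freelists[bucket_index] = nullptr;  // "pop" the single cached slot
    return entry;
  }
};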
@@ -124,16 +124,6 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
} // namespace
// static
template <bool thread_safe>
PartitionBucket<thread_safe> PartitionBucket<thread_safe>::sentinel_bucket_;
template <bool thread_safe>
PartitionBucket<thread_safe>*
PartitionBucket<thread_safe>::get_sentinel_bucket() {
return &sentinel_bucket_;
}
// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of NumSystemPagesPerPartitionPage() (aka 4) anyways.
@@ -571,7 +561,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
bool return_null = flags & PartitionAllocReturnNull;
if (UNLIKELY(is_direct_mapped())) {
PA_DCHECK(size > kMaxBucketed);
PA_DCHECK(this == get_sentinel_bucket());
PA_DCHECK(this == &root->sentinel_bucket);
PA_DCHECK(active_pages_head ==
PartitionPage<thread_safe>::get_sentinel_page());
if (size > MaxDirectMapped()) {
@@ -665,7 +655,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
IMMEDIATE_CRASH(); // Not required, kept as documentation.
}
PA_DCHECK(new_page_bucket != get_sentinel_bucket());
PA_DCHECK(new_page_bucket != &root->sentinel_bucket);
new_page_bucket->active_pages_head = new_page;
new_page->set_raw_size(size);
......
@@ -86,8 +86,6 @@ struct PartitionBucket {
return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
}
BASE_EXPORT static PartitionBucket* get_sentinel_bucket();
// This helper function scans a bucket's active page list for a suitable new
// active page. When it finds a suitable new active page (one that has
// free slots and is not empty), it is set as the new active page. If there
@@ -157,8 +155,6 @@ struct PartitionBucket {
// the current bucket. If the |page| was freshly allocated, it must have been
// passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page);
static PartitionBucket sentinel_bucket_;
};
} // namespace internal
......
@@ -170,6 +170,12 @@ TEST_F(ThreadCacheTest, LargeAllocationsAreNotCached) {
}
#endif
TEST_F(ThreadCacheTest, DirectMappedAllocationsAreNotCached) {
FillThreadCacheAndReturnIndex(1024 * 1024);
// The line above would crash due to out of bounds access if this wasn't
// properly handled.
}
TEST_F(ThreadCacheTest, MultipleThreadCaches) {
const size_t kTestSize = 100;
FillThreadCacheAndReturnIndex(kTestSize);
......