Commit d4e7ee66 authored by Benoit Lize, committed by Commit Bot

Reland "base/allocator: Add a thread cache to PartitionAlloc."

Changes: disabled the thread cache on Windows.

Original change's description:
> base/allocator: Add a thread cache to PartitionAlloc.
>
> This CL adds a thread cache to PartitionAlloc. It is optional, only
> applies to thread-safe partitions, and uses the same freelist encoding
> and bucketing as the main allocator.
>
> The thread cache is added "in the middle" of the main allocator, that is:
> - After all the cookie/tag management
> - Before the "raw" allocator.
>
> That is, the general allocation flow is:
> 1. Adjustment of requested size to make room for tags / cookies
> 2. Allocation:
>   a. Call to the thread cache, if it succeeds, return.
>   b. Otherwise, call the "raw" allocator <-- Locking
> 3. Handle cookies/tags, zero allocation if required
>
> On the deallocation side, the process is reversed:
> 1. Check cookies / tags, adjust the pointer
> 2. Deallocation
>   a. Return to the thread cache if possible. If it succeeds, return.
>   b. Otherwise, call the "raw" allocator <-- Locking
>
> The thread cache maintains an array of buckets, the same as the parent
> allocator. A single thread cache instance is only used by a single
> partition. Each bucket is a linked list of allocations, capped to a set
> maximum size. Elements in this "freelist" are encoded the same way they
> are for the main allocator.
> Only the smallest buckets are eligible for caching, to reduce the
> memory impact.
>
> There are several limitations:
> - Only a single partition is allowed to have a thread cache
> - No periodic purging of thread caches is done
> - No statistics are collected
>
> The last two limitations will be addressed in subsequent CLs. Regarding
> the first one, it is not possible to use Chrome's native thread local
> storage support, as it allocates. It is also desirable to use
> thread_local to improve performance.
>
> Bug: 998048
> Change-Id: Ia771f507d9dd1c2c26a4668c76da220fb0c65dd4
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2375206
> Commit-Queue: Benoit L <lizeb@chromium.org>
> Reviewed-by: Kentaro Hara <haraken@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#805697}

Bug: 998048
Change-Id: I23b70f6964bb297502921d1a08bf128d9093d577
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2404849
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#806597}
parent cc7fafa8
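As a rough illustration of the allocation and deallocation flow described in the message above, here is a minimal, self-contained sketch. The names and structure are hypothetical simplifications, not the PartitionAlloc API: a capped, per-thread freelist sits in front of a locked central allocator, malloc() stands in for the "raw" allocator, and the cookie/tag handling of steps 1 and 3 is omitted.

#include <cstddef>
#include <cstdlib>
#include <mutex>

constexpr std::size_t kBucketSize = 64;     // A single bucket size, for brevity.
constexpr std::size_t kMaxPerBucket = 100;  // Cap on cached entries per bucket.

struct FreeEntry { FreeEntry* next; };  // The real cache encodes |next| like the main freelist.

struct ThreadCacheSketch {
  FreeEntry* head = nullptr;
  std::size_t count = 0;

  // Allocation fast path: pop from the thread-local freelist, no lock taken.
  void* Get() {
    if (!head) return nullptr;
    FreeEntry* entry = head;
    head = entry->next;
    count--;
    return entry;
  }

  // Deallocation fast path: push onto the thread-local freelist, unless full.
  bool MaybePut(void* ptr) {
    if (count >= kMaxPerBucket) return false;
    auto* entry = static_cast<FreeEntry*>(ptr);
    entry->next = head;
    head = entry;
    count++;
    return true;
  }
};

std::mutex g_central_lock;                 // Stands in for the partition lock.
thread_local ThreadCacheSketch tls_cache;  // One cache per thread.

void* AllocSketch() {
  if (void* ptr = tls_cache.Get())  // 2.a. Thread cache hit: no locking.
    return ptr;
  std::lock_guard<std::mutex> guard(g_central_lock);  // 2.b. Miss: locked path.
  return std::malloc(kBucketSize);
}

void FreeSketch(void* ptr) {
  if (tls_cache.MaybePut(ptr))  // 2.a. Keep the block in the thread cache.
    return;
  std::lock_guard<std::mutex> guard(g_central_lock);  // 2.b. Cache full: central free.
  std::free(ptr);
}

The actual thread_cache.h added below keeps one such freelist per bucket index, reuses the allocator's freelist encoding, and only caches the smallest buckets; the diff wires it into AllocFlagsNoHooks() and FreeNoHooks().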
@@ -1793,6 +1793,8 @@ component("base") {
    "allocator/partition_allocator/partition_tag_bitmap.h",
    "allocator/partition_allocator/random.cc",
    "allocator/partition_allocator/random.h",
    "allocator/partition_allocator/thread_cache.cc",
    "allocator/partition_allocator/thread_cache.h",
  ]
  if (is_win) {
    sources +=
@@ -3217,6 +3219,7 @@ test("base_unittests") {
    "allocator/partition_allocator/memory_reclaimer_unittest.cc",
    "allocator/partition_allocator/page_allocator_unittest.cc",
    "allocator/partition_allocator/partition_alloc_unittest.cc",
    "allocator/partition_allocator/thread_cache_unittest.cc",
  ]
}
...
@@ -5,6 +5,7 @@
#include "base/allocator/allocator_shim.h"
#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
#include "base/no_destructor.h"
#include "build/build_config.h"
@@ -74,8 +75,8 @@ base::ThreadSafePartitionRoot& Allocator() {
    return *root;
  }
  auto* new_root = new (g_allocator_buffer) base::ThreadSafePartitionRoot(
      false /* enforce_alignment */, true /* enable_thread_cache */);
  g_root_.store(new_root, std::memory_order_release);
  // Semantically equivalent to base::Lock::Release().
@@ -100,8 +101,9 @@ void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  // Since the general-purpose allocator uses the thread cache, this one cannot.
  static base::NoDestructor<base::ThreadSafePartitionRoot> aligned_allocator{
      true /* enforce_alignment */, false /* enable_thread_cache */};
  return aligned_allocator->AlignedAllocFlags(base::PartitionAllocNoHooks,
                                              alignment, size);
}
...
@@ -201,7 +201,8 @@ void PartitionAllocGlobalUninitForTesting() {
}

template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(bool enforce_alignment,
                                      bool enable_thread_cache) {
  ScopedGuard guard{lock_};
  if (initialized)
    return;
@@ -215,6 +216,15 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
  // If alignment needs to be enforced, disallow adding cookies and/or tags at
  // the beginning of the slot.
  allow_extras = !enforce_alignment;

#if defined(OS_WIN)
  // Not supported on Windows due to TLS issues.
  with_thread_cache = false;
#else
  with_thread_cache = enable_thread_cache;
  if (with_thread_cache)
    internal::ThreadCache::ClaimThreadCacheAndCheck();
#endif

  // We mark the sentinel bucket/page as free to make sure it is skipped by our
  // logic to find a new active page.
@@ -280,6 +290,9 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment) {
  initialized = true;
}

template <bool thread_safe>
PartitionRoot<thread_safe>::~PartitionRoot() = default;

template <bool thread_safe>
bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
    internal::PartitionPage<thread_safe>* page,
@@ -619,6 +632,10 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
      PartitionPurgeBucket(bucket);
    }
  }

  // Purges only this thread's cache.
  if (with_thread_cache && internal::g_thread_cache)
    internal::g_thread_cache->Purge();
}

template <bool thread_safe>
@@ -806,7 +823,8 @@ void PartitionAllocator<thread_safe>::init(
    PartitionAllocatorAlignment alignment) {
  partition_root_.Init(
      alignment ==
          PartitionAllocatorAlignment::kAlignedAlloc /* enforce_alignment */,
      false);
  PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
      &partition_root_);
}
...
@@ -53,6 +53,7 @@
#include <limits.h>
#include <string.h>

#include <memory>
#include <atomic>
@@ -68,6 +69,7 @@
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"
#include "base/bits.h"
#include "base/check_op.h"
@@ -371,6 +373,8 @@ struct BASE_EXPORT PartitionRoot {
  using DirectMapExtent = internal::PartitionDirectMapExtent<thread_safe>;
  using ScopedGuard = internal::ScopedGuard<thread_safe>;

  bool with_thread_cache = false;

  internal::MaybeSpinLock<thread_safe> lock_;
  // Invariant: total_size_of_committed_pages <=
  //                total_size_of_super_pages +
@@ -409,10 +413,10 @@ struct BASE_EXPORT PartitionRoot {
  Bucket buckets[kNumBuckets] = {};

  PartitionRoot() = default;
  PartitionRoot(bool enable_tag_pointers, bool enable_thread_cache) {
    Init(enable_tag_pointers, enable_thread_cache);
  }
  ~PartitionRoot();

  // Public API
  //
@@ -424,7 +428,7 @@ struct BASE_EXPORT PartitionRoot {
  //
  // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
  // preserves the layering of the includes.
  void Init(bool enforce_alignment, bool enable_thread_cache);

  ALWAYS_INLINE static bool IsValidPage(Page* page);
  ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
@@ -462,9 +466,7 @@ struct BASE_EXPORT PartitionRoot {
  // this is marked |ALWAYS_INLINE|.
  ALWAYS_INLINE void* AllocFlagsNoHooks(int flags, size_t size);

  ALWAYS_INLINE void* Realloc(void* ptr, size_t newize, const char* type_name);

  // Overload that may return nullptr if reallocation isn't possible. In this
  // case, |ptr| remains valid.
  ALWAYS_INLINE void* TryRealloc(void* ptr,
@@ -495,13 +497,18 @@ struct BASE_EXPORT PartitionRoot {
  // Frees memory, with |ptr| as returned by |RawAlloc()|.
  ALWAYS_INLINE void RawFree(void* ptr, Page* page);

  internal::ThreadCache* thread_cache_for_testing() const {
    return with_thread_cache ? internal::g_thread_cache.get() : nullptr;
  }

 private:
  // Allocates memory, without any cookies / tags.
  //
  // |flags| and |size| are as in AllocFlags(). |allocated_size| and
  // |is_already_zeroed| are output only. |allocated_size| is guaranteed to be
  // larger or equal to |size|.
  ALWAYS_INLINE void* RawAlloc(Bucket* bucket,
                               int flags,
                               size_t size,
                               size_t* allocated_size,
                               bool* is_already_zeroed);
@@ -523,13 +530,13 @@ struct BASE_EXPORT PartitionRoot {
#endif
  }

 private:
  ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
                                  size_t raw_size)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  static void RawFreeStatic(void* ptr);

  friend class internal::ThreadCache;
};

static_assert(sizeof(PartitionRoot<internal::ThreadSafe>) ==
@@ -612,6 +619,15 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::Free(void* ptr) {
// static
template <bool thread_safe>
ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
  // The thread cache is added "in the middle" of the main allocator, that is:
  // - After all the cookie/tag management
  // - Before the "raw" allocator.
  //
  // On the deallocation side:
  // 1. Check cookies / tags, adjust the pointer
  // 2. Deallocation
  //    a. Return to the thread cache if possible. If it succeeds, return.
  //    b. Otherwise, call the "raw" allocator <-- Locking
  if (UNLIKELY(!ptr))
    return;
@@ -678,6 +694,19 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
  memset(ptr, kFreedByte, page->GetAllocatedSize());
#endif

  // TLS access can be expensive, do a cheap local check first.
  //
  // Also the thread-unsafe variant doesn't have a use for a thread cache, so
  // make it statically known to the compiler.
  if (thread_safe && root->with_thread_cache &&
      LIKELY(!page->bucket->is_direct_mapped())) {
    PA_DCHECK(page->bucket >= root->buckets);
    size_t bucket_index = page->bucket - root->buckets;
    internal::ThreadCache* thread_cache = internal::g_thread_cache.get();
    if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
      return;
  }

  root->RawFree(ptr, page);
}
@@ -691,6 +720,14 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr, Page* page) {
  deferred_unmap.Run();
}

// static
template <bool thread_safe>
void PartitionRoot<thread_safe>::RawFreeStatic(void* ptr) {
  Page* page = Page::FromPointerNoAlignmentCheck(ptr);
  auto* root = PartitionRoot<thread_safe>::FromPage(page);
  root->RawFree(ptr, page);
}

// static
template <bool thread_safe>
ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidPage(Page* page) {
@@ -811,7 +848,6 @@ PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
  size_t sub_order_index = size & kOrderSubIndexMask[order];
  Bucket* bucket = bucket_lookups[(order << kNumBucketsPerOrderBits) +
                                  order_index + !!sub_order_index];
  PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
  PA_DCHECK(!(bucket->slot_size % kSmallestBucket));
  return bucket;
@@ -834,7 +870,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
    return result;
#else
  PA_DCHECK(initialized);
  void* ret = nullptr;
  const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
  if (UNLIKELY(hooks_enabled)) {
    if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&ret, flags, size,
@@ -858,13 +894,63 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
                                                                   size_t size) {
  // The thread cache is added "in the middle" of the main allocator, that is:
  // - After all the cookie/tag management
  // - Before the "raw" allocator.
  //
  // That is, the general allocation flow is:
  // 1. Adjustment of requested size to make room for tags / cookies
  // 2. Allocation:
  //    a. Call to the thread cache, if it succeeds, go to step 3.
  //    b. Otherwise, call the "raw" allocator <-- Locking
  // 3. Handle cookies/tags, zero allocation if required
  size_t requested_size = size;
  size = internal::PartitionSizeAdjustAdd(allow_extras, size);
  PA_CHECK(size >= requested_size);  // check for overflows

  auto* bucket = SizeToBucket(size);
  size_t allocated_size;
  bool is_already_zeroed;
  void* ret = nullptr;

  // !thread_safe => !with_thread_cache, but adding the condition allows the
  // compiler to statically remove this branch for the thread-unsafe variant.
  if (thread_safe && with_thread_cache) {
    if (UNLIKELY(!internal::g_thread_cache))
      internal::g_thread_cache = internal::ThreadCache::Create(this);

    // bucket->slot_size is 0 for direct-mapped allocations, as their bucket is
    // the sentinel one. However, since we are touching *bucket, we may as well
    // check it directly, rather than fetching the sentinel one, and comparing
    // the addresses. Since the sentinel bucket is *not* part of the buckets
    // array, |bucket_index| is not valid for the sentinel one.
    //
    // TODO(lizeb): Consider making Bucket::sentinel per-PartitionRoot, at the
    // end of the |buckets| array. This would remove this branch.
    if (LIKELY(bucket->slot_size)) {
      PA_DCHECK(bucket != Bucket::get_sentinel_bucket());
      PA_DCHECK(bucket >= buckets && bucket < (buckets + kNumBuckets));
      size_t bucket_index = bucket - buckets;
      ret = internal::g_thread_cache->GetFromCache(bucket_index);
      is_already_zeroed = false;
      allocated_size = bucket->slot_size;

#if DCHECK_IS_ON()
      // Make sure that the allocated pointer comes from the same place it
      // would for a non-thread cache allocation.
      if (ret) {
        Page* page = Page::FromPointerNoAlignmentCheck(ret);
        PA_DCHECK(IsValidPage(page));
        PA_DCHECK(page->bucket == &buckets[bucket_index]);
      }
#endif
    } else {
      PA_DCHECK(bucket == Bucket::get_sentinel_bucket());
    }
  }

  if (!ret)
    ret = RawAlloc(bucket, flags, size, &allocated_size, &is_already_zeroed);

  if (UNLIKELY(!ret))
    return nullptr;
@@ -925,18 +1011,14 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::RawAlloc(
    Bucket* bucket,
    int flags,
    size_t size,
    size_t* allocated_size,
    bool* is_already_zeroed) {
  internal::ScopedGuard<thread_safe> guard{lock_};
  return AllocFromBucket(bucket, flags, size, allocated_size,
                         is_already_zeroed);
}

template <bool thread_safe>
...
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>
#include <atomic>
#include <limits>
#include <memory>
#include <vector>

#include "base/allocator/partition_allocator/partition_alloc.h"
@@ -30,7 +33,7 @@ namespace {
// Change kTimeLimit to something higher if you need more time to capture a
// trace.
constexpr base::TimeDelta kTimeLimit = base::TimeDelta::FromSeconds(2);
constexpr int kWarmupRuns = 10000;
constexpr int kTimeCheckInterval = 100000;

// Size constants are mostly arbitrary, but try to simulate something like CSS
@@ -78,12 +81,10 @@ class PartitionAllocator : public Allocator {
  void* Alloc(size_t size) override {
    return alloc_.AllocFlagsNoHooks(0, size);
  }
  void Free(void* data) override { ThreadSafePartitionRoot::FreeNoHooks(data); }

 private:
  ThreadSafePartitionRoot alloc_{false, false};
};

class TestLoopThread : public PlatformThread::Delegate {
...
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/thread_cache.h"
#include <sys/types.h>
#include <atomic>
#include <vector>
#include "base/allocator/partition_allocator/partition_alloc.h"
namespace base {
namespace internal {
// static
std::unique_ptr<ThreadCache> ThreadCache::Create(
PartitionRoot<internal::ThreadSafe>* root) {
PA_CHECK(root);
// Placement new and RawAlloc() are used, as otherwise when this partition is
// the malloc() implementation, the memory allocated for the new thread cache
// would make this code reentrant.
//
// This also means that deallocation must use RawFreeStatic(), hence the
// operator delete() implementation below.
size_t allocated_size;
bool already_zeroed;
auto* bucket = root->SizeToBucket(sizeof(ThreadCache));
void* buffer =
root->RawAlloc(bucket, PartitionAllocZeroFill, sizeof(ThreadCache),
&allocated_size, &already_zeroed);
ThreadCache* tcache = new (buffer) ThreadCache();
return std::unique_ptr<ThreadCache>(tcache);
}
void ThreadCache::operator delete(void* ptr) {
PartitionRoot<internal::ThreadSafe>::RawFreeStatic(ptr);
}
void ThreadCache::Purge() {
for (Bucket& bucket : buckets_) {
size_t count = bucket.count;
while (bucket.freelist_head) {
auto* entry = bucket.freelist_head;
bucket.freelist_head = EncodedPartitionFreelistEntry::Decode(entry->next);
PartitionRoot<ThreadSafe>::RawFreeStatic(entry);
count--;
}
CHECK_EQ(0u, count);
bucket.count = 0;
}
}
// Since |g_thread_cache| is shared, make sure that no more than one
// PartitionRoot can use it.
static std::atomic<bool> g_has_instance;
// static
void ThreadCache::ClaimThreadCacheAndCheck() {
bool expected = false;
if (!g_has_instance.compare_exchange_strong(expected, true,
std::memory_order_seq_cst,
std::memory_order_seq_cst)) {
PA_CHECK(false)
<< "Only one PartitionRoot is allowed to have a thread cache";
}
}
} // namespace internal
} // namespace base
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
#include <cstdint>
#include <memory>
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/partition_alloc_buildflags.h"
namespace base {
namespace internal {
class ThreadCache;
#pragma clang diagnostic push
// Silences the global constructor/destructor warnings which, despite their
// names, also fire for this thread_local variable.
#pragma clang diagnostic ignored "-Wglobal-constructors"
#pragma clang diagnostic ignored "-Wexit-time-destructors"
// This is *not* using base::NoDestructor<> as we do want the destructor to be
// called when a thread is destroyed.
//
// Cannot be a static member of ThreadCache, as it is "dllexport" (BASE_EXPORT),
// and that is not supported by the Windows linker.
static thread_local std::unique_ptr<ThreadCache> g_thread_cache;
#pragma clang diagnostic pop
// Per-thread cache. *Not* threadsafe, must only be accessed from a single
// thread.
//
// In practice, this is easily enforced as long as only |instance| is
// manipulated, as it is a thread_local member. As such, any
// |ThreadCache::instance->*()| call will necessarily be done from a single
// thread.
class BASE_EXPORT ThreadCache {
public:
// Create a new ThreadCache associated with |root|.
static std::unique_ptr<ThreadCache> Create(PartitionRoot<ThreadSafe>* root);
static std::unique_ptr<ThreadCache> Create(
PartitionRoot<NotThreadSafe>* root) {
IMMEDIATE_CRASH();
}
~ThreadCache();
void operator delete(void* ptr);
ThreadCache(const ThreadCache&) = delete;
ThreadCache(const ThreadCache&&) = delete;
ThreadCache& operator=(const ThreadCache&) = delete;
// CHECK()s that the thread cache has not been claimed by another
// PartitionRoot, and marks it as claimed.
static void ClaimThreadCacheAndCheck();
// Tries to put a memory block at |address| into the cache.
// The block comes from the bucket at index |bucket_index| from the partition
// this cache is for.
//
// Returns true if the memory was put in the cache, and false otherwise. This
// can happen either because the cache is full or the allocation was too
// large.
ALWAYS_INLINE bool MaybePutInCache(void* address, size_t bucket_index);
// Tries to allocate memory from the cache.
// Returns nullptr for failure.
//
// Has the same behavior as RawAlloc(), that is: no cookie nor tag handling.
ALWAYS_INLINE void* GetFromCache(size_t bucket_index);
// Empties the cache.
void Purge();
size_t bucket_count_for_testing(size_t index) const {
return buckets_[index].count;
}
private:
ThreadCache() = default;
struct Bucket {
size_t count;
PartitionFreelistEntry* freelist_head;
};
// TODO(lizeb): Optimize the threshold, and define it as an allocation size
// rather than a bucket index.
static constexpr size_t kBucketCount = 40;
static_assert(
kBucketCount < kNumBuckets,
"Cannot have more cached buckets than what the allocator supports");
// TODO(lizeb): Tune this constant, and adapt it to the bucket size /
// allocation patterns.
static constexpr size_t kMaxCountPerBucket = 100;
Bucket buckets_[kBucketCount];
FRIEND_TEST_ALL_PREFIXES(ThreadCacheTest, LargeAllocationsAreNotCached);
FRIEND_TEST_ALL_PREFIXES(ThreadCacheTest, MultipleThreadCaches);
};
// Some platforms which don't build PartitionAlloc (iOS for instance) still end
// up including partition_alloc.h, and then this file as a side-effect. The
// proper fix is to fix these dependencies, and make partition_alloc.h inclusion
// require USE_PARTITION_ALLOC.
inline ThreadCache::~ThreadCache() {
#if !BUILDFLAG(USE_PARTITION_ALLOC)
IMMEDIATE_CRASH();
#else
Purge();
#endif // !BUILDFLAG(USE_PARTITION_ALLOC)
}
#if !BUILDFLAG(USE_PARTITION_ALLOC)
inline void ThreadCache::operator delete(void* ptr) {}
#endif // !BUILDFLAG(USE_PARTITION_ALLOC)
ALWAYS_INLINE bool ThreadCache::MaybePutInCache(void* address,
size_t bucket_index) {
if (bucket_index >= kBucketCount)
return false;
auto& bucket = buckets_[bucket_index];
if (bucket.count >= kMaxCountPerBucket)
return false;
PA_DCHECK(bucket.count != 0 || bucket.freelist_head == nullptr);
auto* entry = reinterpret_cast<PartitionFreelistEntry*>(address);
entry->next = PartitionFreelistEntry::Encode(bucket.freelist_head);
bucket.freelist_head = entry;
bucket.count++;
return true;
}
ALWAYS_INLINE void* ThreadCache::GetFromCache(size_t bucket_index) {
// Only handle "small" allocations.
if (bucket_index >= kBucketCount)
return nullptr;
auto& bucket = buckets_[bucket_index];
auto* result = bucket.freelist_head;
if (!result) {
PA_DCHECK(bucket.count == 0);
return nullptr;
}
PA_DCHECK(bucket.count != 0);
auto* next = EncodedPartitionFreelistEntry::Decode(result->next);
PA_DCHECK(result != next);
bucket.count--;
PA_DCHECK(bucket.count != 0 || !next);
bucket.freelist_head = next;
return result;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_THREAD_CACHE_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/thread_cache.h"
#include <vector>
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/synchronization/lock.h"
#include "base/test/bind_test_util.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
// Only a single partition can have a thread cache at a time. When
// PartitionAlloc is malloc(), it is already in use.
//
// With *SAN, PartitionAlloc is replaced in partition_alloc.h by ASAN, so we
// cannot test the thread cache.
//
// Finally, the thread cache currently uses `thread_local`, which causes issues
// on Windows 7 (at least). As long as it doesn't use something else on Windows,
// the cache (and these tests) stays disabled there.
#if !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
!defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && !defined(OS_WIN)
namespace base {
namespace internal {
namespace {
class LambdaThreadDelegate : public PlatformThread::Delegate {
public:
explicit LambdaThreadDelegate(OnceClosure f) : f_(std::move(f)) {}
void ThreadMain() override { std::move(f_).Run(); }
private:
OnceClosure f_;
};
// Need to be a global object without a destructor, because the cache is a
// global object with a destructor (to handle thread destruction), and the
// PartitionRoot has to outlive it.
//
// Forbid extras, since they make finding out which bucket is used harder.
NoDestructor<ThreadSafePartitionRoot> g_root{true, true};
size_t BucketIndexForSize(size_t size) {
auto* bucket = g_root->SizeToBucket(size);
return bucket - g_root->buckets;
}
size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
size_t bucket_index = BucketIndexForSize(size);
std::vector<void*> allocated_data;
for (size_t i = 0; i < count; ++i) {
allocated_data.push_back(g_root->Alloc(size, ""));
}
for (void* ptr : allocated_data) {
g_root->Free(ptr);
}
return bucket_index;
}
} // namespace
class ThreadCacheTest : public ::testing::Test {
protected:
void SetUp() override {
auto* tcache = g_root->thread_cache_for_testing();
if (tcache)
tcache->Purge();
}
void TearDown() override {}
};
TEST_F(ThreadCacheTest, Simple) {
const size_t kTestSize = 12;
void* ptr = g_root->Alloc(kTestSize, "");
ASSERT_TRUE(ptr);
// There is a cache.
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_TRUE(tcache);
size_t index = BucketIndexForSize(kTestSize);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
g_root->Free(ptr);
// Freeing fills the thread cache.
EXPECT_EQ(1u, tcache->bucket_count_for_testing(index));
void* ptr2 = g_root->Alloc(kTestSize, "");
EXPECT_EQ(ptr, ptr2);
// Allocated from the thread cache.
EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
}
TEST_F(ThreadCacheTest, InexactSizeMatch) {
const size_t kTestSize = 12;
void* ptr = g_root->Alloc(kTestSize, "");
ASSERT_TRUE(ptr);
// There is a cache.
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_TRUE(tcache);
size_t index = BucketIndexForSize(kTestSize);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
g_root->Free(ptr);
// Freeing fills the thread cache.
EXPECT_EQ(1u, tcache->bucket_count_for_testing(index));
void* ptr2 = g_root->Alloc(kTestSize + 1, "");
EXPECT_EQ(ptr, ptr2);
// Allocated from the thread cache.
EXPECT_EQ(0u, tcache->bucket_count_for_testing(index));
}
TEST_F(ThreadCacheTest, MultipleObjectsCachedPerBucket) {
size_t bucket_index = FillThreadCacheAndReturnIndex(100, 10);
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_EQ(10u, tcache->bucket_count_for_testing(bucket_index));
}
TEST_F(ThreadCacheTest, ObjectsCachedCountIsLimited) {
size_t bucket_index = FillThreadCacheAndReturnIndex(100, 1000);
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_LT(tcache->bucket_count_for_testing(bucket_index), 1000u);
}
TEST_F(ThreadCacheTest, Purge) {
size_t bucket_index = FillThreadCacheAndReturnIndex(100, 10);
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_EQ(10u, tcache->bucket_count_for_testing(bucket_index));
tcache->Purge();
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
}
TEST_F(ThreadCacheTest, NoCrossPartitionCache) {
const size_t kTestSize = 12;
ThreadSafePartitionRoot root{true, false};
size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);
void* ptr = root.Alloc(kTestSize, "");
ASSERT_TRUE(ptr);
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_EQ(1u, tcache->bucket_count_for_testing(bucket_index));
ThreadSafePartitionRoot::Free(ptr);
EXPECT_EQ(1u, tcache->bucket_count_for_testing(bucket_index));
}
#if ENABLE_THREAD_CACHE_STATISTICS // Required to record hits and misses.
TEST_F(ThreadCacheTest, LargeAllocationsAreNotCached) {
auto* tcache = g_root->thread_cache_for_testing();
size_t hits_before = tcache ? tcache->hits_ : 0;
FillThreadCacheAndReturnIndex(100 * 1024);
tcache = g_root->thread_cache_for_testing();
EXPECT_EQ(hits_before, tcache->hits_);
}
#endif
TEST_F(ThreadCacheTest, MultipleThreadCaches) {
const size_t kTestSize = 100;
FillThreadCacheAndReturnIndex(kTestSize);
auto* parent_thread_tcache = g_root->thread_cache_for_testing();
ASSERT_TRUE(parent_thread_tcache);
LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
EXPECT_FALSE(g_root->thread_cache_for_testing()); // No allocations yet.
FillThreadCacheAndReturnIndex(kTestSize);
auto* tcache = g_root->thread_cache_for_testing();
EXPECT_TRUE(tcache);
EXPECT_NE(parent_thread_tcache, tcache);
})};
PlatformThreadHandle thread_handle;
PlatformThread::Create(0, &delegate, &thread_handle);
PlatformThread::Join(thread_handle);
}
TEST_F(ThreadCacheTest, ThreadCacheReclaimedWhenThreadExits) {
const size_t kTestSize = 100;
// Make sure that there is always at least one object allocated in the test
// bucket, so that the PartitionPage is not reclaimed.
void* tmp = g_root->Alloc(kTestSize, "");
void* other_thread_ptr;
LambdaThreadDelegate delegate{BindLambdaForTesting([&]() {
EXPECT_FALSE(g_root->thread_cache_for_testing()); // No allocations yet.
other_thread_ptr = g_root->Alloc(kTestSize, "");
g_root->Free(other_thread_ptr);
// |other_thread_ptr| is now in the thread cache.
})};
PlatformThreadHandle thread_handle;
PlatformThread::Create(0, &delegate, &thread_handle);
PlatformThread::Join(thread_handle);
void* this_thread_ptr = g_root->Alloc(kTestSize, "");
// |other_thread_ptr| was returned to the central allocator, and is returned
// here, as it comes from the freelist.
EXPECT_EQ(this_thread_ptr, other_thread_ptr);
g_root->Free(other_thread_ptr);
g_root->Free(tmp);
}
} // namespace internal
} // namespace base
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// !defined(MEMORY_TOOL_REPLACES_ALLOCATOR) && !defined(OS_WIN)