Commit 9d3805f8 authored by Anton Bikineev's avatar Anton Bikineev Committed by Commit Bot

PartitionAlloc: Use Options struct to initialize partitions

This is needed to simplify initialization interface, especially when
PCScan is added.

Bug: 11297512
Change-Id: I674f5b2f237bfb08e4dcbfd83e1fa3854acf9886
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2421676
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#810374}
parent e495b5f4
...@@ -76,7 +76,8 @@ base::ThreadSafePartitionRoot& Allocator() { ...@@ -76,7 +76,8 @@ base::ThreadSafePartitionRoot& Allocator() {
} }
auto* new_root = new (g_allocator_buffer) base::ThreadSafePartitionRoot( auto* new_root = new (g_allocator_buffer) base::ThreadSafePartitionRoot(
false /* enforce_alignment */, true /* enable_thread_cache */); {base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kEnabled});
g_root_.store(new_root, std::memory_order_release); g_root_.store(new_root, std::memory_order_release);
// Semantically equivalent to base::Lock::Release(). // Semantically equivalent to base::Lock::Release().
...@@ -105,8 +106,9 @@ void* PartitionCalloc(const AllocatorDispatch*, ...@@ -105,8 +106,9 @@ void* PartitionCalloc(const AllocatorDispatch*,
base::ThreadSafePartitionRoot* AlignedAllocator() { base::ThreadSafePartitionRoot* AlignedAllocator() {
// Since the general-purpose allocator uses the thread cache, this one cannot. // Since the general-purpose allocator uses the thread cache, this one cannot.
static base::NoDestructor<base::ThreadSafePartitionRoot> aligned_allocator{ static base::NoDestructor<base::ThreadSafePartitionRoot> aligned_allocator(
true /* enforce_alignment */, false /* enable_thread_cache */}; base::PartitionOptions{base::PartitionOptions::Alignment::kAlignedAlloc,
base::PartitionOptions::ThreadCache::kDisabled});
return aligned_allocator.get(); return aligned_allocator.get();
} }
......
...@@ -258,8 +258,7 @@ void InitBucketIndexLookup(PartitionRoot<thread_safe>* root) { ...@@ -258,8 +258,7 @@ void InitBucketIndexLookup(PartitionRoot<thread_safe>* root) {
} }
template <bool thread_safe> template <bool thread_safe>
void PartitionRoot<thread_safe>::Init(bool enforce_alignment, void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
bool enable_thread_cache) {
ScopedGuard guard{lock_}; ScopedGuard guard{lock_};
if (initialized) if (initialized)
return; return;
...@@ -272,12 +271,13 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment, ...@@ -272,12 +271,13 @@ void PartitionRoot<thread_safe>::Init(bool enforce_alignment,
// If alignment needs to be enforced, disallow adding cookies and/or tags at // If alignment needs to be enforced, disallow adding cookies and/or tags at
// the beginning of the slot. // the beginning of the slot.
allow_extras = !enforce_alignment; allow_extras = (opts.alignment != PartitionOptions::Alignment::kAlignedAlloc);
#if !defined(OS_POSIX) #if !defined(OS_POSIX)
// TLS in ThreadCache not supported on other OSes. // TLS in ThreadCache not supported on other OSes.
with_thread_cache = false; with_thread_cache = false;
#else #else
with_thread_cache = enable_thread_cache; with_thread_cache =
(opts.thread_cache == PartitionOptions::ThreadCache::kEnabled);
if (with_thread_cache) if (with_thread_cache)
internal::ThreadCache::Init(this); internal::ThreadCache::Init(this);
...@@ -874,29 +874,21 @@ PartitionAllocator<thread_safe>::~PartitionAllocator() { ...@@ -874,29 +874,21 @@ PartitionAllocator<thread_safe>::~PartitionAllocator() {
} }
template <bool thread_safe> template <bool thread_safe>
void PartitionAllocator<thread_safe>::init( void PartitionAllocator<thread_safe>::init(PartitionOptions opts) {
PartitionAllocatorAlignment alignment,
bool with_thread_cache) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(!with_thread_cache) PA_CHECK(opts.thread_cache == PartitionOptions::ThreadCache::kDisabled)
<< "Cannot use a thread cache when PartitionAlloc is malloc()."; << "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif #endif
partition_root_.Init( partition_root_.Init(opts);
alignment ==
PartitionAllocatorAlignment::kAlignedAlloc /* enforce_alignment */,
with_thread_cache);
PartitionAllocMemoryReclaimer::Instance()->RegisterPartition( PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
&partition_root_); &partition_root_);
} }
template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator(); template PartitionAllocator<internal::ThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::ThreadSafe>::init( template void PartitionAllocator<internal::ThreadSafe>::init(PartitionOptions);
PartitionAllocatorAlignment alignment,
bool with_thread_cache);
template PartitionAllocator<internal::NotThreadSafe>::~PartitionAllocator(); template PartitionAllocator<internal::NotThreadSafe>::~PartitionAllocator();
template void PartitionAllocator<internal::NotThreadSafe>::init( template void PartitionAllocator<internal::NotThreadSafe>::init(
PartitionAllocatorAlignment alignment, PartitionOptions);
bool with_thread_cache);
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr) { void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr) {
......
...@@ -367,6 +367,31 @@ constexpr size_t kOrderSubIndexMask[BITS_PER_SIZE_T + 1] = { ...@@ -367,6 +367,31 @@ constexpr size_t kOrderSubIndexMask[BITS_PER_SIZE_T + 1] = {
} // namespace } // namespace
// Options struct used to configure PartitionRoot and PartitionAllocator.
// Designed for aggregate initialization, e.g.
//   {PartitionOptions::Alignment::kRegular,
//    PartitionOptions::ThreadCache::kEnabled}
// so member order matters to callers; do not reorder fields.
struct PartitionOptions {
// Controls the alignment guarantee of the partition's allocations.
enum class Alignment {
// By default all allocations will be aligned to 8B (16B if
// BUILDFLAG_INTERNAL_USE_PARTITION_ALLOC_AS_MALLOC is true).
kRegular,
// In addition to the above alignment enforcement, this option allows using
// AlignedAlloc() which can align at a larger boundary. This option comes
// at a cost of disallowing cookies on Debug builds and tags/ref-counts for
// CheckedPtr. It also causes all allocations to go outside of GigaCage, so
// that CheckedPtr can easily tell if a pointer comes with a tag/ref-count
// or not.
kAlignedAlloc,
};
// Controls whether this partition uses the per-thread cache. Only one
// partition at a time may enable it (see PartitionAllocator::init, which
// CHECKs it is disabled when PartitionAlloc is used as malloc()).
enum class ThreadCache {
kDisabled,
kEnabled,
};
// Defaults match the least-restrictive configuration: regular alignment,
// no thread cache ({} yields this, as used by PartitionAllocator::init).
Alignment alignment = Alignment::kRegular;
ThreadCache thread_cache = ThreadCache::kDisabled;
};
// Never instantiate a PartitionRoot directly, instead use // Never instantiate a PartitionRoot directly, instead use
// PartitionAllocator. // PartitionAllocator.
template <bool thread_safe> template <bool thread_safe>
...@@ -429,9 +454,7 @@ struct BASE_EXPORT PartitionRoot { ...@@ -429,9 +454,7 @@ struct BASE_EXPORT PartitionRoot {
Bucket sentinel_bucket; Bucket sentinel_bucket;
PartitionRoot() = default; PartitionRoot() = default;
PartitionRoot(bool enable_tag_pointers, bool enable_thread_cache) { explicit PartitionRoot(PartitionOptions opts) { Init(opts); }
Init(enable_tag_pointers, enable_thread_cache);
}
~PartitionRoot(); ~PartitionRoot();
// Public API // Public API
...@@ -444,7 +467,7 @@ struct BASE_EXPORT PartitionRoot { ...@@ -444,7 +467,7 @@ struct BASE_EXPORT PartitionRoot {
// //
// Moving it a layer lower couples PartitionRoot and PartitionBucket, but // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
// preserves the layering of the includes. // preserves the layering of the includes.
void Init(bool enforce_alignment, bool enable_thread_cache); void Init(PartitionOptions);
ALWAYS_INLINE static bool IsValidPage(Page* page); ALWAYS_INLINE static bool IsValidPage(Page* page);
ALWAYS_INLINE static PartitionRoot* FromPage(Page* page); ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
...@@ -1144,28 +1167,13 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) { ...@@ -1144,28 +1167,13 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
#endif #endif
} }
// Alignment policy passed to PartitionAllocator::init(); kRegular is the
// default argument there. (Replaced by PartitionOptions::Alignment in the
// new interface.)
enum class PartitionAllocatorAlignment {
// By default all allocations will be aligned to 8B (16B if
// BUILDFLAG_INTERNAL_USE_PARTITION_ALLOC_AS_MALLOC is true).
kRegular,
// In addition to the above alignment enforcement, this option allows using
// AlignedAlloc() which can align at a larger boundary.
// This option comes at a cost of disallowing cookies on Debug builds and tags
// for CheckedPtr. It also causes all allocations to go outside of GigaCage,
// so that CheckedPtr can easily tell if a pointer comes with a tag or not.
kAlignedAlloc,
};
namespace internal { namespace internal {
template <bool thread_safe> template <bool thread_safe>
struct BASE_EXPORT PartitionAllocator { struct BASE_EXPORT PartitionAllocator {
PartitionAllocator() = default; PartitionAllocator() = default;
~PartitionAllocator(); ~PartitionAllocator();
void init(PartitionAllocatorAlignment alignment = void init(PartitionOptions = {});
PartitionAllocatorAlignment::kRegular,
bool with_thread_cache = false);
ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; } ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
private: private:
......
...@@ -84,7 +84,8 @@ class PartitionAllocator : public Allocator { ...@@ -84,7 +84,8 @@ class PartitionAllocator : public Allocator {
void Free(void* data) override { ThreadSafePartitionRoot::FreeNoHooks(data); } void Free(void* data) override { ThreadSafePartitionRoot::FreeNoHooks(data); }
private: private:
ThreadSafePartitionRoot alloc_{false, false}; ThreadSafePartitionRoot alloc_{{PartitionOptions::Alignment::kRegular,
PartitionOptions::ThreadCache::kDisabled}};
}; };
class TestLoopThread : public PlatformThread::Delegate { class TestLoopThread : public PlatformThread::Delegate {
......
...@@ -156,8 +156,8 @@ class PartitionAllocTest : public testing::Test { ...@@ -156,8 +156,8 @@ class PartitionAllocTest : public testing::Test {
void SetUp() override { void SetUp() override {
scoped_feature_list.InitWithFeatures({kPartitionAllocGigaCage}, {}); scoped_feature_list.InitWithFeatures({kPartitionAllocGigaCage}, {});
PartitionAllocGlobalInit(HandleOOM); PartitionAllocGlobalInit(HandleOOM);
allocator.init(PartitionAllocatorAlignment::kRegular); allocator.init({PartitionOptions::Alignment::kRegular});
aligned_allocator.init(PartitionAllocatorAlignment::kAlignedAlloc); aligned_allocator.init({PartitionOptions::Alignment::kAlignedAlloc});
test_bucket_index_ = SizeToIndex(kRealAllocSize); test_bucket_index_ = SizeToIndex(kRealAllocSize);
} }
......
...@@ -59,7 +59,9 @@ class DeltaCounter { ...@@ -59,7 +59,9 @@ class DeltaCounter {
// PartitionRoot has to outlive it. // PartitionRoot has to outlive it.
// //
// Forbid extras, since they make finding out which bucket is used harder. // Forbid extras, since they make finding out which bucket is used harder.
NoDestructor<ThreadSafePartitionRoot> g_root{true, true}; NoDestructor<ThreadSafePartitionRoot> g_root{
PartitionOptions{PartitionOptions::Alignment::kAlignedAlloc,
PartitionOptions::ThreadCache::kEnabled}};
size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) { size_t FillThreadCacheAndReturnIndex(size_t size, size_t count = 1) {
uint16_t bucket_index = PartitionRoot<ThreadSafe>::SizeToBucketIndex(size); uint16_t bucket_index = PartitionRoot<ThreadSafe>::SizeToBucketIndex(size);
...@@ -157,7 +159,8 @@ TEST_F(ThreadCacheTest, Purge) { ...@@ -157,7 +159,8 @@ TEST_F(ThreadCacheTest, Purge) {
TEST_F(ThreadCacheTest, NoCrossPartitionCache) { TEST_F(ThreadCacheTest, NoCrossPartitionCache) {
const size_t kTestSize = 12; const size_t kTestSize = 12;
ThreadSafePartitionRoot root{true, false}; ThreadSafePartitionRoot root{{PartitionOptions::Alignment::kAlignedAlloc,
PartitionOptions::ThreadCache::kDisabled}};
size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize); size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);
void* ptr = root.Alloc(kTestSize, ""); void* ptr = root.Alloc(kTestSize, "");
......
...@@ -75,8 +75,8 @@ bool Partitions::InitializeOnce() { ...@@ -75,8 +75,8 @@ bool Partitions::InitializeOnce() {
// - BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC): Only one thread cache at a time // - BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC): Only one thread cache at a time
// is supported, in this case it is already claimed by malloc(). // is supported, in this case it is already claimed by malloc().
#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
fast_malloc_allocator.init(base::PartitionAllocatorAlignment::kRegular, fast_malloc_allocator.init({base::PartitionOptions::Alignment::kRegular,
true /* with_thread_cache */); base::PartitionOptions::ThreadCache::kEnabled});
#else #else
fast_malloc_allocator.init(); fast_malloc_allocator.init();
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment