Commit 3543808c authored by Benoit Lize, committed by Commit Bot

[PartitionAlloc] Trigger all-thread Purge() from any thread.

Thread caches can accumulate memory over time, increasing memory
footprint. It is then necessary to Purge() them to avoid this. However
thread caches are meant to be thread-unsafe and not use locking on the
fast path, so they cannot be purged from another thread.

This adds a mechanism to purge all thread caches, by synchronously
purging the current thread, and asking other threads to Purge() at the
next deallocation. This does not solve the problem of a thread sleeping
and never deallocating, which will be addressed later.

Bug: 998048
Change-Id: I82c70be128acf2e0801623e81c353bdbc80231a2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2428985
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#810650}
parent 26f4012a
...@@ -679,9 +679,8 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) { ...@@ -679,9 +679,8 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
} }
} }
// Purges only this thread's cache. if (with_thread_cache)
if (with_thread_cache && internal::ThreadCache::Get()) internal::ThreadCacheRegistry::Instance().PurgeAll();
internal::ThreadCache::Get()->Purge();
} }
template <bool thread_safe> template <bool thread_safe>
......
...@@ -81,6 +81,28 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only, ...@@ -81,6 +81,28 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only,
} }
} }
void ThreadCacheRegistry::PurgeAll() {
auto* current_thread_tcache = ThreadCache::Get();
{
AutoLock scoped_locker(GetLock());
ThreadCache* tcache = list_head_;
while (tcache) {
// Cannot purge directly, need to ask the other thread to purge "at some
// point".
// Note that this will not work if the other thread is sleeping forever.
// TODO(lizeb): Handle sleeping threads.
if (tcache != current_thread_tcache)
tcache->SetShouldPurge();
tcache = tcache->next_;
}
}
// May take a while, don't hold the lock while purging.
if (current_thread_tcache)
current_thread_tcache->Purge();
}
// static // static
void ThreadCache::Init(PartitionRoot<ThreadSafe>* root) { void ThreadCache::Init(PartitionRoot<ThreadSafe>* root) {
bool ok = PartitionTlsCreate(&g_thread_cache_key, DeleteThreadCache); bool ok = PartitionTlsCreate(&g_thread_cache_key, DeleteThreadCache);
...@@ -154,6 +176,12 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const { ...@@ -154,6 +176,12 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->metadata_overhead += sizeof(*this); stats->metadata_overhead += sizeof(*this);
} }
// Flags this cache so that its owning thread triggers Purge() at a later
// point (the flag is checked in MaybePutInCache(), i.e. on deallocation).
// Safe to call from any thread.
void ThreadCache::SetShouldPurge() {
  // We don't need any synchronization, and don't really care if the purge is
  // carried out "right away", hence relaxed atomics.
  should_purge_.store(true, std::memory_order_relaxed);
}
void ThreadCache::Purge() { void ThreadCache::Purge() {
for (Bucket& bucket : buckets_) { for (Bucket& bucket : buckets_) {
size_t count = bucket.count; size_t count = bucket.count;
...@@ -168,6 +196,7 @@ void ThreadCache::Purge() { ...@@ -168,6 +196,7 @@ void ThreadCache::Purge() {
CHECK_EQ(0u, count); CHECK_EQ(0u, count);
bucket.count = 0; bucket.count = 0;
} }
should_purge_.store(false, std::memory_order_relaxed);
} }
} // namespace internal } // namespace internal
......
...@@ -63,6 +63,10 @@ class BASE_EXPORT ThreadCacheRegistry { ...@@ -63,6 +63,10 @@ class BASE_EXPORT ThreadCacheRegistry {
void UnregisterThreadCache(ThreadCache* cache); void UnregisterThreadCache(ThreadCache* cache);
// Prints statistics for all thread caches, or this thread's only. // Prints statistics for all thread caches, or this thread's only.
void DumpStats(bool my_thread_only, ThreadCacheStats* stats); void DumpStats(bool my_thread_only, ThreadCacheStats* stats);
// Purge() this thread's cache, and asks the other ones to trigger Purge() at
// a later point (during a deallocation).
void PurgeAll();
static Lock& GetLock() { return Instance().lock_; } static Lock& GetLock() { return Instance().lock_; }
private: private:
...@@ -141,8 +145,12 @@ class BASE_EXPORT ThreadCache { ...@@ -141,8 +145,12 @@ class BASE_EXPORT ThreadCache {
// Has the same behavior as RawAlloc(), that is: no cookie nor tag handling. // Has the same behavior as RawAlloc(), that is: no cookie nor tag handling.
ALWAYS_INLINE void* GetFromCache(size_t bucket_index); ALWAYS_INLINE void* GetFromCache(size_t bucket_index);
// Asks this cache to trigger |Purge()| at a later point. Can be called from
// any thread.
void SetShouldPurge();
// Empties the cache. // Empties the cache.
// The Partition lock must *not* be held when calling this. // The Partition lock must *not* be held when calling this.
// Must be called from the thread this cache is for.
void Purge(); void Purge();
void AccumulateStats(ThreadCacheStats* stats) const; void AccumulateStats(ThreadCacheStats* stats) const;
...@@ -168,6 +176,7 @@ class BASE_EXPORT ThreadCache { ...@@ -168,6 +176,7 @@ class BASE_EXPORT ThreadCache {
// allocation patterns. // allocation patterns.
static constexpr size_t kMaxCountPerBucket = 100; static constexpr size_t kMaxCountPerBucket = 100;
std::atomic<bool> should_purge_;
Bucket buckets_[kBucketCount]; Bucket buckets_[kBucketCount];
ThreadCacheStats stats_; ThreadCacheStats stats_;
PartitionRoot<ThreadSafe>* root_; PartitionRoot<ThreadSafe>* root_;
...@@ -187,6 +196,9 @@ class BASE_EXPORT ThreadCache { ...@@ -187,6 +196,9 @@ class BASE_EXPORT ThreadCache {
ALWAYS_INLINE bool ThreadCache::MaybePutInCache(void* address, ALWAYS_INLINE bool ThreadCache::MaybePutInCache(void* address,
size_t bucket_index) { size_t bucket_index) {
if (UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
Purge();
INCREMENT_COUNTER(stats_.cache_fill_count); INCREMENT_COUNTER(stats_.cache_fill_count);
if (bucket_index >= kBucketCount) { if (bucket_index >= kBucketCount) {
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include "base/allocator/partition_allocator/thread_cache.h" #include "base/allocator/partition_allocator/thread_cache.h"
#include <atomic>
#include <vector> #include <vector>
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
...@@ -347,6 +348,56 @@ TEST_F(ThreadCacheTest, MultipleThreadCachesAccounting) { ...@@ -347,6 +348,56 @@ TEST_F(ThreadCacheTest, MultipleThreadCachesAccounting) {
#endif // defined(PA_ENABLE_THREAD_CACHE_STATISTICS) #endif // defined(PA_ENABLE_THREAD_CACHE_STATISTICS)
// Verifies PurgeAll() semantics: the calling thread's cache is emptied
// synchronously, while another thread's cache is only flagged, and is emptied
// on that thread's next deallocation (not on allocation).
TEST_F(ThreadCacheTest, PurgeAll) NO_THREAD_SAFETY_ANALYSIS {
  // Handshake flags between the two threads; release/acquire pairs publish
  // the writes made before each store to the thread that observes it.
  std::atomic<bool> other_thread_started{false};
  std::atomic<bool> purge_called{false};

  const size_t kTestSize = 100;
  // Put one entry of this size class into this thread's cache.
  size_t bucket_index = FillThreadCacheAndReturnIndex(kTestSize);
  ThreadCache* this_thread_tcache = g_root->thread_cache_for_testing();
  // Written by the other thread before it sets |other_thread_started|.
  ThreadCache* other_thread_tcache = nullptr;

  LambdaThreadDelegate delegate{
      BindLambdaForTesting([&]() NO_THREAD_SAFETY_ANALYSIS {
        // Fill this (the other) thread's cache, publish its pointer, then
        // wait until the main thread has run PurgeAll().
        FillThreadCacheAndReturnIndex(kTestSize);
        other_thread_tcache = g_root->thread_cache_for_testing();

        other_thread_started.store(true, std::memory_order_release);
        while (!purge_called.load(std::memory_order_acquire)) {
        }

        // Purge() was not triggered from the other thread.
        EXPECT_EQ(1u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
        // Allocations do not trigger Purge().
        void* data = g_root->Alloc(1, "");
        EXPECT_EQ(1u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
        // But deallocations do.
        g_root->Free(data);
        EXPECT_EQ(0u,
                  other_thread_tcache->bucket_count_for_testing(bucket_index));
      })};

  PlatformThreadHandle thread_handle;
  PlatformThread::Create(0, &delegate, &thread_handle);

  // Wait until the other thread's cache exists and holds one entry.
  while (!other_thread_started.load(std::memory_order_acquire)) {
  }

  EXPECT_EQ(1u, this_thread_tcache->bucket_count_for_testing(bucket_index));
  EXPECT_EQ(1u, other_thread_tcache->bucket_count_for_testing(bucket_index));

  ThreadCacheRegistry::Instance().PurgeAll();
  // This thread is synchronously purged.
  EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
  // Not the other one.
  EXPECT_EQ(1u, other_thread_tcache->bucket_count_for_testing(bucket_index));
  // Unblock the other thread, which then checks the deferred-purge behavior.
  purge_called.store(true, std::memory_order_release);
  PlatformThread::Join(thread_handle);
}
} // namespace internal } // namespace internal
} // namespace base } // namespace base
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment