Commit 21349955 authored by Benoit Lize, committed by Chromium LUCI CQ

[PartitionAlloc] Use exponential backoff for periodic purge.

Periodic purge should ideally not be scheduled at all when a process is
fully idle from the allocator's standpoint, that is, when it doesn't
allocate. The current scheme achieves that, but at the cost of non-trivial
complexity (including managing reentrancy in the allocator).

Make the code simpler by modulating the purge frequency with exponential
backoff, rather than with the complex mechanism we have now.
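
As a rough sketch of the approach (illustration only, not the code in this
CL), an exponential-backoff scheduler only needs to track its current
interval and grow or shrink it based on observed allocation activity. The
PurgeBackoff class, its constants, and the activity threshold below are
hypothetical:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical sketch of exponential backoff for a periodic purge task.
class PurgeBackoff {
 public:
  static constexpr int64_t kMinIntervalSec = 1;   // Purge at most once per second.
  static constexpr int64_t kMaxIntervalSec = 60;  // Purge at least once per minute.

  // Returns the delay to use when posting the next purge task, based on
  // whether the process allocated enough since the previous purge.
  int64_t NextIntervalSec(uint64_t allocations_since_last_purge,
                          uint64_t activity_threshold) {
    if (allocations_since_last_purge >= activity_threshold) {
      // Active process: purge more often, down to the minimum interval.
      interval_sec_ = std::max(kMinIntervalSec, interval_sec_ / 2);
    } else {
      // Idle process: purge less often, up to the maximum interval.
      interval_sec_ = std::min(kMaxIntervalSec, interval_sec_ * 2);
    }
    return interval_sec_;
  }

 private:
  int64_t interval_sec_ = 2;  // Small default so an active process purges soon.
};

int main() {
  PurgeBackoff backoff;
  // An idle process (no allocations) drifts towards the maximum interval:
  // prints 4, 8, 16, 32, 60, 60.
  for (int i = 0; i < 6; i++)
    printf("%lld\n", static_cast<long long>(backoff.NextIntervalSec(0, 1000)));
  return 0;
}

The payoff is that a fully idle process settles into a single, rarely-firing
task, instead of needing the stop/notify/restart machinery the old scheme
used to avoid rescheduling (and its reentrancy handling).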

Bug: 998048
Change-Id: I7faa5b2bb8450e41dba1c6cbdc0f65cd2d9a1dec
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2593373
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#837539}
parent e07ae77a
@@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/thread_cache.h"
#include <sys/types.h>
#include <algorithm>
#include <atomic>
#include <vector>
@@ -31,7 +32,9 @@ static std::atomic<bool> g_has_instance;
} // namespace
constexpr base::TimeDelta ThreadCacheRegistry::kPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kMinPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kMaxPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kDefaultPurgeInterval;
// static
ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
@@ -106,21 +109,26 @@ void ThreadCacheRegistry::PurgeAll() {
}
void ThreadCacheRegistry::StartPeriodicPurge() {
if (periodic_purge_running_)
return;
periodic_purge_running_ = true;
PostDelayedPurgeTask();
}
void ThreadCacheRegistry::PostDelayedPurgeTask() {
PA_DCHECK(!has_pending_purge_task_);
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&ThreadCacheRegistry::PeriodicPurge,
base::Unretained(this)),
kPurgeInterval);
has_pending_purge_task_ = true;
purge_interval_);
}
void ThreadCacheRegistry::PeriodicPurge() {
has_pending_purge_task_ = false;
// To stop periodic purge for testing.
if (!periodic_purge_running_)
return;
ThreadCache* tcache = ThreadCache::Get();
PA_DCHECK(tcache);
uint64_t allocations = tcache->stats_.alloc_count;
@@ -131,52 +139,36 @@ void ThreadCacheRegistry::PeriodicPurge() {
// assume that the main thread is a reasonable proxy for the process activity,
// where the main thread is the current one.
//
// If we didn't see enough allocations since the last purge, don't schedule a
// new one, and ask the thread cache to notify us of deallocations. This makes
// the next |kMinMainThreadAllocationsForPurging| deallocations slightly
// slower.
// If there were not enough allocations since the last purge, back off. On the
// other hand, if there were many allocations, make purge more frequent, but
// always in a set frequency range.
//
// Once the threshold is reached, reschedule a purge task. We count
// deallocations rather than allocations because these are the ones that fill
// the cache, and also because we already have a check on the deallocation
// path, not on the allocation one that we don't want to slow down.
bool enough_allocations =
allocations_since_last_purge >= kMinMainThreadAllocationsForPurging;
tcache->SetNotifiesRegistry(!enough_allocations);
deallocations_ = 0;
PurgeAll();
if (enough_allocations) {
allocations_at_last_purge_ = allocations;
PostDelayedPurgeTask();
// There is a potential drawback: a process that was idle for a long time and
// suddenly becomes very active will take some time to go back to regularly
// scheduled purge with a small enough interval. This is the case for instance
// of a renderer moving to foreground. To mitigate that, if the number of
// allocations since the last purge was very large, make a greater leap to
// faster purging.
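// As a concrete illustration with the constants declared in the header
// (kMinPurgeInterval = 1s, kDefaultPurgeInterval = 2s, kMaxPurgeInterval =
// 1min, kMinMainThreadAllocationsForPurging = 1000):
// - more than 10000 allocations: the interval becomes min(2s, interval / 2),
//   i.e. at most the default, to quickly return to frequent purging;
// - more than 2000 allocations: the interval is halved, floored at 1s;
// - fewer than 1000 allocations: the interval is doubled, capped at 1min;
// - otherwise (1000 to 2000 allocations): the interval is left unchanged.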
if (allocations_since_last_purge > 10 * kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::min(kDefaultPurgeInterval, purge_interval_ / 2);
} else if (allocations_since_last_purge >
2 * kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::max(kMinPurgeInterval, purge_interval_ / 2);
} else if (allocations_since_last_purge <
kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::min(kMaxPurgeInterval, purge_interval_ * 2);
}
}
void ThreadCacheRegistry::OnDeallocation() {
deallocations_++;
if (deallocations_ > kMinMainThreadAllocationsForPurging) {
ThreadCache* tcache = ThreadCache::Get();
PA_DCHECK(tcache);
deallocations_ = 0;
tcache->SetNotifiesRegistry(false);
if (has_pending_purge_task_)
return;
PurgeAll();
// This is called from the thread cache, which is called from the central
// allocator. This means that any allocation made by task posting will make
// it reentrant, unless we disable the thread cache.
tcache->Disable();
PostDelayedPurgeTask();
tcache->Enable();
}
allocations_at_last_purge_ = allocations;
PostDelayedPurgeTask();
}
void ThreadCacheRegistry::ResetForTesting() {
PA_CHECK(!has_pending_purge_task_);
allocations_at_last_purge_ = 0;
deallocations_ = 0;
purge_interval_ = kDefaultPurgeInterval;
periodic_purge_running_ = false;
}
// static
@@ -225,12 +217,12 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
ThreadCache::ThreadCache(PartitionRoot<ThreadSafe>* root)
: buckets_(),
should_purge_(false),
stats_(),
root_(root),
registry_(&ThreadCacheRegistry::Instance()),
next_(nullptr),
prev_(nullptr) {
registry_->RegisterThreadCache(this);
ThreadCacheRegistry::Instance().RegisterThreadCache(this);
for (int index = 0; index < kBucketCount; index++) {
const auto& root_bucket = root->buckets[index];
@@ -261,7 +253,7 @@ ThreadCache::ThreadCache(PartitionRoot<ThreadSafe>* root)
}
ThreadCache::~ThreadCache() {
registry_->UnregisterThreadCache(this);
ThreadCacheRegistry::Instance().UnregisterThreadCache(this);
Purge();
}
@@ -360,22 +352,6 @@ void ThreadCache::ClearBucket(ThreadCache::Bucket& bucket, size_t limit) {
PA_DCHECK(bucket.count == limit);
}
void ThreadCache::HandleNonNormalMode() {
switch (mode_.load(std::memory_order_relaxed)) {
case Mode::kPurge:
PurgeInternal();
mode_.store(Mode::kNormal, std::memory_order_relaxed);
break;
case Mode::kNotifyRegistry:
registry_->OnDeallocation();
break;
default:
break;
}
}
void ThreadCache::ResetForTesting() {
stats_.alloc_count = 0;
stats_.alloc_hits = 0;
@@ -392,7 +368,7 @@ void ThreadCache::ResetForTesting() {
stats_.metadata_overhead = 0;
Purge();
mode_.store(Mode::kNormal, std::memory_order_relaxed);
should_purge_.store(false, std::memory_order_relaxed);
}
void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
@@ -407,30 +383,15 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->cache_fill_hits += stats_.cache_fill_hits;
stats->cache_fill_misses += stats_.cache_fill_misses;
for (size_t i = 0; i < kBucketCount; i++) {
for (const Bucket& bucket : buckets_) {
stats->bucket_total_memory +=
buckets_[i].count * static_cast<size_t>(buckets_[i].slot_size);
bucket.count * static_cast<size_t>(bucket.slot_size);
}
stats->metadata_overhead += sizeof(*this);
}
void ThreadCache::SetShouldPurge() {
// Purge may be triggered by an external event, in which case it should not
// take precedence over the notification mode, otherwise we risk disabling
// periodic purge entirely.
//
// Also, no other thread can set this to notification mode.
if (mode_.load(std::memory_order_relaxed) != Mode::kNormal)
return;
// We don't need any synchronization, and don't really care if the purge is
// carried out "right away", hence relaxed atomics.
mode_.store(Mode::kPurge, std::memory_order_relaxed);
}
void ThreadCache::SetNotifiesRegistry(bool enabled) {
mode_.store(enabled ? Mode::kNotifyRegistry : Mode::kNormal,
std::memory_order_relaxed);
should_purge_.store(true, std::memory_order_relaxed);
}
void ThreadCache::Purge() {
@@ -439,18 +400,11 @@ void ThreadCache::Purge() {
}
void ThreadCache::PurgeInternal() {
should_purge_.store(false, std::memory_order_relaxed);
for (auto& bucket : buckets_)
ClearBucket(bucket, 0);
}
void ThreadCache::Disable() {
root_->with_thread_cache = false;
}
void ThreadCache::Enable() {
root_->with_thread_cache = true;
}
} // namespace internal
} // namespace base
@@ -68,14 +68,15 @@ class BASE_EXPORT ThreadCacheRegistry {
// Starts a periodic timer on the current thread to purge all thread caches.
void StartPeriodicPurge();
void OnDeallocation();
static PartitionLock& GetLock() { return Instance().lock_; }
base::TimeDelta purge_interval_for_testing() const { return purge_interval_; }
bool has_pending_purge_task() const { return has_pending_purge_task_; }
void ResetForTesting();
static constexpr TimeDelta kPurgeInterval = TimeDelta::FromSeconds(1);
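// Bounds and starting value for the exponentially backed-off purge interval,
// and the main-thread allocation count used to gauge process activity.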
static constexpr TimeDelta kMinPurgeInterval = TimeDelta::FromSeconds(1);
static constexpr TimeDelta kMaxPurgeInterval = TimeDelta::FromMinutes(1);
static constexpr TimeDelta kDefaultPurgeInterval = 2 * kMinPurgeInterval;
static constexpr int kMinMainThreadAllocationsForPurging = 1000;
private:
@@ -86,8 +87,8 @@ class BASE_EXPORT ThreadCacheRegistry {
PartitionLock lock_;
ThreadCache* list_head_ GUARDED_BY(GetLock()) = nullptr;
uint64_t allocations_at_last_purge_ = 0;
int deallocations_ = 0;
bool has_pending_purge_task_ = false;
base::TimeDelta purge_interval_ = kDefaultPurgeInterval;
bool periodic_purge_running_ = false;
};
constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
@@ -198,10 +199,6 @@ class BASE_EXPORT ThreadCache {
void Purge();
void AccumulateStats(ThreadCacheStats* stats) const;
// Disables the thread cache for its associated root.
void Disable();
void Enable();
size_t bucket_count_for_testing(size_t index) const {
return buckets_[index].count;
}
@@ -241,11 +238,10 @@ class BASE_EXPORT ThreadCache {
kBucketCount < kNumBuckets,
"Cannot have more cached buckets than what the allocator supports");
std::atomic<Mode> mode_{Mode::kNormal};
Bucket buckets_[kBucketCount];
std::atomic<bool> should_purge_;
ThreadCacheStats stats_;
PartitionRoot<ThreadSafe>* const root_;
ThreadCacheRegistry* const registry_;
#if DCHECK_IS_ON()
bool is_in_thread_cache_ = false;
#endif
@@ -286,8 +282,8 @@ ALWAYS_INLINE bool ThreadCache::MaybePutInCache(void* address,
ClearBucket(bucket, bucket.limit / 2);
}
if (UNLIKELY(mode_.load(std::memory_order_relaxed) != Mode::kNormal))
HandleNonNormalMode();
if (UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
PurgeInternal();
return true;
}
@@ -104,16 +104,13 @@ class ThreadCacheTest : public ::testing::Test {
auto* tcache = g_root->thread_cache_for_testing();
ASSERT_TRUE(tcache);
task_env_.FastForwardBy(2 * ThreadCacheRegistry::kPurgeInterval);
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
// Make sure that periodic purge will not interfere with tests.
auto interval =
ThreadCacheRegistry::Instance().purge_interval_for_testing();
ThreadCacheRegistry::Instance().ResetForTesting();
tcache->ResetForTesting();
}
void TearDown() override {
task_env_.FastForwardBy(2 * ThreadCacheRegistry::kPurgeInterval);
ASSERT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
task_env_.FastForwardBy(interval);
ASSERT_EQ(0u, task_env_.GetPendingMainThreadTaskCount());
}
base::test::TaskEnvironment task_env_{
@@ -436,100 +433,61 @@ TEST_F(ThreadCacheTest, PurgeAll) NO_THREAD_SAFETY_ANALYSIS {
}
TEST_F(ThreadCacheTest, PeriodicPurge) {
ThreadCacheRegistry::Instance().StartPeriodicPurge();
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
std::atomic<bool> other_thread_started{false};
std::atomic<bool> purge_called{false};
size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize);
ThreadCache* this_thread_tcache = g_root->thread_cache_for_testing();
ThreadCache* other_thread_tcache = nullptr;
LambdaThreadDelegate delegate{
BindLambdaForTesting([&]() NO_THREAD_SAFETY_ANALYSIS {
FillThreadCacheAndReturnIndex(kMediumSize);
other_thread_tcache = g_root->thread_cache_for_testing();
other_thread_started.store(true, std::memory_order_release);
while (!purge_called.load(std::memory_order_acquire)) {
}
// Purge() was not triggered from the other thread.
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
// Allocations do not trigger Purge().
void* data = g_root->Alloc(1, "");
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
// But deallocations do.
g_root->Free(data);
EXPECT_EQ(0u,
other_thread_tcache->bucket_count_for_testing(bucket_index));
})};
PlatformThreadHandle thread_handle;
PlatformThread::Create(0, &delegate, &thread_handle);
while (!other_thread_started.load(std::memory_order_acquire)) {
}
EXPECT_EQ(kFillCountForMediumBucket,
this_thread_tcache->bucket_count_for_testing(bucket_index));
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
// Not enough allocations since last purge, don't reschedule it.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
// This thread is synchronously purged.
EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
// Not the other one.
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
purge_called.store(true, std::memory_order_release);
PlatformThread::Join(thread_handle);
}
TEST_F(ThreadCacheTest, PeriodicPurgeStopsAndRestarts) {
ThreadCacheRegistry::Instance().StartPeriodicPurge();
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
auto* tcache = ThreadCache::Get();
EXPECT_GT(tcache->bucket_count_for_testing(bucket_index), 0u);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
// Not enough allocations since last purge, don't reschedule it.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
// This thread is synchronously purged.
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// 1 allocation is not enough to restart it.
FillThreadCacheAndReturnIndex(kSmallSize);
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
for (int i = 0; i < ThreadCacheRegistry::kMinMainThreadAllocationsForPurging;
i++) {
FillThreadCacheAndReturnIndex(kSmallSize);
auto& registry = ThreadCacheRegistry::Instance();
registry.StartPeriodicPurge();
EXPECT_EQ(1u, task_env_.GetPendingMainThreadTaskCount());
EXPECT_EQ(ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
// No allocations, the period gets longer.
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(2 * ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(4 * ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
// Check that the purge interval is clamped at the maximum value.
while (registry.purge_interval_for_testing() <
ThreadCacheRegistry::kMaxPurgeInterval) {
task_env_.FastForwardBy(registry.purge_interval_for_testing());
}
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
EXPECT_GT(tcache->bucket_count_for_testing(bucket_index), 0u);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// Since there were enough allocations, another task is posted.
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
FillThreadCacheAndReturnIndex(kSmallSize);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// Not enough this time.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
task_env_.FastForwardBy(registry.purge_interval_for_testing());
// There is still a task, even though there are no allocations.
EXPECT_EQ(1u, task_env_.GetPendingMainThreadTaskCount());
// Not enough allocations to decrease the interval.
FillThreadCacheAndReturnIndex(kSmallSize, 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval,
registry.purge_interval_for_testing());
FillThreadCacheAndReturnIndex(
kSmallSize,
2 * ThreadCacheRegistry::kMinMainThreadAllocationsForPurging + 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval / 2,
registry.purge_interval_for_testing());
// Enough allocations, interval doesn't change.
FillThreadCacheAndReturnIndex(
kSmallSize, ThreadCacheRegistry::kMinMainThreadAllocationsForPurging);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval / 2,
registry.purge_interval_for_testing());
// No allocations anymore, increase the interval.
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval,
registry.purge_interval_for_testing());
// Many allocations, directly go to the default interval.
FillThreadCacheAndReturnIndex(
kSmallSize,
10 * ThreadCacheRegistry::kMinMainThreadAllocationsForPurging + 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
}
} // namespace internal