Commit 946b81de authored by Stephen McGruer, committed by Chromium LUCI CQ

Revert "[PartitionAlloc] Use exponential backoff for periodic purge."

This reverts commit 21349955.

Reason for revert: Very tentative revert (with apologies if it's completely wrong) for strange crashes in mini_installer tests: https://crbug.com/1159411

Original change's description:
> [PartitionAlloc] Use exponential backoff for periodic purge.
>
> Periodic purge should ideally not be scheduled at all when a process is
> fully idle from the allocator's standpoint, that is, it doesn't
> allocate. The current scheme achieves that, with non-trivial
> complexity (including managing reentrancy in the allocator).
>
> Make the code simpler, by modulating purge frequency with exponential
> backoff, rather than the complex mechanism we have.
>
> Bug: 998048
> Change-Id: I7faa5b2bb8450e41dba1c6cbdc0f65cd2d9a1dec
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2593373
> Reviewed-by: Kentaro Hara <haraken@chromium.org>
> Commit-Queue: Benoit L <lizeb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#837539}

TBR=haraken@chromium.org,lizeb@chromium.org,chromium-scoped@luci-project-accounts.iam.gserviceaccount.com

Change-Id: I1d47a0aaf1f66ca8be50934b99b5222a07289855
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 998048, 1159411
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2595088
Reviewed-by: Stephen McGruer <smcgruer@chromium.org>
Commit-Queue: Stephen McGruer <smcgruer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#837561}
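
For reference, a minimal standalone sketch of the interval-modulation logic that this revert removes, assuming the constants shown in the diff below (kMinPurgeInterval = 1 s, kMaxPurgeInterval = 1 min, kDefaultPurgeInterval = 2 s, kMinMainThreadAllocationsForPurging = 1000). The millisecond representation, the NextPurgeIntervalMs helper and the main() driver are illustrative, not part of the actual PartitionAlloc code:

// Illustration only: mirrors the branches of the reverted
// ThreadCacheRegistry::PeriodicPurge(), with base::TimeDelta replaced by
// plain milliseconds.
#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr int64_t kMinPurgeIntervalMs = 1000;            // 1 second.
constexpr int64_t kMaxPurgeIntervalMs = 60 * 1000;       // 1 minute.
constexpr int64_t kDefaultPurgeIntervalMs = 2 * kMinPurgeIntervalMs;
constexpr uint64_t kMinMainThreadAllocationsForPurging = 1000;

// Next purge interval, given the current one and the number of main-thread
// allocations observed since the last purge.
int64_t NextPurgeIntervalMs(int64_t current_ms,
                            uint64_t allocations_since_last_purge) {
  if (allocations_since_last_purge > 10 * kMinMainThreadAllocationsForPurging) {
    // Very active process: leap straight back toward the default interval.
    return std::min(kDefaultPurgeIntervalMs, current_ms / 2);
  }
  if (allocations_since_last_purge > 2 * kMinMainThreadAllocationsForPurging) {
    // Fairly active: purge more often, but never below the minimum interval.
    return std::max(kMinPurgeIntervalMs, current_ms / 2);
  }
  if (allocations_since_last_purge < kMinMainThreadAllocationsForPurging) {
    // Mostly idle: back off, but never above the maximum interval.
    return std::min(kMaxPurgeIntervalMs, current_ms * 2);
  }
  return current_ms;  // In-between activity: keep the current interval.
}

int main() {
  int64_t interval_ms = kDefaultPurgeIntervalMs;
  // An idle process backs off exponentially: 4s, 8s, ..., capped at 60s.
  for (int i = 0; i < 6; i++) {
    interval_ms = NextPurgeIntervalMs(interval_ms,
                                      /*allocations_since_last_purge=*/0);
    printf("idle step %d: %lld ms\n", i, static_cast<long long>(interval_ms));
  }
  // A large burst of allocations snaps back to the default interval.
  interval_ms =
      NextPurgeIntervalMs(interval_ms, 20 * kMinMainThreadAllocationsForPurging);
  printf("after burst: %lld ms\n", static_cast<long long>(interval_ms));
  return 0;
}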
parent 2c8aec82
@@ -5,7 +5,6 @@
#include "base/allocator/partition_allocator/thread_cache.h"
#include <sys/types.h>
#include <algorithm>
#include <atomic>
#include <vector>
@@ -32,9 +31,7 @@ static std::atomic<bool> g_has_instance;
} // namespace
constexpr base::TimeDelta ThreadCacheRegistry::kMinPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kMaxPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kDefaultPurgeInterval;
constexpr base::TimeDelta ThreadCacheRegistry::kPurgeInterval;
// static
ThreadCacheRegistry& ThreadCacheRegistry::Instance() {
@@ -109,26 +106,21 @@ void ThreadCacheRegistry::PurgeAll() {
}
void ThreadCacheRegistry::StartPeriodicPurge() {
if (periodic_purge_running_)
return;
periodic_purge_running_ = true;
PostDelayedPurgeTask();
}
void ThreadCacheRegistry::PostDelayedPurgeTask() {
PA_DCHECK(!has_pending_purge_task_);
ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&ThreadCacheRegistry::PeriodicPurge,
base::Unretained(this)),
purge_interval_);
kPurgeInterval);
has_pending_purge_task_ = true;
}
void ThreadCacheRegistry::PeriodicPurge() {
// To stop periodic purge for testing.
if (!periodic_purge_running_)
return;
has_pending_purge_task_ = false;
ThreadCache* tcache = ThreadCache::Get();
PA_DCHECK(tcache);
uint64_t allocations = tcache->stats_.alloc_count;
@@ -139,36 +131,52 @@ void ThreadCacheRegistry::PeriodicPurge() {
// assume that the main thread is a reasonable proxy for the process activity,
// where the main thread is the current one.
//
// If there were not enough allocations since the last purge, back off. On the
// other hand, if there were many allocations, make purge more frequent, but
// always in a set frequency range.
// If we didn't see enough allocations since the last purge, don't schedule a
// new one, and ask the thread cache to notify us of deallocations. This makes
// the next |kMinMainThreadAllocationsForPurging| deallocations slightly
// slower.
//
// There is a potential drawback: a process that was idle for a long time and
// suddenly becomes very active will take some time to go back to regularly
// scheduled purge with a small enough interval. This is the case for instance
// of a renderer moving to foreground. To mitigate that, if the number of
// allocations since the last purge was very large, make a greater leap to
// faster purging.
if (allocations_since_last_purge > 10 * kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::min(kDefaultPurgeInterval, purge_interval_ / 2);
} else if (allocations_since_last_purge >
2 * kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::max(kMinPurgeInterval, purge_interval_ / 2);
} else if (allocations_since_last_purge <
kMinMainThreadAllocationsForPurging) {
purge_interval_ = std::min(kMaxPurgeInterval, purge_interval_ * 2);
// Once the threshold is reached, reschedule a purge task. We count
// deallocations rather than allocations because these are the ones that fill
// the cache, and also because we already have a check on the deallocation
// path, not on the allocation one that we don't want to slow down.
bool enough_allocations =
allocations_since_last_purge >= kMinMainThreadAllocationsForPurging;
tcache->SetNotifiesRegistry(!enough_allocations);
deallocations_ = 0;
PurgeAll();
if (enough_allocations) {
allocations_at_last_purge_ = allocations;
PostDelayedPurgeTask();
}
}
PurgeAll();
void ThreadCacheRegistry::OnDeallocation() {
deallocations_++;
if (deallocations_ > kMinMainThreadAllocationsForPurging) {
ThreadCache* tcache = ThreadCache::Get();
PA_DCHECK(tcache);
allocations_at_last_purge_ = allocations;
PostDelayedPurgeTask();
deallocations_ = 0;
tcache->SetNotifiesRegistry(false);
if (has_pending_purge_task_)
return;
// This is called from the thread cache, which is called from the central
// allocator. This means that any allocation made by task posting will make
// it reentrant, unless we disable the thread cache.
tcache->Disable();
PostDelayedPurgeTask();
tcache->Enable();
}
}
void ThreadCacheRegistry::ResetForTesting() {
PA_CHECK(!has_pending_purge_task_);
allocations_at_last_purge_ = 0;
purge_interval_ = kDefaultPurgeInterval;
periodic_purge_running_ = false;
deallocations_ = 0;
}
// static
@@ -217,12 +225,12 @@ ThreadCache* ThreadCache::Create(PartitionRoot<internal::ThreadSafe>* root) {
ThreadCache::ThreadCache(PartitionRoot<ThreadSafe>* root)
: buckets_(),
should_purge_(false),
stats_(),
root_(root),
registry_(&ThreadCacheRegistry::Instance()),
next_(nullptr),
prev_(nullptr) {
ThreadCacheRegistry::Instance().RegisterThreadCache(this);
registry_->RegisterThreadCache(this);
for (int index = 0; index < kBucketCount; index++) {
const auto& root_bucket = root->buckets[index];
@@ -253,7 +261,7 @@ ThreadCache::ThreadCache(PartitionRoot<ThreadSafe>* root)
}
ThreadCache::~ThreadCache() {
ThreadCacheRegistry::Instance().UnregisterThreadCache(this);
registry_->UnregisterThreadCache(this);
Purge();
}
@@ -352,6 +360,22 @@ void ThreadCache::ClearBucket(ThreadCache::Bucket& bucket, size_t limit) {
PA_DCHECK(bucket.count == limit);
}
void ThreadCache::HandleNonNormalMode() {
switch (mode_.load(std::memory_order_relaxed)) {
case Mode::kPurge:
PurgeInternal();
mode_.store(Mode::kNormal, std::memory_order_relaxed);
break;
case Mode::kNotifyRegistry:
registry_->OnDeallocation();
break;
default:
break;
}
}
void ThreadCache::ResetForTesting() {
stats_.alloc_count = 0;
stats_.alloc_hits = 0;
@@ -368,7 +392,7 @@ void ThreadCache::ResetForTesting() {
stats_.metadata_overhead = 0;
Purge();
should_purge_.store(false, std::memory_order_relaxed);
mode_.store(Mode::kNormal, std::memory_order_relaxed);
}
void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
@@ -383,15 +407,30 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->cache_fill_hits += stats_.cache_fill_hits;
stats->cache_fill_misses += stats_.cache_fill_misses;
for (const Bucket& bucket : buckets_) {
for (size_t i = 0; i < kBucketCount; i++) {
stats->bucket_total_memory +=
bucket.count * static_cast<size_t>(bucket.slot_size);
buckets_[i].count * static_cast<size_t>(buckets_[i].slot_size);
}
stats->metadata_overhead += sizeof(*this);
}
void ThreadCache::SetShouldPurge() {
should_purge_.store(true, std::memory_order_relaxed);
// Purge may be triggered by an external event, in which case it should not
// take precedence over the notification mode, otherwise we risk disabling
// periodic purge entirely.
//
// Also, no other thread can set this to notification mode.
if (mode_.load(std::memory_order_relaxed) != Mode::kNormal)
return;
// We don't need any synchronization, and don't really care if the purge is
// carried out "right away", hence relaxed atomics.
mode_.store(Mode::kPurge, std::memory_order_relaxed);
}
void ThreadCache::SetNotifiesRegistry(bool enabled) {
mode_.store(enabled ? Mode::kNotifyRegistry : Mode::kNormal,
std::memory_order_relaxed);
}
void ThreadCache::Purge() {
@@ -400,11 +439,18 @@ void ThreadCache::Purge() {
}
void ThreadCache::PurgeInternal() {
should_purge_.store(false, std::memory_order_relaxed);
for (auto& bucket : buckets_)
ClearBucket(bucket, 0);
}
void ThreadCache::Disable() {
root_->with_thread_cache = false;
}
void ThreadCache::Enable() {
root_->with_thread_cache = true;
}
} // namespace internal
} // namespace base
@@ -68,15 +68,14 @@ class BASE_EXPORT ThreadCacheRegistry {
// Starts a periodic timer on the current thread to purge all thread caches.
void StartPeriodicPurge();
void OnDeallocation();
static PartitionLock& GetLock() { return Instance().lock_; }
base::TimeDelta purge_interval_for_testing() const { return purge_interval_; }
bool has_pending_purge_task() const { return has_pending_purge_task_; }
void ResetForTesting();
static constexpr TimeDelta kMinPurgeInterval = TimeDelta::FromSeconds(1);
static constexpr TimeDelta kMaxPurgeInterval = TimeDelta::FromMinutes(1);
static constexpr TimeDelta kDefaultPurgeInterval = 2 * kMinPurgeInterval;
static constexpr TimeDelta kPurgeInterval = TimeDelta::FromSeconds(1);
static constexpr int kMinMainThreadAllocationsForPurging = 1000;
private:
@@ -87,8 +86,8 @@ class BASE_EXPORT ThreadCacheRegistry {
PartitionLock lock_;
ThreadCache* list_head_ GUARDED_BY(GetLock()) = nullptr;
uint64_t allocations_at_last_purge_ = 0;
base::TimeDelta purge_interval_ = kDefaultPurgeInterval;
bool periodic_purge_running_ = false;
int deallocations_ = 0;
bool has_pending_purge_task_ = false;
};
constexpr ThreadCacheRegistry::ThreadCacheRegistry() = default;
@@ -199,6 +198,10 @@ class BASE_EXPORT ThreadCache {
void Purge();
void AccumulateStats(ThreadCacheStats* stats) const;
// Disables the thread cache for its associated root.
void Disable();
void Enable();
size_t bucket_count_for_testing(size_t index) const {
return buckets_[index].count;
}
@@ -238,10 +241,11 @@ class BASE_EXPORT ThreadCache {
kBucketCount < kNumBuckets,
"Cannot have more cached buckets than what the allocator supports");
std::atomic<Mode> mode_{Mode::kNormal};
Bucket buckets_[kBucketCount];
std::atomic<bool> should_purge_;
ThreadCacheStats stats_;
PartitionRoot<ThreadSafe>* const root_;
ThreadCacheRegistry* const registry_;
#if DCHECK_IS_ON()
bool is_in_thread_cache_ = false;
#endif
@@ -282,8 +286,8 @@ ALWAYS_INLINE bool ThreadCache::MaybePutInCache(void* address,
ClearBucket(bucket, bucket.limit / 2);
}
if (UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
PurgeInternal();
if (UNLIKELY(mode_.load(std::memory_order_relaxed) != Mode::kNormal))
HandleNonNormalMode();
return true;
}
@@ -104,13 +104,16 @@ class ThreadCacheTest : public ::testing::Test {
auto* tcache = g_root->thread_cache_for_testing();
ASSERT_TRUE(tcache);
// Make sure that periodic purge will not interfere with tests.
auto interval =
ThreadCacheRegistry::Instance().purge_interval_for_testing();
task_env_.FastForwardBy(2 * ThreadCacheRegistry::kPurgeInterval);
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
ThreadCacheRegistry::Instance().ResetForTesting();
tcache->ResetForTesting();
task_env_.FastForwardBy(interval);
ASSERT_EQ(0u, task_env_.GetPendingMainThreadTaskCount());
}
void TearDown() override {
task_env_.FastForwardBy(2 * ThreadCacheRegistry::kPurgeInterval);
ASSERT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
}
base::test::TaskEnvironment task_env_{
@@ -433,61 +436,100 @@ TEST_F(ThreadCacheTest, PurgeAll) NO_THREAD_SAFETY_ANALYSIS {
}
TEST_F(ThreadCacheTest, PeriodicPurge) {
auto& registry = ThreadCacheRegistry::Instance();
registry.StartPeriodicPurge();
EXPECT_EQ(1u, task_env_.GetPendingMainThreadTaskCount());
EXPECT_EQ(ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
// No allocations, the period gets longer.
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(2 * ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(4 * ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
// Check that the purge interval is clamped at the maximum value.
while (registry.purge_interval_for_testing() <
ThreadCacheRegistry::kMaxPurgeInterval) {
task_env_.FastForwardBy(registry.purge_interval_for_testing());
ThreadCacheRegistry::Instance().StartPeriodicPurge();
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
std::atomic<bool> other_thread_started{false};
std::atomic<bool> purge_called{false};
size_t bucket_index = FillThreadCacheAndReturnIndex(kMediumSize);
ThreadCache* this_thread_tcache = g_root->thread_cache_for_testing();
ThreadCache* other_thread_tcache = nullptr;
LambdaThreadDelegate delegate{
BindLambdaForTesting([&]() NO_THREAD_SAFETY_ANALYSIS {
FillThreadCacheAndReturnIndex(kMediumSize);
other_thread_tcache = g_root->thread_cache_for_testing();
other_thread_started.store(true, std::memory_order_release);
while (!purge_called.load(std::memory_order_acquire)) {
}
// Purge() was not triggered from the other thread.
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
// Allocations do not trigger Purge().
void* data = g_root->Alloc(1, "");
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
// But deallocations do.
g_root->Free(data);
EXPECT_EQ(0u,
other_thread_tcache->bucket_count_for_testing(bucket_index));
})};
PlatformThreadHandle thread_handle;
PlatformThread::Create(0, &delegate, &thread_handle);
while (!other_thread_started.load(std::memory_order_acquire)) {
}
EXPECT_EQ(kFillCountForMediumBucket,
this_thread_tcache->bucket_count_for_testing(bucket_index));
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
// Not enough allocations since last purge, don't reschedule it.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
// This thread is synchronously purged.
EXPECT_EQ(0u, this_thread_tcache->bucket_count_for_testing(bucket_index));
// Not the other one.
EXPECT_EQ(kFillCountForMediumBucket,
other_thread_tcache->bucket_count_for_testing(bucket_index));
purge_called.store(true, std::memory_order_release);
PlatformThread::Join(thread_handle);
}
TEST_F(ThreadCacheTest, PeriodicPurgeStopsAndRestarts) {
ThreadCacheRegistry::Instance().StartPeriodicPurge();
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
size_t bucket_index = FillThreadCacheAndReturnIndex(kSmallSize);
auto* tcache = ThreadCache::Get();
EXPECT_GT(tcache->bucket_count_for_testing(bucket_index), 0u);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
// Not enough allocations since last purge, don't reschedule it.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
// This thread is synchronously purged.
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// 1 allocation is not enough to restart it.
FillThreadCacheAndReturnIndex(kSmallSize);
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
for (int i = 0; i < ThreadCacheRegistry::kMinMainThreadAllocationsForPurging;
i++) {
FillThreadCacheAndReturnIndex(kSmallSize);
}
task_env_.FastForwardBy(registry.purge_interval_for_testing());
// There is still a task, even though there are no allocations.
EXPECT_EQ(1u, task_env_.GetPendingMainThreadTaskCount());
// Not enough allocations to decrease the interval.
FillThreadCacheAndReturnIndex(kSmallSize, 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval,
registry.purge_interval_for_testing());
FillThreadCacheAndReturnIndex(
kSmallSize,
2 * ThreadCacheRegistry::kMinMainThreadAllocationsForPurging + 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval / 2,
registry.purge_interval_for_testing());
// Enough allocations, interval doesn't change.
FillThreadCacheAndReturnIndex(
kSmallSize, ThreadCacheRegistry::kMinMainThreadAllocationsForPurging);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval / 2,
registry.purge_interval_for_testing());
// No allocations anymore, increase the interval.
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kMaxPurgeInterval,
registry.purge_interval_for_testing());
// Many allocations, directly go to the default interval.
FillThreadCacheAndReturnIndex(
kSmallSize,
10 * ThreadCacheRegistry::kMinMainThreadAllocationsForPurging + 1);
task_env_.FastForwardBy(registry.purge_interval_for_testing());
EXPECT_EQ(ThreadCacheRegistry::kDefaultPurgeInterval,
registry.purge_interval_for_testing());
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
EXPECT_GT(tcache->bucket_count_for_testing(bucket_index), 0u);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// Since there were enough allocations, another task is posted.
EXPECT_TRUE(ThreadCacheRegistry::Instance().has_pending_purge_task());
FillThreadCacheAndReturnIndex(kSmallSize);
task_env_.FastForwardBy(ThreadCacheRegistry::kPurgeInterval);
EXPECT_EQ(0u, tcache->bucket_count_for_testing(bucket_index));
// Not enough this time.
EXPECT_FALSE(ThreadCacheRegistry::Instance().has_pending_purge_task());
}
} // namespace internal
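
For comparison, a simplified standalone sketch of the scheme this revert restores, where the registry stops rescheduling the fixed-interval purge task when the main thread was idle and instead asks the thread cache to notify it of deallocations. The Registry class, the boolean standing in for the posted task, and the main() driver are illustrative; the real code additionally purges all caches, disables the thread cache around task posting to avoid reentrancy, and uses relaxed atomics for the mode flag:

// Illustration only: models the control flow of the restored
// ThreadCacheRegistry::PeriodicPurge() and OnDeallocation().
#include <cstdint>
#include <cstdio>

constexpr int kMinMainThreadAllocationsForPurging = 1000;

class Registry {
 public:
  // Runs as the periodic purge task on the main thread.
  void PeriodicPurge(uint64_t allocations_so_far) {
    has_pending_purge_task_ = false;
    uint64_t since_last_purge = allocations_so_far - allocations_at_last_purge_;
    bool enough_allocations =
        since_last_purge >= kMinMainThreadAllocationsForPurging;
    // When idle, switch the thread cache to "notify registry" mode instead of
    // keeping the periodic task alive.
    notifies_registry_ = !enough_allocations;
    deallocations_ = 0;
    // PurgeAll() would run here.
    if (enough_allocations) {
      allocations_at_last_purge_ = allocations_so_far;
      has_pending_purge_task_ = true;  // PostDelayedPurgeTask() in the real code.
    }
  }

  // Called from the deallocation path while in notification mode.
  void OnDeallocation() {
    if (!notifies_registry_)
      return;
    if (++deallocations_ > kMinMainThreadAllocationsForPurging) {
      deallocations_ = 0;
      notifies_registry_ = false;
      if (!has_pending_purge_task_)
        has_pending_purge_task_ = true;  // Re-arm PostDelayedPurgeTask().
    }
  }

  bool has_pending_purge_task() const { return has_pending_purge_task_; }

 private:
  uint64_t allocations_at_last_purge_ = 0;
  int deallocations_ = 0;
  bool notifies_registry_ = false;
  bool has_pending_purge_task_ = false;
};

int main() {
  Registry registry;
  // Idle period: the purge task runs once and is not rescheduled.
  registry.PeriodicPurge(/*allocations_so_far=*/0);
  printf("after idle purge, task pending: %d\n",
         registry.has_pending_purge_task());
  // A burst of deallocations re-arms the purge task.
  for (int i = 0; i <= kMinMainThreadAllocationsForPurging; i++)
    registry.OnDeallocation();
  printf("after burst, task pending: %d\n",
         registry.has_pending_purge_task());
  return 0;
}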