Commit 8a6f1bd0 authored by Anton Bikineev, committed by Commit Bot

PartitionAlloc: PCScan: Make quarantine limit bounded by heap size.

This also makes PartitionRoot::total_size_of_*_pages atomic. Since there
are no side effects around reading or writing them (other than the
invariant check, which this change removes), relaxed memory ordering
should be safe.

Bug: 11297512
Change-Id: Ibeb74484f25ba915a1b31659f3c2d6e13560f83b
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2506441
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#822658}
parent a0f502e8
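Before the diff, a minimal standalone sketch of the pattern this CL applies (illustrative only, not the Chromium sources): advisory bookkeeping counters move from lock-guarded `size_t` fields to atomics updated with relaxed ordering. Relaxed ordering suffices because the counters carry no synchronization duties; no other memory accesses need to be ordered against them.

```cpp
#include <atomic>
#include <cstddef>

// Hypothetical counter illustrating the lock-to-relaxed-atomic move.
struct CommittedPagesCounter {
  std::atomic<std::size_t> total_committed{0};

  void Increase(std::size_t len) {
    // Relaxed is enough: the counter is pure statistics, nothing
    // establishes happens-before through it.
    total_committed.fetch_add(len, std::memory_order_relaxed);
  }
  void Decrease(std::size_t len) {
    total_committed.fetch_sub(len, std::memory_order_relaxed);
  }
  // Readers (stats, tests) tolerate slightly stale values.
  std::size_t get() const {
    return total_committed.load(std::memory_order_relaxed);
  }
};
```

The trade-off, visible in the diff below, is that the cross-counter invariant can no longer be DCHECKed: the three counters are not updated atomically as a group.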
@@ -75,33 +75,31 @@ TEST_F(PartitionAllocMemoryReclaimerTest, Simple) {
 TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
   PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
-  size_t committed_initially =
-      root->total_size_of_committed_pages_for_testing();
+  size_t committed_initially = root->get_total_size_of_committed_pages();
   AllocateAndFree();
-  size_t committed_before = root->total_size_of_committed_pages_for_testing();
+  size_t committed_before = root->get_total_size_of_committed_pages();
   EXPECT_GT(committed_before, committed_initially);
   StartReclaimer();
   task_environment_.FastForwardBy(
       task_environment_.NextMainThreadPendingTaskDelay());
-  size_t committed_after = root->total_size_of_committed_pages_for_testing();
+  size_t committed_after = root->get_total_size_of_committed_pages();
   EXPECT_LT(committed_after, committed_before);
   EXPECT_LE(committed_initially, committed_after);
 }
 
 TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
   PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
-  size_t committed_initially =
-      root->total_size_of_committed_pages_for_testing();
+  size_t committed_initially = root->get_total_size_of_committed_pages();
   {
     AllocateAndFree();
-    size_t committed_before = root->total_size_of_committed_pages_for_testing();
+    size_t committed_before = root->get_total_size_of_committed_pages();
     EXPECT_GT(committed_before, committed_initially);
     PartitionAllocMemoryReclaimer::Instance()->Reclaim();
-    size_t committed_after = root->total_size_of_committed_pages_for_testing();
+    size_t committed_after = root->get_total_size_of_committed_pages();
     EXPECT_LT(committed_after, committed_before);
     EXPECT_LE(committed_initially, committed_after);
...
@@ -1361,7 +1361,7 @@ TEST_F(PartitionAllocTest, MappingCollision) {
 // Tests that slot spans in the free slot span cache do get freed as
 // appropriate.
 TEST_F(PartitionAllocTest, FreeCache) {
-  EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages_for_testing());
+  EXPECT_EQ(0U, allocator.root()->get_total_size_of_committed_pages());
   size_t big_size = 1000 - kExtraAllocSize;
   size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
@@ -1376,7 +1376,7 @@ TEST_F(PartitionAllocTest, FreeCache) {
   EXPECT_EQ(1, slot_span->num_allocated_slots);
   size_t expected_committed_size = PartitionPageSize();
   EXPECT_EQ(expected_committed_size,
-            allocator.root()->total_size_of_committed_pages_for_testing());
+            allocator.root()->get_total_size_of_committed_pages());
   allocator.root()->Free(ptr);
   EXPECT_EQ(0, slot_span->num_allocated_slots);
   EXPECT_NE(-1, slot_span->empty_cache_index);
@@ -1394,7 +1394,7 @@ TEST_F(PartitionAllocTest, FreeCache) {
       cycle_free_cache_bucket->num_system_pages_per_slot_span *
       SystemPageSize();
   EXPECT_EQ(expected_size,
-            allocator.root()->total_size_of_committed_pages_for_testing());
+            allocator.root()->get_total_size_of_committed_pages());
   // Check that an allocation works ok whilst in this state (a free'd slot span
   // as the active slot spans head).
@@ -1411,7 +1411,7 @@ TEST_F(PartitionAllocTest, FreeCache) {
     EXPECT_TRUE(slot_span->freelist_head);
   }
   EXPECT_EQ(expected_committed_size,
-            allocator.root()->total_size_of_committed_pages_for_testing());
+            allocator.root()->get_total_size_of_committed_pages());
 }
 
 // Tests for a bug we had with losing references to free slot spans.
...
@@ -63,7 +63,8 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
     return nullptr;
   size_t committed_page_size = size + SystemPageSize();
-  root->total_size_of_direct_mapped_pages += committed_page_size;
+  root->total_size_of_direct_mapped_pages.fetch_add(committed_page_size,
+                                                    std::memory_order_relaxed);
   root->IncreaseCommittedPages(committed_page_size);
   char* slot = ptr + PartitionPageSize();
@@ -279,7 +280,8 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
   if (UNLIKELY(!super_page))
     return nullptr;
-  root->total_size_of_super_pages += kSuperPageSize;
+  root->total_size_of_super_pages.fetch_add(kSuperPageSize,
+                                            std::memory_order_relaxed);
   // |total_size| MUST be less than kSuperPageSize - (PartitionPageSize()*2).
   // This is a trustworthy value because num_partition_pages is not user
...
@@ -332,12 +332,9 @@ NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(size_t size) {
   // Check whether this OOM is due to a lot of super pages that are allocated
   // but not committed, probably due to http://crbug.com/421387.
   //
-  // Reading values without locking is fine here, we are going to crash anyway,
-  // this is used for reporting only, and concurrent successful allocations are
-  // unlikely.
-  if (TS_UNCHECKED_READ(total_size_of_super_pages) +
-          TS_UNCHECKED_READ(total_size_of_direct_mapped_pages) -
-          TS_UNCHECKED_READ(total_size_of_committed_pages) >
+  if (total_size_of_super_pages.load(std::memory_order_relaxed) +
+          total_size_of_direct_mapped_pages.load(std::memory_order_relaxed) -
+          total_size_of_committed_pages.load(std::memory_order_relaxed) >
       kReasonableSizeOfUnusedPages) {
     internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
   }
@@ -647,8 +644,10 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
     ScopedGuard guard{lock_};
     stats.total_mmapped_bytes =
-        total_size_of_super_pages + total_size_of_direct_mapped_pages;
-    stats.total_committed_bytes = total_size_of_committed_pages;
+        total_size_of_super_pages.load(std::memory_order_relaxed) +
+        total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);
+    stats.total_committed_bytes =
+        total_size_of_committed_pages.load(std::memory_order_relaxed);
     size_t direct_mapped_allocations_total_size = 0;
     for (size_t i = 0; i < kNumBuckets; ++i) {
...
@@ -30,6 +30,8 @@
 // might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
 // keep worst-case waste to ~10%.
 
+#include <atomic>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/partition_alloc-inl.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
@@ -143,9 +145,11 @@ struct BASE_EXPORT PartitionRoot {
   // Invariant: total_size_of_committed_pages <=
   //                total_size_of_super_pages +
   //                total_size_of_direct_mapped_pages.
-  size_t total_size_of_committed_pages GUARDED_BY(lock_) = 0;
-  size_t total_size_of_super_pages GUARDED_BY(lock_) = 0;
-  size_t total_size_of_direct_mapped_pages GUARDED_BY(lock_) = 0;
+  // Since all operations on these atomic variables have relaxed semantics, we
+  // don't check this invariant with DCHECKs.
+  std::atomic<size_t> total_size_of_committed_pages{0};
+  std::atomic<size_t> total_size_of_super_pages{0};
+  std::atomic<size_t> total_size_of_direct_mapped_pages{0};
   char* next_super_page = nullptr;
   char* next_partition_page = nullptr;
@@ -191,10 +195,8 @@ struct BASE_EXPORT PartitionRoot {
   ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
   ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
 
-  ALWAYS_INLINE void IncreaseCommittedPages(size_t len)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  ALWAYS_INLINE void DecreaseCommittedPages(size_t len)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+  ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
   ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length)
@@ -265,9 +267,8 @@ struct BASE_EXPORT PartitionRoot {
   internal::ThreadCache* thread_cache_for_testing() const {
     return with_thread_cache ? internal::ThreadCache::Get() : nullptr;
   }
-  size_t total_size_of_committed_pages_for_testing() {
-    ScopedGuard guard{lock_};
-    return total_size_of_committed_pages;
+  size_t get_total_size_of_committed_pages() const {
+    return total_size_of_committed_pages.load(std::memory_order_relaxed);
   }
 
   ALWAYS_INLINE internal::PartitionTag GetNewPartitionTag() {
@@ -666,17 +667,13 @@ PartitionRoot<thread_safe>::FromSlotSpan(SlotSpan* slot_span) {
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
     size_t len) {
-  total_size_of_committed_pages += len;
-  PA_DCHECK(total_size_of_committed_pages <=
-            total_size_of_super_pages + total_size_of_direct_mapped_pages);
+  total_size_of_committed_pages.fetch_add(len, std::memory_order_relaxed);
 }
 
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
     size_t len) {
-  total_size_of_committed_pages -= len;
-  PA_DCHECK(total_size_of_committed_pages <=
-            total_size_of_super_pages + total_size_of_direct_mapped_pages);
+  total_size_of_committed_pages.fetch_sub(len, std::memory_order_relaxed);
 }
 
 template <bool thread_safe>
...
@@ -297,9 +297,9 @@ PCScan<thread_safe>::PCScanTask::PCScanTask(PCScan& pcscan, Root& root)
     : pcscan_(pcscan), root_(root) {
   // Take a snapshot of all allocated non-empty slot spans.
   static constexpr size_t kScanAreasReservationSlack = 10;
-  const size_t kScanAreasReservationSize = root_.total_size_of_committed_pages /
-                                           PartitionPageSize() /
-                                           kScanAreasReservationSlack;
+  const size_t kScanAreasReservationSize =
+      root_.get_total_size_of_committed_pages() / PartitionPageSize() /
+      kScanAreasReservationSlack;
   scan_areas_.reserve(kScanAreasReservationSize);
 
   typename Root::ScopedGuard guard(root.lock_);
@@ -346,7 +346,8 @@ void PCScan<thread_safe>::PCScanTask::RunOnce() && {
                                  new_quarantine_size);
   pcscan_.quarantine_data_.Account(new_quarantine_size);
-  pcscan_.quarantine_data_.GrowLimitIfNeeded();
+  pcscan_.quarantine_data_.GrowLimitIfNeeded(
+      root_.get_total_size_of_committed_pages());
 
   // Check that concurrent task can't be scheduled twice.
   PA_CHECK(pcscan_.in_progress_.exchange(false));
...
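For a feel of the reservation sizing above, a worked example as a standalone sketch (the helper name and the 16 KiB partition-page figure are assumptions for illustration, not part of the CL):

```cpp
#include <cstddef>

// Mirrors kScanAreasReservationSize from PCScanTask above.
// With 64 MiB committed and a typical 16 KiB partition page:
//   64 MiB / 16 KiB = 4096 partition pages; / 10 slack => ~409 entries.
std::size_t ScanAreasReservation(std::size_t committed_pages_size,
                                 std::size_t partition_page_size) {
  constexpr std::size_t kScanAreasReservationSlack = 10;
  return committed_pages_size / partition_page_size /
         kScanAreasReservationSlack;
}
```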
@@ -56,7 +56,7 @@ class BASE_EXPORT PCScan final {
     // Account freed bytes. Returns true if limit was reached.
     ALWAYS_INLINE bool Account(size_t bytes);
 
-    void GrowLimitIfNeeded();
+    void GrowLimitIfNeeded(size_t heap_size);
     void ResetAndAdvanceEpoch();
 
     size_t epoch() const { return epoch_.load(std::memory_order_relaxed); }
@@ -66,8 +66,7 @@ class BASE_EXPORT PCScan final {
     size_t last_size() const { return last_size_; }
 
    private:
-    static constexpr size_t kQuarantineSizeMinLimit = 16 * 1024 * 1024;
-    static constexpr double kQuarantineSizeGrowingFactor = 1.1;
+    static constexpr size_t kQuarantineSizeMinLimit = 1 * 1024 * 1024;
 
     std::atomic<size_t> current_size_{0u};
     std::atomic<size_t> size_limit_{kQuarantineSizeMinLimit};
@@ -85,9 +84,6 @@ class BASE_EXPORT PCScan final {
 template <bool thread_safe>
 constexpr size_t PCScan<thread_safe>::QuarantineData::kQuarantineSizeMinLimit;
 
-template <bool thread_safe>
-constexpr double
-    PCScan<thread_safe>::QuarantineData::kQuarantineSizeGrowingFactor;
-
 template <bool thread_safe>
 bool PCScan<thread_safe>::QuarantineData::Account(size_t size) {
@@ -102,12 +98,13 @@ void PCScan<thread_safe>::QuarantineData::ResetAndAdvanceEpoch() {
 }
 
 template <bool thread_safe>
-void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded() {
+void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded(size_t heap_size) {
+  static constexpr double kQuarantineSizeFraction = 0.1;
+  // |heap_size| includes the current quarantine size, we intentionally leave
+  // some slack till hitting the limit.
   size_limit_.store(
-      std::max(
-          kQuarantineSizeMinLimit,
-          static_cast<size_t>(kQuarantineSizeGrowingFactor *
-                              current_size_.load(std::memory_order_relaxed))),
+      std::max(kQuarantineSizeMinLimit,
+               static_cast<size_t>(kQuarantineSizeFraction * heap_size)),
       std::memory_order_relaxed);
 }
...
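To make the new limit policy concrete, a minimal standalone sketch of the arithmetic in GrowLimitIfNeeded (hypothetical free function, not the class member): the quarantine limit now tracks the committed heap instead of growing geometrically from the current quarantine size, with a 1 MiB floor.

```cpp
#include <algorithm>
#include <cstddef>

// New policy: the limit is 10% of the committed heap, never below 1 MiB.
// E.g. a 300 MiB heap yields a 30 MiB limit; a 4 MiB heap stays at the
// 1 MiB floor.
std::size_t QuarantineLimit(std::size_t heap_size) {
  constexpr std::size_t kQuarantineSizeMinLimit = 1 * 1024 * 1024;
  constexpr double kQuarantineSizeFraction = 0.1;
  return std::max(
      kQuarantineSizeMinLimit,
      static_cast<std::size_t>(kQuarantineSizeFraction * heap_size));
}
```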
@@ -58,7 +58,7 @@ struct FullSlotSpanAllocation {
 // Assumes heap is purged.
 FullSlotSpanAllocation GetFullSlotSpan(ThreadSafePartitionRoot& root,
                                        size_t object_size) {
-  CHECK_EQ(0u, root.total_size_of_committed_pages_for_testing());
+  CHECK_EQ(0u, root.get_total_size_of_committed_pages());
   const size_t size_with_extra = PartitionSizeAdjustAdd(true, object_size);
   const size_t bucket_index = root.SizeToBucketIndex(size_with_extra);
...