Commit 91afef22 authored by Michael Lippautz's avatar Michael Lippautz Committed by Commit Bot

heap: Refactor heap stats using observer pattern

- Use observer pattern to notify interested components about heap sizing
  updates.
- Only send deltas of reasonable kUpdateThreshold sizes.
- Send positive as well as negative increments to allow observers to take
  different action on them.

Change-Id: Id5a83b5b140e25e5eec07b402396eb4f2fc9062c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1631708
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#666187}
parent bf4b8ba2
......@@ -46,8 +46,7 @@ static size_t GetMemoryUsage() {
size_t usage =
base::ProcessMetrics::CreateCurrentProcessMetrics()->GetMallocUsage() +
WTF::Partitions::TotalActiveBytes() +
blink::ProcessHeap::TotalAllocatedObjectSize() +
blink::ProcessHeap::TotalMarkedObjectSize();
blink::ProcessHeap::TotalAllocatedObjectSize();
v8::HeapStatistics v8_heap_statistics;
blink::V8PerIsolateData::MainThreadIsolate()->GetHeapStatistics(
&v8_heap_statistics);
......
......@@ -61,8 +61,7 @@ void MemoryUsageMonitor::GetV8MemoryUsage(MemoryUsage& usage) {
}
void MemoryUsageMonitor::GetBlinkMemoryUsage(MemoryUsage& usage) {
usage.blink_gc_bytes = ProcessHeap::TotalAllocatedObjectSize() +
ProcessHeap::TotalMarkedObjectSize();
usage.blink_gc_bytes = ProcessHeap::TotalAllocatedObjectSize();
usage.partition_alloc_bytes = WTF::Partitions::TotalSizeOfCommittedPages();
}
......
......@@ -14,8 +14,7 @@ WebMemoryStatistics WebMemoryStatistics::Get() {
statistics.partition_alloc_total_allocated_bytes =
WTF::Partitions::TotalActiveBytes();
statistics.blink_gc_total_allocated_bytes =
ProcessHeap::TotalAllocatedObjectSize() +
ProcessHeap::TotalMarkedObjectSize();
ProcessHeap::TotalAllocatedObjectSize();
return statistics;
}
......
......@@ -29,8 +29,7 @@ void DumpMemoryTotals(base::trace_event::ProcessMemoryDump* memory_dump) {
// ThreadHeap::markedObjectSize() can be underestimated if we're still in the
// process of lazy sweeping.
objects_dump->AddScalar("size", "bytes",
ProcessHeap::TotalAllocatedObjectSize() +
ProcessHeap::TotalMarkedObjectSize());
ProcessHeap::TotalAllocatedObjectSize());
}
} // namespace
......
......@@ -59,12 +59,43 @@ namespace blink {
HeapAllocHooks::AllocationHook* HeapAllocHooks::allocation_hook_ = nullptr;
HeapAllocHooks::FreeHook* HeapAllocHooks::free_hook_ = nullptr;
// Observer that mirrors per-thread heap stats updates into the process-wide
// counters in ProcessHeap.
class ProcessHeapReporter final : public ThreadHeapStatsObserver {
public:
// Space (page-level) deltas are forwarded 1:1 to the process-wide counter.
void IncreaseAllocatedSpace(size_t bytes) final {
ProcessHeap::IncreaseTotalAllocatedSpace(bytes);
}
void DecreaseAllocatedSpace(size_t bytes) final {
ProcessHeap::DecreaseTotalAllocatedSpace(bytes);
}
// Called once per GC cycle with the accurate live size |bytes|: rolls back
// the net delta forwarded since the last reset (|prev_incremented_|) and
// re-bases the process-wide counter on |bytes|.
void ResetAllocatedObjectSize(size_t bytes) final {
ProcessHeap::DecreaseTotalAllocatedObjectSize(prev_incremented_);
ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
prev_incremented_ = bytes;
}
void IncreaseAllocatedObjectSize(size_t bytes) final {
ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
prev_incremented_ += bytes;
}
void DecreaseAllocatedObjectSize(size_t bytes) final {
ProcessHeap::DecreaseTotalAllocatedObjectSize(bytes);
prev_incremented_ -= bytes;
}
private:
// Net object-size delta forwarded to ProcessHeap since the last reset.
size_t prev_incremented_ = 0;
};
ThreadHeap::ThreadHeap(ThreadState* thread_state)
: thread_state_(thread_state),
heap_stats_collector_(std::make_unique<ThreadHeapStatsCollector>()),
region_tree_(std::make_unique<RegionTree>()),
address_cache_(std::make_unique<AddressCache>()),
free_page_pool_(std::make_unique<PagePool>()),
process_heap_reporter_(std::make_unique<ProcessHeapReporter>()),
marking_worklist_(nullptr),
not_fully_constructed_worklist_(nullptr),
weak_callback_worklist_(nullptr),
......@@ -82,6 +113,8 @@ ThreadHeap::ThreadHeap(ThreadState* thread_state)
likely_to_be_promptly_freed_ =
std::make_unique<int[]>(kLikelyToBePromptlyFreedArraySize);
ClearArenaAges();
stats_collector()->RegisterObserver(process_heap_reporter_.get());
}
ThreadHeap::~ThreadHeap() {
......@@ -89,26 +122,6 @@ ThreadHeap::~ThreadHeap() {
delete arenas_[i];
}
// Records an object-size increase both in the per-thread stats collector and
// in the process-wide ProcessHeap counters.
void ThreadHeap::IncreaseAllocatedObjectSize(size_t bytes) {
stats_collector()->IncreaseAllocatedObjectSize(bytes);
ProcessHeap::IncreaseTotalAllocatedObjectSize(bytes);
}
// Records an object-size decrease both in the per-thread stats collector and
// in the process-wide ProcessHeap counters.
void ThreadHeap::DecreaseAllocatedObjectSize(size_t bytes) {
stats_collector()->DecreaseAllocatedObjectSize(bytes);
ProcessHeap::DecreaseTotalAllocatedObjectSize(bytes);
}
// Records a page-level space increase both in the per-thread stats collector
// and in the process-wide ProcessHeap counters.
void ThreadHeap::IncreaseAllocatedSpace(size_t bytes) {
stats_collector()->IncreaseAllocatedSpace(bytes);
ProcessHeap::IncreaseTotalAllocatedSpace(bytes);
}
// Records a page-level space decrease both in the per-thread stats collector
// and in the process-wide ProcessHeap counters.
void ThreadHeap::DecreaseAllocatedSpace(size_t bytes) {
stats_collector()->DecreaseAllocatedSpace(bytes);
ProcessHeap::DecreaseTotalAllocatedSpace(bytes);
}
Address ThreadHeap::CheckAndMarkPointer(MarkingVisitor* visitor,
Address address) {
DCHECK(thread_state_->InAtomicMarkingPause());
......
......@@ -57,6 +57,7 @@ class IncrementalMarkingScopeBase;
class AddressCache;
class ThreadHeapStatsCollector;
class PagePool;
class ProcessHeapReporter;
class RegionTree;
struct MarkingItem {
......@@ -372,11 +373,6 @@ class PLATFORM_EXPORT ThreadHeap {
return heap_stats_collector_.get();
}
void IncreaseAllocatedObjectSize(size_t);
void DecreaseAllocatedObjectSize(size_t);
void IncreaseAllocatedSpace(size_t);
void DecreaseAllocatedSpace(size_t);
#if defined(ADDRESS_SANITIZER)
void PoisonEagerArena();
void PoisonAllHeaps();
......@@ -406,6 +402,7 @@ class PLATFORM_EXPORT ThreadHeap {
std::unique_ptr<RegionTree> region_tree_;
std::unique_ptr<AddressCache> address_cache_;
std::unique_ptr<PagePool> free_page_pool_;
std::unique_ptr<ProcessHeapReporter> process_heap_reporter_;
// All objects on this worklist have been fully initialized and assigned a
// trace callback for iterating the body of the object. This worklist should
......
......@@ -615,7 +615,8 @@ void NormalPageArena::AllocatePage() {
new (page_memory->WritableStart()) NormalPage(page_memory, this);
swept_pages_.Push(page);
GetThreadState()->Heap().IncreaseAllocatedSpace(page->size());
GetThreadState()->Heap().stats_collector()->IncreaseAllocatedSpace(
page->size());
#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
// Allow the following addToFreeList() to add the newly allocated memory
// to the free list.
......@@ -629,7 +630,8 @@ void NormalPageArena::AllocatePage() {
}
void NormalPageArena::FreePage(NormalPage* page) {
GetThreadState()->Heap().DecreaseAllocatedSpace(page->size());
GetThreadState()->Heap().stats_collector()->DecreaseAllocatedSpace(
page->size());
PageMemory* memory = page->Storage();
page->~NormalPage();
......@@ -694,7 +696,7 @@ void NormalPageArena::PromptlyFreeObjectInFreeList(HeapObjectHeader* header,
AddToFreeList(address, size);
promptly_freed_size_ += size;
}
GetThreadState()->Heap().DecreaseAllocatedObjectSize(size);
GetThreadState()->Heap().stats_collector()->DecreaseAllocatedObjectSize(size);
}
bool NormalPageArena::ExpandObject(HeapObjectHeader* header, size_t new_size) {
......@@ -782,10 +784,10 @@ void NormalPageArena::SetRemainingAllocationSize(
// - If checkpoint is larger, the allocated size has increased.
// - The allocated size has decreased, otherwise.
if (last_remaining_allocation_size_ > remaining_allocation_size_) {
GetThreadState()->Heap().IncreaseAllocatedObjectSize(
GetThreadState()->Heap().stats_collector()->IncreaseAllocatedObjectSize(
last_remaining_allocation_size_ - remaining_allocation_size_);
} else if (last_remaining_allocation_size_ != remaining_allocation_size_) {
GetThreadState()->Heap().DecreaseAllocatedObjectSize(
GetThreadState()->Heap().stats_collector()->DecreaseAllocatedObjectSize(
remaining_allocation_size_ - last_remaining_allocation_size_);
}
last_remaining_allocation_size_ = remaining_allocation_size_;
......@@ -803,7 +805,7 @@ void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
// Free and clear the old linear allocation area.
if (HasCurrentAllocationArea()) {
AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize());
GetThreadState()->Heap().DecreaseAllocatedObjectSize(
GetThreadState()->Heap().stats_collector()->DecreaseAllocatedObjectSize(
RemainingAllocationSize());
}
// Set up a new linear allocation area.
......@@ -812,7 +814,8 @@ void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
if (point) {
// Only, update allocated size and object start bitmap if the area is
// actually set up with a non-null address.
GetThreadState()->Heap().IncreaseAllocatedObjectSize(size);
GetThreadState()->Heap().stats_collector()->IncreaseAllocatedObjectSize(
size);
// Current allocation point can never be part of the object bitmap start
// because the area can grow or shrink. Will be added back before a GC when
// clearing the allocation point.
......@@ -959,8 +962,9 @@ Address LargeObjectArena::DoAllocateLargeObjectPage(size_t allocation_size,
swept_pages_.Push(large_object);
GetThreadState()->Heap().IncreaseAllocatedSpace(large_object->size());
GetThreadState()->Heap().IncreaseAllocatedObjectSize(
GetThreadState()->Heap().stats_collector()->IncreaseAllocatedSpace(
large_object->size());
GetThreadState()->Heap().stats_collector()->IncreaseAllocatedObjectSize(
large_object->PayloadSize());
return result;
}
......@@ -968,7 +972,8 @@ Address LargeObjectArena::DoAllocateLargeObjectPage(size_t allocation_size,
void LargeObjectArena::FreeLargeObjectPage(LargeObjectPage* object) {
ASAN_UNPOISON_MEMORY_REGION(object->Payload(), object->PayloadSize());
object->ObjectHeader()->Finalize(object->Payload(), object->PayloadSize());
GetThreadState()->Heap().DecreaseAllocatedSpace(object->size());
GetThreadState()->Heap().stats_collector()->DecreaseAllocatedSpace(
object->size());
// Unpoison the object header and allocationGranularity bytes after the
// object before freeing.
......
......@@ -4,8 +4,9 @@
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include <cmath>
#include "base/logging.h"
#include "third_party/blink/renderer/platform/heap/unified_heap_controller.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
namespace blink {
......@@ -23,27 +24,68 @@ void ThreadHeapStatsCollector::IncreaseCompactionFreedPages(size_t pages) {
void ThreadHeapStatsCollector::IncreaseAllocatedObjectSize(size_t bytes) {
// The current GC may not have been started. This is ok as recording considers
// the whole time range between garbage collections.
allocated_bytes_since_prev_gc_ += static_cast<int64_t>(bytes);
pos_delta_allocated_bytes_since_prev_gc_ += bytes;
}
// Test-only entry point: records the increase and immediately flushes the
// accumulated deltas to observers, bypassing the kUpdateThreshold check.
void ThreadHeapStatsCollector::IncreaseAllocatedObjectSizeForTesting(
size_t bytes) {
IncreaseAllocatedObjectSize(bytes);
AllocatedObjectSizeSafepointImpl();
}
void ThreadHeapStatsCollector::DecreaseAllocatedObjectSize(size_t bytes) {
// See IncreaseAllocatedObjectSize.
allocated_bytes_since_prev_gc_ -= static_cast<int64_t>(bytes);
neg_delta_allocated_bytes_since_prev_gc_ += bytes;
}
// Test-only entry point: records the decrease and immediately flushes the
// accumulated deltas to observers, bypassing the kUpdateThreshold check.
void ThreadHeapStatsCollector::DecreaseAllocatedObjectSizeForTesting(
size_t bytes) {
DecreaseAllocatedObjectSize(bytes);
AllocatedObjectSizeSafepointImpl();
}
void ThreadHeapStatsCollector::AllocatedObjectSizeSafepoint() {
if (unified_heap_controller_) {
unified_heap_controller_->UpdateAllocatedObjectSize(
allocated_bytes_since_prev_gc_);
if (std::abs(pos_delta_allocated_bytes_since_prev_gc_ -
neg_delta_allocated_bytes_since_prev_gc_) > kUpdateThreshold) {
AllocatedObjectSizeSafepointImpl();
}
}
// Folds the positive/negative object-size deltas accumulated since the
// previous GC into |allocated_bytes_since_prev_gc_| and notifies observers
// of the net change, then clears the delta counters.
void ThreadHeapStatsCollector::AllocatedObjectSizeSafepointImpl() {
allocated_bytes_since_prev_gc_ +=
static_cast<int64_t>(pos_delta_allocated_bytes_since_prev_gc_) -
static_cast<int64_t>(neg_delta_allocated_bytes_since_prev_gc_);
// These observer methods may start or finalize GC. In case they trigger a
// final GC pause, the delta counters are reset there and the following
// observer calls are called with '0' updates.
ForAllObservers([this](ThreadHeapStatsObserver* observer) {
// Recompute delta here so that a GC finalization is able to clear the
// delta for other observer calls.
int64_t delta = pos_delta_allocated_bytes_since_prev_gc_ -
neg_delta_allocated_bytes_since_prev_gc_;
if (delta < 0) {
observer->DecreaseAllocatedObjectSize(static_cast<size_t>(-delta));
} else {
observer->IncreaseAllocatedObjectSize(static_cast<size_t>(delta));
}
});
pos_delta_allocated_bytes_since_prev_gc_ = 0;
neg_delta_allocated_bytes_since_prev_gc_ = 0;
}
// Tracks page-level memory growth and notifies all observers immediately
// (no thresholding, unlike object-size updates).
void ThreadHeapStatsCollector::IncreaseAllocatedSpace(size_t bytes) {
allocated_space_bytes_ += bytes;
ForAllObservers([bytes](ThreadHeapStatsObserver* observer) {
observer->IncreaseAllocatedSpace(bytes);
});
}
// Tracks page-level memory shrinkage and notifies all observers immediately
// (no thresholding, unlike object-size updates).
void ThreadHeapStatsCollector::DecreaseAllocatedSpace(size_t bytes) {
allocated_space_bytes_ -= bytes;
ForAllObservers([bytes](ThreadHeapStatsObserver* observer) {
observer->DecreaseAllocatedSpace(bytes);
});
}
void ThreadHeapStatsCollector::IncreaseWrapperCount(size_t count) {
......@@ -66,6 +108,9 @@ void ThreadHeapStatsCollector::NotifyMarkingStarted(BlinkGC::GCReason reason) {
}
void ThreadHeapStatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
allocated_bytes_since_prev_gc_ +=
static_cast<int64_t>(pos_delta_allocated_bytes_since_prev_gc_) -
static_cast<int64_t>(neg_delta_allocated_bytes_since_prev_gc_);
current_.marked_bytes = marked_bytes;
current_.object_size_in_bytes_before_sweeping = object_size_in_bytes();
current_.allocated_space_in_bytes_before_sweeping = allocated_space_bytes();
......@@ -74,6 +119,12 @@ void ThreadHeapStatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
current_.wrapper_count_before_sweeping = wrapper_count_;
allocated_bytes_since_prev_gc_ = 0;
collected_wrapper_count_ = 0;
pos_delta_allocated_bytes_since_prev_gc_ = 0;
neg_delta_allocated_bytes_since_prev_gc_ = 0;
ForAllObservers([marked_bytes](ThreadHeapStatsObserver* observer) {
observer->ResetAllocatedObjectSize(marked_bytes);
});
}
void ThreadHeapStatsCollector::NotifySweepingCompleted() {
......@@ -157,4 +208,24 @@ size_t ThreadHeapStatsCollector::wrapper_count() const {
return wrapper_count_;
}
// Registers |observer| for heap sizing updates. |observer| must not already
// be registered; lifetime is managed by the caller.
void ThreadHeapStatsCollector::RegisterObserver(
ThreadHeapStatsObserver* observer) {
DCHECK(!observers_.Contains(observer));
observers_.push_back(observer);
}
// Removes a previously registered |observer|. It is an error to unregister
// an observer that was never registered.
void ThreadHeapStatsCollector::UnregisterObserver(
ThreadHeapStatsObserver* observer) {
wtf_size_t index = observers_.Find(observer);
DCHECK_NE(WTF::kNotFound, index);
observers_.EraseAt(index);
}
// Invokes |callback| once for each registered observer, in registration
// order.
template <typename Callback>
void ThreadHeapStatsCollector::ForAllObservers(Callback callback) {
for (ThreadHeapStatsObserver* observer : observers_) {
callback(observer);
}
}
} // namespace blink
......@@ -15,7 +15,30 @@
namespace blink {
class UnifiedHeapController;
// Interface for observing changes to heap sizing.
class PLATFORM_EXPORT ThreadHeapStatsObserver {
public:
// Called upon allocating/releasing chunks of memory that contain objects.
//
// Must not trigger GC or allocate.
virtual void IncreaseAllocatedSpace(size_t) = 0;
virtual void DecreaseAllocatedSpace(size_t) = 0;
// Called once per GC cycle with the accurate number of live |bytes|.
//
// Must not trigger GC or allocate.
virtual void ResetAllocatedObjectSize(size_t bytes) = 0;
// Called after observing at least
// |ThreadHeapStatsCollector::kUpdateThreshold| changed bytes through
// allocation or explicit free. Reports both, negative and positive
// increments, to allow observer to decide whether absolute values or only the
// deltas is interesting.
//
// May trigger GC but most not allocate.
virtual void IncreaseAllocatedObjectSize(size_t) = 0;
virtual void DecreaseAllocatedObjectSize(size_t) = 0;
};
// Manages counters and statistics across garbage collection cycles.
//
......@@ -274,11 +297,23 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
return TimeDelta::FromMilliseconds(current_.marking_time_in_ms());
}
void SetUnifiedHeapController(UnifiedHeapController* controller) {
unified_heap_controller_ = controller;
}
void RegisterObserver(ThreadHeapStatsObserver* observer);
void UnregisterObserver(ThreadHeapStatsObserver* observer);
void IncreaseAllocatedObjectSizeForTesting(size_t);
void DecreaseAllocatedObjectSizeForTesting(size_t);
private:
// Observers are implemented using virtual calls. Avoid notifications below
// reasonably interesting sizes.
static constexpr int64_t kUpdateThreshold = 1024;
// Invokes |callback| for all registered observers.
template <typename Callback>
void ForAllObservers(Callback callback);
void AllocatedObjectSizeSafepointImpl();
// Statistics for the currently running garbage collection. Note that the
// Event may not be fully populated yet as some phase may not have been run.
const Event& current() const { return current_; }
......@@ -289,6 +324,8 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
// Allocated bytes since the last garbage collection. These bytes are reset
// after marking as they are accounted in marked_bytes then.
int64_t allocated_bytes_since_prev_gc_ = 0;
int64_t pos_delta_allocated_bytes_since_prev_gc_ = 0;
int64_t neg_delta_allocated_bytes_since_prev_gc_ = 0;
// Allocated space in bytes for all arenas.
size_t allocated_space_bytes_ = 0;
......@@ -302,9 +339,7 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
// collection cycle to make them easier to use.
TimeDelta gc_nested_in_v8_;
// Unified heap observes interesting statistics and forwards them to V8 after
// some aggregation.
UnifiedHeapController* unified_heap_controller_ = nullptr;
Vector<ThreadHeapStatsObserver*> observers_;
FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, InitialEmpty);
FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, IncreaseScopeTime);
......
......@@ -4,6 +4,7 @@
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace blink {
......@@ -104,7 +105,7 @@ TEST(ThreadHeapStatsCollectorTest, InitialEstimatedObjectSizeInBytes) {
TEST(ThreadHeapStatsCollectorTest, EstimatedObjectSizeInBytesNoMarkedBytes) {
ThreadHeapStatsCollector stats_collector;
stats_collector.NotifyMarkingStarted(BlinkGC::GCReason::kForcedGCForTesting);
stats_collector.IncreaseAllocatedObjectSize(512);
stats_collector.IncreaseAllocatedObjectSizeForTesting(512);
EXPECT_EQ(512u, stats_collector.object_size_in_bytes());
stats_collector.NotifyMarkingCompleted(kNoMarkedBytes);
stats_collector.NotifySweepingCompleted();
......@@ -117,7 +118,7 @@ TEST(ThreadHeapStatsCollectorTest, EstimatedObjectSizeInBytesWithMarkedBytes) {
stats_collector.NotifySweepingCompleted();
stats_collector.NotifyMarkingStarted(BlinkGC::GCReason::kForcedGCForTesting);
stats_collector.NotifyMarkingCompleted(kNoMarkedBytes);
stats_collector.IncreaseAllocatedObjectSize(512);
stats_collector.IncreaseAllocatedObjectSizeForTesting(512);
EXPECT_EQ(640u, stats_collector.object_size_in_bytes());
stats_collector.NotifySweepingCompleted();
}
......@@ -131,7 +132,7 @@ TEST(ThreadHeapStatsCollectorTest,
stats_collector.NotifyMarkingStarted(BlinkGC::GCReason::kForcedGCForTesting);
stats_collector.NotifyMarkingCompleted(128);
// Currently marked bytes should not account to the estimated object size.
stats_collector.IncreaseAllocatedObjectSize(512);
stats_collector.IncreaseAllocatedObjectSizeForTesting(512);
EXPECT_EQ(640u, stats_collector.object_size_in_bytes());
stats_collector.NotifySweepingCompleted();
}
......@@ -167,7 +168,7 @@ TEST(ThreadHeapStatsCollectorTest, EstimatedMarkingTime2) {
stats_collector.NotifyMarkingCompleted(1024);
stats_collector.NotifySweepingCompleted();
stats_collector.NotifyMarkingStarted(BlinkGC::GCReason::kForcedGCForTesting);
stats_collector.IncreaseAllocatedObjectSize(512);
stats_collector.IncreaseAllocatedObjectSizeForTesting(512);
EXPECT_DOUBLE_EQ(1.5, stats_collector.estimated_marking_time_in_seconds());
stats_collector.NotifyMarkingCompleted(kNoMarkedBytes);
stats_collector.NotifySweepingCompleted();
......@@ -403,4 +404,131 @@ TEST(ThreadHeapStatsCollectorTest, EventAllocatedSpaceBeforeSweeping2) {
stats_collector.previous().allocated_space_in_bytes_before_sweeping);
}
namespace {
// gmock-based observer used to set expectations on the individual stats
// notifications emitted by ThreadHeapStatsCollector.
class MockThreadHeapStatsObserver : public ThreadHeapStatsObserver {
public:
MOCK_METHOD1(IncreaseAllocatedSpace, void(size_t));
MOCK_METHOD1(DecreaseAllocatedSpace, void(size_t));
MOCK_METHOD1(ResetAllocatedObjectSize, void(size_t));
MOCK_METHOD1(IncreaseAllocatedObjectSize, void(size_t));
MOCK_METHOD1(DecreaseAllocatedObjectSize, void(size_t));
};
// Drives a full fake GC cycle (marking started -> marking completed ->
// sweeping completed) so that observers receive ResetAllocatedObjectSize
// with |marked_bytes|.
void FakeGC(ThreadHeapStatsCollector* stats_collector, size_t marked_bytes) {
stats_collector->NotifyMarkingStarted(BlinkGC::GCReason::kForcedGCForTesting);
stats_collector->NotifyMarkingCompleted(marked_bytes);
stats_collector->NotifySweepingCompleted();
}
} // namespace
// Registering and immediately unregistering an observer without any stats
// activity must be a no-op (no notifications expected on the mock).
TEST(ThreadHeapStatsCollectorTest, RegisterUnregisterObserver) {
ThreadHeapStatsCollector stats_collector;
MockThreadHeapStatsObserver observer;
stats_collector.RegisterObserver(&observer);
stats_collector.UnregisterObserver(&observer);
}
// Allocated-space (page-level) changes are forwarded to observers
// immediately, without thresholding.
TEST(ThreadHeapStatsCollectorTest, ObserveAllocatedSpace) {
ThreadHeapStatsCollector stats_collector;
MockThreadHeapStatsObserver observer;
stats_collector.RegisterObserver(&observer);
EXPECT_CALL(observer, IncreaseAllocatedSpace(1024));
stats_collector.IncreaseAllocatedSpace(1024);
EXPECT_CALL(observer, DecreaseAllocatedSpace(1024));
stats_collector.DecreaseAllocatedSpace(1024);
stats_collector.UnregisterObserver(&observer);
}
// A full GC cycle reports the marked bytes to observers through
// ResetAllocatedObjectSize.
TEST(ThreadHeapStatsCollectorTest, ObserveResetAllocatedObjectSize) {
ThreadHeapStatsCollector stats_collector;
MockThreadHeapStatsObserver observer;
stats_collector.RegisterObserver(&observer);
EXPECT_CALL(observer, ResetAllocatedObjectSize(2048));
FakeGC(&stats_collector, 2048);
stats_collector.UnregisterObserver(&observer);
}
// The *ForTesting entry points flush object-size deltas to observers
// synchronously, so each call yields exactly one notification.
TEST(ThreadHeapStatsCollectorTest, ObserveAllocatedObjectSize) {
ThreadHeapStatsCollector stats_collector;
MockThreadHeapStatsObserver observer;
stats_collector.RegisterObserver(&observer);
EXPECT_CALL(observer, IncreaseAllocatedObjectSize(1024));
stats_collector.IncreaseAllocatedObjectSizeForTesting(1024);
EXPECT_CALL(observer, DecreaseAllocatedObjectSize(1024));
stats_collector.DecreaseAllocatedObjectSizeForTesting(1024);
stats_collector.UnregisterObserver(&observer);
}
namespace {
// Observer that fakes a full GC cycle from within its first
// IncreaseAllocatedObjectSize notification. Used to exercise re-entrant
// observer notification while the stats collector is flushing deltas.
class ObserverTriggeringGC final : public ThreadHeapStatsObserver {
public:
explicit ObserverTriggeringGC(ThreadHeapStatsCollector* stats_collector)
: stats_collector_(stats_collector) {}
// Records the call; the very first increase triggers a fake GC with the
// reported |bytes| as the marked size.
void IncreaseAllocatedObjectSize(size_t bytes) final {
increase_call_count++;
increased_size_bytes_ += bytes;
if (increase_call_count == 1) {
FakeGC(stats_collector_, bytes);
}
}
// Records the reset call and the marked size it reports.
void ResetAllocatedObjectSize(size_t marked) final {
reset_call_count++;
marked_bytes_ = marked;
}
// Mock out the rest to trigger warnings if used.
MOCK_METHOD1(IncreaseAllocatedSpace, void(size_t));
MOCK_METHOD1(DecreaseAllocatedSpace, void(size_t));
MOCK_METHOD1(DecreaseAllocatedObjectSize, void(size_t));
size_t marked_bytes() const { return marked_bytes_; }
size_t increased_size_bytes() const { return increased_size_bytes_; }
size_t increase_call_count = 0;
size_t reset_call_count = 0;
private:
ThreadHeapStatsCollector* const stats_collector_;
size_t marked_bytes_ = 0;
size_t increased_size_bytes_ = 0;
};
} // namespace
// An observer may trigger a GC from within IncreaseAllocatedObjectSize.
// Observers notified afterwards must then see the post-GC reset and a
// zero-delta increase (the GC cleared the delta counters).
TEST(ThreadHeapStatsCollectorTest, ObserverTriggersGC) {
ThreadHeapStatsCollector stats_collector;
ObserverTriggeringGC gc_observer(&stats_collector);
MockThreadHeapStatsObserver mock_observer;
// Internal detail: First registered observer is also notified first.
stats_collector.RegisterObserver(&gc_observer);
stats_collector.RegisterObserver(&mock_observer);
// mock_observer is notified after triggering GC. This means that it should
// see the reset call with the fully marked size (as gc_observer fakes a GC
// with that size).
EXPECT_CALL(mock_observer, ResetAllocatedObjectSize(1024));
// Since the GC clears counters, it should see an increase call with a delta
// of zero bytes.
EXPECT_CALL(mock_observer, IncreaseAllocatedObjectSize(0));
// Trigger scenario.
stats_collector.IncreaseAllocatedObjectSizeForTesting(1024);
// gc_observer sees both calls exactly once.
EXPECT_EQ(1u, gc_observer.increase_call_count);
EXPECT_EQ(1u, gc_observer.reset_call_count);
// gc_observer sees the increased bytes and the reset call with the fully
// marked size.
EXPECT_EQ(1024u, gc_observer.increased_size_bytes());
EXPECT_EQ(1024u, gc_observer.marked_bytes());
stats_collector.UnregisterObserver(&gc_observer);
stats_collector.UnregisterObserver(&mock_observer);
}
} // namespace blink
......@@ -79,9 +79,6 @@ void PersistentRegion::ReleasePersistentNode(
// list of PersistentNodes.
void PersistentRegion::TracePersistentNodes(Visitor* visitor,
ShouldTraceCallback should_trace) {
size_t debug_marked_object_size = ProcessHeap::TotalMarkedObjectSize();
base::debug::Alias(&debug_marked_object_size);
free_list_head_ = nullptr;
int persistent_count = 0;
PersistentNodeSlots** prev_next = &slots_;
......@@ -103,7 +100,6 @@ void PersistentRegion::TracePersistentNodes(Visitor* visitor,
if (!should_trace(visitor, node))
continue;
node->TracePersistentNode(visitor);
debug_marked_object_size = ProcessHeap::TotalMarkedObjectSize();
}
}
if (free_count == PersistentNodeSlots::kSlotCount) {
......
......@@ -29,7 +29,6 @@ void BlinkGCFreeHook(uint8_t* address) {
void ProcessHeap::Init() {
total_allocated_space_ = 0;
total_allocated_object_size_ = 0;
total_marked_object_size_ = 0;
GCInfoTable::CreateGlobalTable();
......@@ -41,7 +40,6 @@ void ProcessHeap::Init() {
void ProcessHeap::ResetHeapCounters() {
total_allocated_object_size_ = 0;
total_marked_object_size_ = 0;
}
CrossThreadPersistentRegion& ProcessHeap::GetCrossThreadPersistentRegion() {
......@@ -63,6 +61,5 @@ Mutex& ProcessHeap::CrossThreadPersistentMutex() {
std::atomic_size_t ProcessHeap::total_allocated_space_{0};
std::atomic_size_t ProcessHeap::total_allocated_object_size_{0};
std::atomic_size_t ProcessHeap::total_marked_object_size_{0};
} // namespace blink
......@@ -43,15 +43,6 @@ class PLATFORM_EXPORT ProcessHeap {
static size_t TotalAllocatedObjectSize() {
return total_allocated_object_size_.load(std::memory_order_relaxed);
}
static void IncreaseTotalMarkedObjectSize(size_t delta) {
total_marked_object_size_.fetch_add(delta, std::memory_order_relaxed);
}
static void DecreaseTotalMarkedObjectSize(size_t delta) {
total_marked_object_size_.fetch_sub(delta, std::memory_order_relaxed);
}
static size_t TotalMarkedObjectSize() {
return total_marked_object_size_.load(std::memory_order_relaxed);
}
static void IncreaseTotalAllocatedSpace(size_t delta) {
total_allocated_space_.fetch_add(delta, std::memory_order_relaxed);
}
......@@ -66,7 +57,6 @@ class PLATFORM_EXPORT ProcessHeap {
private:
static std::atomic_size_t total_allocated_space_;
static std::atomic_size_t total_allocated_object_size_;
static std::atomic_size_t total_marked_object_size_;
friend class ThreadState;
};
......
......@@ -1637,16 +1637,6 @@ void ThreadState::MarkPhaseEpilogue(BlinkGC::MarkingType marking_type) {
if (ShouldVerifyMarking())
VerifyMarking(marking_type);
if (Heap().stats_collector()->allocated_bytes_since_prev_gc() > 0) {
ProcessHeap::DecreaseTotalAllocatedObjectSize(static_cast<size_t>(
Heap().stats_collector()->allocated_bytes_since_prev_gc()));
} else {
ProcessHeap::IncreaseTotalAllocatedObjectSize(static_cast<size_t>(
-Heap().stats_collector()->allocated_bytes_since_prev_gc()));
}
ProcessHeap::DecreaseTotalMarkedObjectSize(
Heap().stats_collector()->previous().marked_bytes);
ProcessHeap::IncreaseTotalMarkedObjectSize(marked_bytes);
Heap().stats_collector()->NotifyMarkingCompleted(marked_bytes);
WTF::Partitions::ReportMemoryUsageHistogram();
......
......@@ -27,11 +27,11 @@ constexpr BlinkGC::StackState ToBlinkGCStackState(
UnifiedHeapController::UnifiedHeapController(ThreadState* thread_state)
: thread_state_(thread_state) {
thread_state->Heap().stats_collector()->SetUnifiedHeapController(this);
thread_state->Heap().stats_collector()->RegisterObserver(this);
}
UnifiedHeapController::~UnifiedHeapController() {
thread_state_->Heap().stats_collector()->SetUnifiedHeapController(nullptr);
thread_state_->Heap().stats_collector()->UnregisterObserver(this);
}
void UnifiedHeapController::TracePrologue(
......@@ -98,7 +98,6 @@ void UnifiedHeapController::TraceEpilogue(
static_cast<size_t>(stats_collector->marked_bytes());
summary->time = stats_collector->marking_time_so_far().InMillisecondsF();
buffered_allocated_size_ = 0;
old_allocated_bytes_since_prev_gc_ = 0;
if (!thread_state_->IsSweepingInProgress()) {
// Sweeping was finished during the atomic pause. Update statistics needs to
......@@ -181,29 +180,24 @@ bool UnifiedHeapController::IsRootForNonTracingGC(
return IsRootForNonTracingGCInternal(handle);
}
void UnifiedHeapController::UpdateAllocatedObjectSize(
int64_t allocated_bytes_since_prev_gc) {
int64_t delta =
allocated_bytes_since_prev_gc - old_allocated_bytes_since_prev_gc_;
old_allocated_bytes_since_prev_gc_ = allocated_bytes_since_prev_gc;
if (delta < 0) {
// TODO(mlippautz): Add support for negative deltas in V8.
buffered_allocated_size_ += delta;
return;
}
constexpr int64_t kMinimumReportingSize = 1024;
buffered_allocated_size_ += static_cast<int64_t>(delta);
void UnifiedHeapController::IncreaseAllocatedObjectSize(size_t delta_bytes) {
buffered_allocated_size_ += delta_bytes;
// Reported from a recursive sweeping call.
if (thread_state()->IsSweepingInProgress() &&
thread_state()->SweepForbidden())
thread_state()->SweepForbidden()) {
return;
}
if (buffered_allocated_size_ > kMinimumReportingSize) {
if (buffered_allocated_size_ > 0) {
IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_size_));
buffered_allocated_size_ = 0;
}
}
// Buffers object-size decreases locally instead of reporting them; negative
// deltas are netted against future increases before forwarding to V8.
void UnifiedHeapController::DecreaseAllocatedObjectSize(size_t delta_bytes) {
// TODO(mlippautz): Add support for negative deltas in V8.
buffered_allocated_size_ -= delta_bytes;
}
} // namespace blink
......@@ -6,6 +6,7 @@
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_UNIFIED_HEAP_CONTROLLER_H_
#include "base/macros.h"
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "v8/include/v8.h"
......@@ -29,7 +30,8 @@ class ThreadState;
// Oilpan does not consider references from DOM wrappers (JavaScript objects on
// V8's heap) as roots for such garbage collections.
class PLATFORM_EXPORT UnifiedHeapController final
: public v8::EmbedderHeapTracer {
: public v8::EmbedderHeapTracer,
public ThreadHeapStatsObserver {
DISALLOW_IMPLICIT_CONSTRUCTORS(UnifiedHeapController);
public:
......@@ -47,8 +49,13 @@ class PLATFORM_EXPORT UnifiedHeapController final
ThreadState* thread_state() const { return thread_state_; }
// Forwarded from ThreadHeapStatsCollector.
void UpdateAllocatedObjectSize(int64_t);
// ThreadHeapStatsObserver implementation.
void IncreaseAllocatedObjectSize(size_t) final;
void DecreaseAllocatedObjectSize(size_t) final;
// Not needed.
void ResetAllocatedObjectSize(size_t) final {}
void IncreaseAllocatedSpace(size_t) final {}
void DecreaseAllocatedSpace(size_t) final {}
private:
static bool IsRootForNonTracingGCInternal(
......@@ -60,7 +67,6 @@ class PLATFORM_EXPORT UnifiedHeapController final
// Buffered allocated size. Only positive values are forwarded to V8.
int64_t buffered_allocated_size_ = 0;
int64_t old_allocated_bytes_since_prev_gc_ = 0;
};
} // namespace blink
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment