Commit 644616e1 authored by Michael Lippautz, committed by Commit Bot

heap: Only compact heap on memory reducing unified heap GCs

These GCs are either already expensive or run during idle time or when
entering the background. Use the opportunity to also perform compaction.

Bug: 958615, 961231

Change-Id: I19c571e9b1b773fe11516c02b0918019fbffb852
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1588561
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#659002}
parent 69cdd619
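
In outline, the change derives the Blink GC reason from V8's trace flags and
gates heap compaction on that reason. A minimal free-standing sketch of the
resulting policy (hypothetical helper names; the real logic is spread across
the hunks below):

  // Sketch only, not Chromium code. V8 sets kReduceMemory on the trace flags
  // for memory-reducing GCs (e.g. memory pressure, entering background).
  BlinkGC::GCReason ReasonFromV8Flags(
      v8::EmbedderHeapTracer::TraceFlags flags) {
    return (flags & v8::EmbedderHeapTracer::TraceFlags::kReduceMemory)
               ? BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC
               : BlinkGC::GCReason::kUnifiedHeapGC;
  }

  // Compaction may significantly lengthen the final atomic pause, so it is
  // now considered only for memory-reducing unified GCs that have enough
  // free-list memory to recover (kFreeListSizeThreshold = 512 KiB).
  bool ShouldCompact(BlinkGC::GCReason reason, size_t free_list_size) {
    return reason == BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC &&
           free_list_size > 512 * 1024;
  }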
--- a/third_party/blink/renderer/platform/heap/blink_gc.cc
+++ b/third_party/blink/renderer/platform/heap/blink_gc.cc
@@ -24,6 +24,8 @@ const char* BlinkGC::ToString(BlinkGC::GCReason reason) {
       return "IncrementalV8FollowupGC";
     case BlinkGC::GCReason::kUnifiedHeapGC:
       return "UnifiedHeapGC";
+    case BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC:
+      return "UnifiedHeapForMemoryReductionGC";
   }
   IMMEDIATE_CRASH();
 }
--- a/third_party/blink/renderer/platform/heap/blink_gc.h
+++ b/third_party/blink/renderer/platform/heap/blink_gc.h
@@ -100,7 +100,8 @@ class PLATFORM_EXPORT BlinkGC final {
     // kIncrementalIdleGC = 8,
     kIncrementalV8FollowupGC = 9,
     kUnifiedHeapGC = 10,
-    kMaxValue = kUnifiedHeapGC,
+    kUnifiedHeapForMemoryReductionGC = 11,
+    kMaxValue = kUnifiedHeapForMemoryReductionGC,
   };

   enum ArenaIndices {
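
The kMaxValue member follows Chromium's enumeration-histogram convention: new
values are appended and kMaxValue is moved, so existing bucket assignments
stay stable. A hedged usage sketch (hypothetical histogram name):

  #include "base/metrics/histogram_functions.h"

  // UmaHistogramEnumeration uses GCReason::kMaxValue to size the histogram,
  // which is why the enum appends value 11 instead of renumbering.
  base::UmaHistogramEnumeration(
      "Example.BlinkGC.GCReason",
      BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC);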
--- a/third_party/blink/renderer/platform/heap/heap_compact.cc
+++ b/third_party/blink/renderer/platform/heap/heap_compact.cc
@@ -346,11 +346,13 @@ bool HeapCompact::ShouldCompact(BlinkGC::StackState stack_state,
     return true;
   }

-  // TODO(mlippautz): Only enable compaction when doing garbage collections that
-  // should aggressively reduce memory footprint.
-  return gc_count_since_last_compaction_ >
-             kGCCountSinceLastCompactionThreshold &&
-         free_list_size_ > kFreeListSizeThreshold;
+  // Only enable compaction when in a memory reduction garbage collection as it
+  // may significantly increase the final garbage collection pause.
+  if (reason == BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC) {
+    return free_list_size_ > kFreeListSizeThreshold;
+  }
+  return false;
 }

 void HeapCompact::Initialize(ThreadState* state) {
--- a/third_party/blink/renderer/platform/heap/heap_compact.h
+++ b/third_party/blink/renderer/platform/heap/heap_compact.h
@@ -118,9 +118,6 @@ class PLATFORM_EXPORT HeapCompact final {
  private:
   class MovableObjectFixups;

-  // Number of GCs that must have passed since last compaction GC.
-  static const int kGCCountSinceLastCompactionThreshold = 10;
-
   // Freelist size threshold that must be exceeded before compaction
   // should be considered.
   static const size_t kFreeListSizeThreshold = 512 * 1024;
--- a/third_party/blink/renderer/platform/heap/thread_state.cc
+++ b/third_party/blink/renderer/platform/heap/thread_state.cc
@@ -1067,6 +1067,7 @@ void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
   COUNT_BY_GC_REASON(ThreadTerminationGC)
   COUNT_BY_GC_REASON(IncrementalV8FollowupGC)
   COUNT_BY_GC_REASON(UnifiedHeapGC)
+  COUNT_BY_GC_REASON(UnifiedHeapForMemoryReductionGC)

 #undef COUNT_BY_GC_REASON
 }
@@ -1405,6 +1406,7 @@ void ThreadState::CollectGarbage(BlinkGC::StackState stack_state,
     COUNT_BY_GC_REASON(ThreadTerminationGC)
     COUNT_BY_GC_REASON(IncrementalV8FollowupGC)
     COUNT_BY_GC_REASON(UnifiedHeapGC)
+    COUNT_BY_GC_REASON(UnifiedHeapForMemoryReductionGC)
   }

 #undef COUNT_BY_GC_REASON
--- a/third_party/blink/renderer/platform/heap/thread_state.h
+++ b/third_party/blink/renderer/platform/heap/thread_state.h
@@ -234,7 +234,9 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
   bool IsSweepingInProgress() const { return gc_phase_ == GCPhase::kSweeping; }
   bool IsUnifiedGCMarkingInProgress() const {
     return IsMarkingInProgress() &&
-           (current_gc_data_.reason == BlinkGC::GCReason::kUnifiedHeapGC);
+           (current_gc_data_.reason == BlinkGC::GCReason::kUnifiedHeapGC ||
+            current_gc_data_.reason ==
+                BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC);
   }

   void EnableCompactionForNextGCForTesting();
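
The testing hook retained above, EnableCompactionForNextGCForTesting(),
appears to short-circuit the new reason check via the early return-true path
visible in the heap_compact.cc hunk. A hedged usage sketch (hypothetical test
body; ThreadState::Current() is Blink's per-thread accessor):

  // Sketch: force compaction on the next GC, independent of whether that GC
  // is a memory-reducing unified GC.
  ThreadState* state = ThreadState::Current();
  state->EnableCompactionForNextGCForTesting();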
--- a/third_party/blink/renderer/platform/heap/unified_heap_controller.cc
+++ b/third_party/blink/renderer/platform/heap/unified_heap_controller.cc
@@ -28,7 +28,8 @@ constexpr BlinkGC::StackState ToBlinkGCStackState(
 UnifiedHeapController::UnifiedHeapController(ThreadState* thread_state)
     : thread_state_(thread_state) {}

-void UnifiedHeapController::TracePrologue() {
+void UnifiedHeapController::TracePrologue(
+    v8::EmbedderHeapTracer::TraceFlags v8_flags) {
   VLOG(2) << "UnifiedHeapController::TracePrologue";
   ThreadHeapStatsCollector::BlinkGCInV8Scope nested_scope(
       thread_state_->Heap().stats_collector());
@@ -40,8 +41,11 @@ void UnifiedHeapController::TracePrologue() {
   // Reset any previously scheduled garbage collections.
   thread_state_->SetGCState(ThreadState::kNoGCScheduled);

   // Start incremental marking with unified tracing.
-  thread_state_->IncrementalMarkingStart(BlinkGC::GCReason::kUnifiedHeapGC);
+  BlinkGC::GCReason gc_reason =
+      (v8_flags & v8::EmbedderHeapTracer::TraceFlags::kReduceMemory)
+          ? BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC
+          : BlinkGC::GCReason::kUnifiedHeapGC;
+  thread_state_->IncrementalMarkingStart(gc_reason);

   is_tracing_done_ = false;
 }
@@ -59,9 +63,9 @@ void UnifiedHeapController::EnterFinalPause(EmbedderStackState stack_state) {
     ThreadHeapStatsCollector::Scope mark_prologue_scope(
         thread_state_->Heap().stats_collector(),
         ThreadHeapStatsCollector::kUnifiedMarkingAtomicPrologue);
-    thread_state_->AtomicPauseMarkPrologue(ToBlinkGCStackState(stack_state),
-                                           BlinkGC::kIncrementalMarking,
-                                           BlinkGC::GCReason::kUnifiedHeapGC);
+    thread_state_->AtomicPauseMarkPrologue(
+        ToBlinkGCStackState(stack_state), BlinkGC::kIncrementalMarking,
+        thread_state_->current_gc_data_.reason);
   }
 }
--- a/third_party/blink/renderer/platform/heap/unified_heap_controller.h
+++ b/third_party/blink/renderer/platform/heap/unified_heap_controller.h
@@ -36,7 +36,7 @@ class PLATFORM_EXPORT UnifiedHeapController final
   explicit UnifiedHeapController(ThreadState*);

   // v8::EmbedderHeapTracer implementation.
-  void TracePrologue() final;
+  void TracePrologue(v8::EmbedderHeapTracer::TraceFlags) final;
   void TraceEpilogue() final;
   void EnterFinalPause(EmbedderStackState) final;
   void RegisterV8References(const std::vector<std::pair<void*, void*>>&) final;
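
For embedders tracking the v8::EmbedderHeapTracer API change, a hedged sketch
of a tracer implementing the interface as declared above (hypothetical class;
AdvanceTracing and IsTracingDone belong to the interface of this era but are
elided from the hunk):

  #include <utility>
  #include <vector>
  #include "v8.h"

  class ExampleTracer final : public v8::EmbedderHeapTracer {
   public:
    void TracePrologue(TraceFlags flags) final {
      // Record whether this GC should aggressively reduce memory.
      reduce_memory_ = (flags & TraceFlags::kReduceMemory) != 0;
    }
    void TraceEpilogue() final {}
    void EnterFinalPause(EmbedderStackState) final {}
    void RegisterV8References(
        const std::vector<std::pair<void*, void*>>&) final {}
    bool AdvanceTracing(double deadline_in_ms) final { return true; }
    bool IsTracingDone() final { return true; }

   private:
    bool reduce_memory_ = false;
  };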