Commit 6521de90 authored by Michael Lippautz, committed by Commit Bot

heap: Remove page navigation GC

Garbage collections on page navigations are also initiated by V8 (see
v8::Heap::NotifyContextDisposed for non-dependent contexts) using its
memory reducer mechanism. Since these collections cover Blink through the
unified heap, we do not need to schedule anything else there.
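
[Editorial illustration, not part of the original message: a minimal sketch of the embedder-side path referred to above, assuming v8::Isolate::ContextDisposedNotification() is the public entry point that forwards to the internal v8::Heap::NotifyContextDisposed().]

#include <v8.h>

// Called by the embedder when a top-level (non-dependent) context goes away,
// e.g. on a page navigation. V8 forwards this to Heap::NotifyContextDisposed
// and may start its memory reducer; with the unified heap the resulting GCs
// also trace Blink's Oilpan objects, so Blink needs no extra scheduling.
void OnTopLevelContextDisposed(v8::Isolate* isolate) {
  isolate->ContextDisposedNotification(false);  // false: not a dependent context
}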

Bug: 948807
Change-Id: I17a7d9235fd80b8cfc91fd55cab904873166056f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1605408
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#658988}
parent 178c1926
@@ -936,18 +936,6 @@ bool FrameLoader::PrepareForCommit() {
   PluginScriptForbiddenScope forbid_plugin_destructor_scripting;
   DocumentLoader* pdl = provisional_document_loader_;
-  if (frame_->GetDocument()) {
-    unsigned node_count = 0;
-    for (Frame* frame = frame_; frame; frame = frame->Tree().TraverseNext()) {
-      if (auto* local_frame = DynamicTo<LocalFrame>(frame))
-        node_count += local_frame->GetDocument()->NodeCount();
-    }
-    unsigned total_node_count =
-        InstanceCounters::CounterValue(InstanceCounters::kNodeCounter);
-    float ratio = static_cast<float>(node_count) / total_node_count;
-    ThreadState::Current()->SchedulePageNavigationGCIfNeeded(ratio);
-  }
   // Don't allow this frame to navigate anymore. This line is needed for
   // navigation triggered from children's unload handlers. Blocking navigations
   // triggered from this frame's unload handler is already covered in
......
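
[Editorial aside, not part of the change: the block removed above estimated what fraction of all live DOM nodes belongs to the navigating frame tree and handed that ratio to the page-navigation GC scheduler. A self-contained sketch of the arithmetic with made-up numbers:]

#include <cstdio>

int main() {
  // Hypothetical numbers for illustration only.
  unsigned node_count = 30000;         // nodes in the frames being navigated away
  unsigned total_node_count = 100000;  // all live DOM nodes (kNodeCounter)
  float ratio = static_cast<float>(node_count) / total_node_count;  // 0.3
  // The removed ThreadState::ShouldSchedulePageNavigationGC() (further down in
  // this diff) then passed 1.5 * (1 - ratio) = 1.05 as the last argument to
  // JudgeGCThreshold() instead of the flat 1.5 used for V8 follow-up GCs, and
  // skipped scheduling entirely for ratio < 0.01.
  std::printf("estimated removal ratio: %.2f\n", ratio);
  return 0;
}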
@@ -18,8 +18,6 @@ const char* BlinkGC::ToString(BlinkGC::GCReason reason) {
       return "ForcedGCForTesting";
     case BlinkGC::GCReason::kMemoryPressureGC:
       return "MemoryPressureGC";
-    case BlinkGC::GCReason::kPageNavigationGC:
-      return "PageNavigationGC";
     case BlinkGC::GCReason::kThreadTerminationGC:
       return "ThreadTerminationGC";
     case BlinkGC::GCReason::kIncrementalV8FollowupGC:
......
@@ -94,7 +94,7 @@ class PLATFORM_EXPORT BlinkGC final {
     kConservativeGC = 2,
     kForcedGCForTesting = 3,
     kMemoryPressureGC = 4,
-    kPageNavigationGC = 5,
+    // kPageNavigationGC = 5,
     kThreadTerminationGC = 6,
     // kTesting = 7,
     // kIncrementalIdleGC = 8,
......
@@ -415,16 +415,6 @@ bool ThreadState::ShouldScheduleV8FollowupGC() {
                           32 * 1024 * 1024, 1.5);
 }
-bool ThreadState::ShouldSchedulePageNavigationGC(
-    float estimated_removal_ratio) {
-  // If estimatedRemovalRatio is low we should let IdleGC handle this.
-  if (estimated_removal_ratio < 0.01)
-    return false;
-  return JudgeGCThreshold(kDefaultAllocatedObjectSizeThreshold,
-                          32 * 1024 * 1024,
-                          1.5 * (1 - estimated_removal_ratio));
-}
 bool ThreadState::ShouldForceConservativeGC() {
   // TODO(haraken): 400% is too large. Lower the heap growing factor.
   return JudgeGCThreshold(kDefaultAllocatedObjectSizeThreshold,
@@ -487,44 +477,6 @@ void ThreadState::WillStartV8GC(BlinkGC::V8GCType gc_type) {
   CompleteSweep();
 }
-void ThreadState::SchedulePageNavigationGCIfNeeded(
-    float estimated_removal_ratio) {
-  VLOG(2) << "[state:" << this << "] SchedulePageNavigationGCIfNeeded: "
-          << "estimatedRemovalRatio=" << std::setprecision(2)
-          << estimated_removal_ratio;
-  DCHECK(CheckThread());
-  if (IsGCForbidden())
-    return;
-  // Finish on-going lazy sweeping.
-  // TODO(haraken): It might not make sense to force completeSweep() for all
-  // page navigations.
-  CompleteSweep();
-  DCHECK(!IsSweepingInProgress());
-  DCHECK(!SweepForbidden());
-  if (ShouldForceMemoryPressureGC()) {
-    VLOG(2) << "[state:" << this << "] "
-            << "SchedulePageNavigationGCIfNeeded: Scheduled memory pressure GC";
-    CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kAtomicMarking,
-                   BlinkGC::kLazySweeping,
-                   BlinkGC::GCReason::kMemoryPressureGC);
-    return;
-  }
-  if (ShouldSchedulePageNavigationGC(estimated_removal_ratio)) {
-    VLOG(2) << "[state:" << this << "] "
-            << "SchedulePageNavigationGCIfNeeded: Scheduled page navigation GC";
-    SchedulePageNavigationGC();
-  }
-}
-void ThreadState::SchedulePageNavigationGC() {
-  DCHECK(CheckThread());
-  DCHECK(!IsSweepingInProgress());
-  SetGCState(kPageNavigationGCScheduled);
-}
 void ThreadState::ScheduleForcedGCForTesting() {
   DCHECK(CheckThread());
   CompleteSweep();
@@ -677,7 +629,6 @@ void UnexpectedGCState(ThreadState::GCState gc_state) {
     UNEXPECTED_GCSTATE(kIncrementalMarkingStepPaused);
     UNEXPECTED_GCSTATE(kIncrementalMarkingStepScheduled);
     UNEXPECTED_GCSTATE(kIncrementalMarkingFinalizeScheduled);
-    UNEXPECTED_GCSTATE(kPageNavigationGCScheduled);
     UNEXPECTED_GCSTATE(kIncrementalGCScheduled);
   }
 }
@@ -697,7 +648,6 @@ void ThreadState::SetGCState(GCState gc_state) {
       VERIFY_STATE_TRANSITION(
          gc_state_ == kNoGCScheduled || gc_state_ == kPreciseGCScheduled ||
          gc_state_ == kForcedGCForTestingScheduled ||
-         gc_state_ == kPageNavigationGCScheduled ||
          gc_state_ == kIncrementalMarkingStepPaused ||
          gc_state_ == kIncrementalMarkingStepScheduled ||
          gc_state_ == kIncrementalMarkingFinalizeScheduled ||
@@ -714,7 +664,6 @@ void ThreadState::SetGCState(GCState gc_state) {
       VERIFY_STATE_TRANSITION(gc_state_ == kIncrementalMarkingStepScheduled);
       break;
     case kForcedGCForTestingScheduled:
-    case kPageNavigationGCScheduled:
     case kPreciseGCScheduled:
       DCHECK(CheckThread());
       DCHECK(!IsSweepingInProgress());
@@ -725,7 +674,6 @@ void ThreadState::SetGCState(GCState gc_state) {
                                   kIncrementalMarkingFinalizeScheduled ||
                               gc_state_ == kPreciseGCScheduled ||
                               gc_state_ == kForcedGCForTestingScheduled ||
-                              gc_state_ == kPageNavigationGCScheduled ||
                               gc_state_ == kIncrementalGCScheduled);
       break;
     case kIncrementalGCScheduled:
@@ -786,11 +734,6 @@ void ThreadState::RunScheduledGC(BlinkGC::StackState stack_state) {
       CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
                      BlinkGC::kLazySweeping, BlinkGC::GCReason::kPreciseGC);
       break;
-    case kPageNavigationGCScheduled:
-      CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kAtomicMarking,
-                     BlinkGC::kEagerSweeping,
-                     BlinkGC::GCReason::kPageNavigationGC);
-      break;
     case kIncrementalMarkingStepScheduled:
       IncrementalMarkingStep(stack_state);
       break;
@@ -1121,7 +1064,6 @@ void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
     COUNT_BY_GC_REASON(ConservativeGC)
     COUNT_BY_GC_REASON(ForcedGCForTesting)
     COUNT_BY_GC_REASON(MemoryPressureGC)
-    COUNT_BY_GC_REASON(PageNavigationGC)
     COUNT_BY_GC_REASON(ThreadTerminationGC)
     COUNT_BY_GC_REASON(IncrementalV8FollowupGC)
     COUNT_BY_GC_REASON(UnifiedHeapGC)
@@ -1460,7 +1402,6 @@ void ThreadState::CollectGarbage(BlinkGC::StackState stack_state,
     COUNT_BY_GC_REASON(ConservativeGC)
     COUNT_BY_GC_REASON(ForcedGCForTesting)
     COUNT_BY_GC_REASON(MemoryPressureGC)
-    COUNT_BY_GC_REASON(PageNavigationGC)
     COUNT_BY_GC_REASON(ThreadTerminationGC)
     COUNT_BY_GC_REASON(IncrementalV8FollowupGC)
     COUNT_BY_GC_REASON(UnifiedHeapGC)
......
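
[Editorial sketch with mock types, not Blink's real classes: the hunks above follow Oilpan's schedule-then-dispatch pattern. Scheduling only records a GC state; the collection happened later when RunScheduledGC() dispatched on that state, which is why removing the feature touches the state enum, the SetGCState() transitions, and the RunScheduledGC() switch together.]

#include <cstdio>

// Names mirror the diff, but the types here are invented for illustration.
enum class GCState { kNoGCScheduled, kPageNavigationGCScheduled };

class MockThreadState {
 public:
  void SchedulePageNavigationGC() {
    // The removed Blink code also DCHECKed the thread and that no sweeping
    // was in progress before setting the state.
    state_ = GCState::kPageNavigationGCScheduled;
  }

  void RunScheduledGC() {
    switch (state_) {
      case GCState::kPageNavigationGCScheduled:
        // The removed case ran an atomic-marking, eager-sweeping collection
        // with GCReason::kPageNavigationGC.
        std::puts("CollectGarbage(atomic marking, eager sweeping)");
        break;
      case GCState::kNoGCScheduled:
        break;
    }
    state_ = GCState::kNoGCScheduled;
  }

 private:
  GCState state_ = GCState::kNoGCScheduled;
};

int main() {
  MockThreadState thread_state;
  thread_state.SchedulePageNavigationGC();
  thread_state.RunScheduledGC();  // prints the mock collection line
  return 0;
}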
@@ -151,7 +151,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
     kIncrementalMarkingFinalizeScheduled,
     kPreciseGCScheduled,
     kForcedGCForTestingScheduled,
-    kPageNavigationGCScheduled,
     kIncrementalGCScheduled,
   };
@@ -224,8 +223,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
   void SchedulePreciseGC();
   void ScheduleIncrementalGC(BlinkGC::GCReason);
   void ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType);
-  void SchedulePageNavigationGCIfNeeded(float estimated_removal_ratio);
-  void SchedulePageNavigationGC();
   void ScheduleForcedGCForTesting();
   void ScheduleGCIfNeeded();
   void PostIdleGCTask();
@@ -491,11 +488,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
   // V8 minor or major GC is likely to drop a lot of references to objects
   // on Oilpan's heap. We give a chance to schedule a GC.
   bool ShouldScheduleV8FollowupGC();
-  // Page navigation is likely to drop a lot of references to objects
-  // on Oilpan's heap. We give a chance to schedule a GC.
-  // estimatedRemovalRatio is the estimated ratio of objects that will be no
-  // longer necessary due to the navigation.
-  bool ShouldSchedulePageNavigationGC(float estimated_removal_ratio);
   // Internal helpers to handle memory pressure conditions.
......