Commit 48597fb8 authored by Michael Lippautz, committed by Commit Bot

heap: Remove stand-alone scheduling heuristics

Removes Oilpan's stand-alone scheduling heuristics. Since M77, V8 takes care
of scheduling unified heap garbage collections and also considers memory
pressure of the Blink heap when doing so.
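
The sketch below is illustrative only (not the Blink implementation): it shows how an
embedder can feed its heap size into V8 so the unified scheduler can account for it. It
assumes v8::EmbedderHeapTracer exposes IncreaseAllocatedSize()/DecreaseAllocatedSize()
and that "v8.h" is the include path; the buffering mirrors the
ReportBufferedAllocatedSizeIfPossible() path kept in unified_heap_controller.cc below.

```cpp
#include <cstdint>

#include "v8.h"  // assumed include path for v8::EmbedderHeapTracer

// Buffers allocation deltas from the embedder's heap and forwards them to V8
// in batches, so V8's heap controller can schedule unified GCs with embedder
// memory taken into account.
class AllocationReporter {
 public:
  explicit AllocationReporter(v8::EmbedderHeapTracer* tracer)
      : tracer_(tracer) {}

  // Stats-collector callback: accumulate the delta instead of calling into V8
  // on every allocation or sweep.
  void OnAllocatedBytes(int64_t delta_bytes) { buffered_ += delta_bytes; }

  // Flush once it is safe to call into V8 (e.g. not from a recursive sweep).
  void Flush() {
    if (buffered_ > 0)
      tracer_->IncreaseAllocatedSize(static_cast<size_t>(buffered_));
    else if (buffered_ < 0)
      tracer_->DecreaseAllocatedSize(static_cast<size_t>(-buffered_));
    buffered_ = 0;
  }

 private:
  v8::EmbedderHeapTracer* const tracer_;
  int64_t buffered_ = 0;
};
```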

We still maintain ways to trigger stand-alone GCs, as they should remain supported.
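
As an illustration of what "stand-alone GCs stay supported" means, here is a minimal
sketch that triggers Blink-only collections explicitly. The identifiers come from the
ThreadState declarations and the removed code in this change; the call-site wiring and
the accessibility of CollectGarbage() from embedder code are assumptions.

```cpp
#include "third_party/blink/renderer/platform/heap/thread_state.h"

namespace example {

// Requests stand-alone Blink GCs without relying on any scheduling heuristics.
void TriggerStandAloneGC() {
  blink::ThreadState* state = blink::ThreadState::Current();
  if (!state)
    return;
  // Schedule a precise stand-alone GC for the next return to the event loop
  // (SchedulePreciseGC() is declared in thread_state.h in this change).
  state->SchedulePreciseGC();
  // Or collect immediately and conservatively, mirroring the removed
  // memory-pressure path; all identifiers are taken from the removed code.
  state->CollectGarbage(blink::BlinkGC::kHeapPointersOnStack,
                        blink::BlinkGC::kAtomicMarking,
                        blink::BlinkGC::kConcurrentAndLazySweeping,
                        blink::BlinkGC::GCReason::kConservativeGC);
}

}  // namespace example
```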

Bug: 948807
Change-Id: Idc60921354a5b4996cda7edb601b2242a1fbe213
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1692751
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#694769}
parent b89dec94
@@ -379,10 +379,6 @@ const base::Feature kBlinkHeapIncrementalMarking{
// also adds additional verification passes.
const base::Feature kBlinkHeapIncrementalMarkingStress{
"BlinkHeapIncrementalMarkingStress", base::FEATURE_DISABLED_BY_DEFAULT};
// Enables unified heap garbage collection scheduling where scheduling is
// delegated to V8's heap controller.
const base::Feature kBlinkHeapUnifiedGCScheduling{
"BlinkHeapUnifiedGCScheduling", base::FEATURE_ENABLED_BY_DEFAULT};
// Enables a delay before BufferingBytesConsumer begins reading from its
// underlying consumer when instantiated with CreateWithDelay().
...
@@ -119,7 +119,6 @@ BLINK_COMMON_EXPORT extern const base::Feature kBlinkHeapConcurrentSweeping;
BLINK_COMMON_EXPORT extern const base::Feature kBlinkHeapIncrementalMarking;
BLINK_COMMON_EXPORT extern const base::Feature
kBlinkHeapIncrementalMarkingStress;
BLINK_COMMON_EXPORT extern const base::Feature kBlinkHeapUnifiedGCScheduling;
BLINK_COMMON_EXPORT extern const base::Feature kBufferingBytesConsumerDelay;
BLINK_COMMON_EXPORT extern const base::FeatureParam<int>
...
@@ -176,18 +176,10 @@ void V8GCController::GcEpilogue(v8::Isolate* isolate,
case v8::kGCTypeScavenge:
TRACE_EVENT_END1("devtools.timeline,v8", "MinorGC", "usedHeapSizeAfter",
UsedHeapSize(isolate));
// Scavenger might have dropped nodes.
if (ThreadState::Current()) {
ThreadState::Current()->ScheduleV8FollowupGCIfNeeded(
BlinkGC::kV8MinorGC);
}
break;
case v8::kGCTypeMarkSweepCompact:
TRACE_EVENT_END1("devtools.timeline,v8", "MajorGC", "usedHeapSizeAfter",
UsedHeapSize(isolate));
if (ThreadState::Current())
ThreadState::Current()->ScheduleV8FollowupGCIfNeeded(
BlinkGC::kV8MajorGC);
break;
case v8::kGCTypeIncrementalMarking:
TRACE_EVENT_END1("devtools.timeline,v8", "MajorGC", "usedHeapSizeAfter",
...
@@ -98,8 +98,6 @@ uint8_t ThreadState::main_thread_state_storage_[sizeof(ThreadState)];
namespace {
const size_t kDefaultAllocatedObjectSizeThreshold = 100 * 1024;
// Duration of one incremental marking step. Should be short enough that it
// doesn't cause jank even though it is scheduled as a normal task.
constexpr base::TimeDelta kDefaultIncrementalMarkingStepDuration =
@@ -143,21 +141,6 @@ ThreadState::ThreadState()
heap_ = std::make_unique<ThreadHeap>(this);
}
// Implementation for RAILModeObserver
void ThreadState::OnRAILModeChanged(RAILMode new_mode) {
should_optimize_for_load_time_ = new_mode == RAILMode::kLoad;
// When switching RAIL mode to load we try to avoid incremental marking as
// the write barrier cost is noticeable on throughput and garbage
// accumulated during loading is likely to be alive during that phase. The
// same argument holds for unified heap garbage collections with the
// difference that these collections are triggered by V8 and should thus be
// avoided on that end.
if (should_optimize_for_load_time_ && IsIncrementalMarking() &&
!IsUnifiedGCMarkingInProgress() &&
GetGCState() == GCState::kIncrementalMarkingStepScheduled)
ScheduleIncrementalMarkingFinalize();
}
ThreadState::~ThreadState() {
DCHECK(CheckThread());
if (IsMainThread())
@@ -170,8 +153,6 @@ ThreadState::~ThreadState() {
void ThreadState::AttachMainThread() {
thread_specific_ = new WTF::ThreadSpecific<ThreadState*>();
new (main_thread_state_storage_) ThreadState();
ThreadScheduler::Current()->AddRAILModeObserver(MainThreadState());
}
void ThreadState::AttachCurrentThread() {
@@ -366,154 +347,6 @@ ThreadState::GCSnapshotInfo::GCSnapshotInfo(wtf_size_t num_object_types)
live_size(Vector<size_t>(num_object_types)),
dead_size(Vector<size_t>(num_object_types)) {}
size_t ThreadState::TotalMemorySize() {
return heap_->stats_collector()->object_size_in_bytes() +
WTF::Partitions::TotalSizeOfCommittedPages();
}
size_t ThreadState::EstimatedLiveSize(size_t estimation_base_size,
size_t size_at_last_gc) {
const ThreadHeapStatsCollector& stats_collector = *heap_->stats_collector();
const ThreadHeapStatsCollector::Event& prev = stats_collector.previous();
if (prev.wrapper_count_before_sweeping == 0)
return estimation_base_size;
// (estimated size) = (estimation base size) - (heap size at the last GC) /
// (# of persistent handles at the last GC) *
// (# of persistent handles collected since the last GC)
size_t size_retained_by_collected_persistents = static_cast<size_t>(
1.0 * size_at_last_gc / prev.wrapper_count_before_sweeping *
stats_collector.collected_wrapper_count());
if (estimation_base_size < size_retained_by_collected_persistents)
return 0;
return estimation_base_size - size_retained_by_collected_persistents;
}
double ThreadState::HeapGrowingRate() {
const size_t current_size = heap_->stats_collector()->object_size_in_bytes();
// TODO(mlippautz): Clarify those two parameters below.
const size_t estimated_size =
EstimatedLiveSize(heap_->stats_collector()->previous().marked_bytes,
heap_->stats_collector()->previous().marked_bytes);
// If the estimatedSize is 0, we set a high growing rate to trigger a GC.
double growing_rate =
estimated_size > 0 ? 1.0 * current_size / estimated_size : 100;
TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
"ThreadState::heapEstimatedSizeKB",
CappedSizeInKB(estimated_size));
TRACE_COUNTER1(
TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrowingRate",
base::saturated_cast<base::Histogram::Sample>(100 * growing_rate));
return growing_rate;
}
double ThreadState::PartitionAllocGrowingRate() {
size_t current_size = WTF::Partitions::TotalSizeOfCommittedPages();
size_t estimated_size = EstimatedLiveSize(
current_size, heap_->stats_collector()
->previous()
.partition_alloc_bytes_before_sweeping);
// If the estimatedSize is 0, we set a high growing rate to trigger a GC.
double growing_rate =
estimated_size > 0 ? 1.0 * current_size / estimated_size : 100;
TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
"ThreadState::partitionAllocEstimatedSizeKB",
CappedSizeInKB(estimated_size));
TRACE_COUNTER1(
TRACE_DISABLED_BY_DEFAULT("blink_gc"),
"ThreadState::partitionAllocGrowingRate",
base::saturated_cast<base::Histogram::Sample>(100 * growing_rate));
return growing_rate;
}
// TODO(haraken): We should improve the GC heuristics. The heuristics affect
// performance significantly.
bool ThreadState::JudgeGCThreshold(size_t allocated_object_size_threshold,
size_t total_memory_size_threshold,
double heap_growing_rate_threshold) {
// If the allocated object size or the total memory size is small, don't
// trigger a GC.
if (heap_->stats_collector()->allocated_bytes_since_prev_gc() <
static_cast<int64_t>(allocated_object_size_threshold) ||
TotalMemorySize() < total_memory_size_threshold)
return false;
VLOG(2) << "[state:" << this << "] JudgeGCThreshold:"
<< " heapGrowingRate=" << std::setprecision(1) << HeapGrowingRate()
<< " partitionAllocGrowingRate=" << std::setprecision(1)
<< PartitionAllocGrowingRate();
// If the growing rate of Oilpan's heap or PartitionAlloc is high enough,
// trigger a GC.
return HeapGrowingRate() >= heap_growing_rate_threshold ||
PartitionAllocGrowingRate() >= heap_growing_rate_threshold;
}
bool ThreadState::ShouldScheduleV8FollowupGC() {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
return false;
return JudgeGCThreshold(kDefaultAllocatedObjectSizeThreshold,
32 * 1024 * 1024, 1.5);
}
bool ThreadState::ShouldForceConservativeGC() {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
return false;
// TODO(haraken): 400% is too large. Lower the heap growing factor.
return JudgeGCThreshold(kDefaultAllocatedObjectSizeThreshold,
32 * 1024 * 1024, 5.0);
}
// If we're consuming too much memory, trigger a conservative GC
// aggressively. This is a safe guard to avoid OOM.
bool ThreadState::ShouldForceMemoryPressureGC() {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
return false;
if (TotalMemorySize() < 300 * 1024 * 1024)
return false;
return JudgeGCThreshold(0, 0, 1.5);
}
void ThreadState::ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gc_type) {
VLOG(2) << "[state:" << this << "] ScheduleV8FollowupGCIfNeeded: v8_gc_type="
<< ((gc_type == BlinkGC::kV8MajorGC) ? "MajorGC" : "MinorGC");
DCHECK(CheckThread());
if (IsGCForbidden())
return;
if (gc_type == BlinkGC::kV8MajorGC) {
// In case of unified heap garbage collections a V8 major GC also collects
// the Blink heap.
return;
}
if (ShouldScheduleV8FollowupGC()) {
// When we want to optimize for load time, we should prioritize throughput
// over latency and not do incremental marking.
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapIncrementalMarking) &&
!should_optimize_for_load_time_) {
VLOG(2) << "[state:" << this << "] "
<< "ScheduleV8FollowupGCIfNeeded: Scheduled incremental v8 "
"followup GC";
ScheduleIncrementalGC(BlinkGC::GCReason::kIncrementalV8FollowupGC);
} else {
VLOG(2) << "[state:" << this << "] "
<< "ScheduleV8FollowupGCIfNeeded: Scheduled precise GC";
SchedulePreciseGC();
}
}
}
void ThreadState::WillStartV8GC(BlinkGC::V8GCType gc_type) {
// Finish Oilpan's complete sweeping before running a V8 major GC.
// This will let the GC collect more V8 objects.
@@ -548,32 +381,6 @@ void ThreadState::ScheduleGCIfNeeded() {
if (IsUnifiedGCMarkingInProgress())
return;
ReportMemoryToV8();
if (ShouldForceMemoryPressureGC()) {
CompleteSweep();
if (ShouldForceMemoryPressureGC()) {
VLOG(2) << "[state:" << this << "] "
<< "ScheduleGCIfNeeded: Scheduled memory pressure GC";
CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kAtomicMarking,
BlinkGC::kConcurrentAndLazySweeping,
BlinkGC::GCReason::kMemoryPressureGC);
return;
}
}
if (ShouldForceConservativeGC()) {
CompleteSweep();
if (ShouldForceConservativeGC()) {
VLOG(2) << "[state:" << this << "] "
<< "ScheduleGCIfNeeded: Scheduled conservative GC";
CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kAtomicMarking,
BlinkGC::kConcurrentAndLazySweeping,
BlinkGC::GCReason::kConservativeGC);
return;
}
}
if (GetGCState() == kNoGCScheduled &&
base::FeatureList::IsEnabled(
blink::features::kBlinkHeapIncrementalMarkingStress)) {
@@ -1151,19 +958,6 @@ void ThreadState::RemoveObserver(BlinkGCObserver* observer) {
observers_.erase(observer);
}
void ThreadState::ReportMemoryToV8() {
if (!isolate_ || base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
return;
const size_t current_heap_size =
heap_->stats_collector()->object_size_in_bytes();
int64_t diff = static_cast<int64_t>(current_heap_size) -
static_cast<int64_t>(reported_memory_to_v8_);
isolate_->AdjustAmountOfExternalAllocatedMemory(diff);
reported_memory_to_v8_ = current_heap_size;
}
void ThreadState::EnterStaticReferenceRegistrationDisabledScope() {
static_persistent_registration_disabled_count_++;
}
...
@@ -38,7 +38,6 @@
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/heap/threading_traits.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/scheduler/public/rail_mode_observer.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h" #include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/forward.h" #include "third_party/blink/renderer/platform/wtf/forward.h"
#include "third_party/blink/renderer/platform/wtf/hash_map.h" #include "third_party/blink/renderer/platform/wtf/hash_map.h"
...@@ -126,7 +125,7 @@ class PLATFORM_EXPORT BlinkGCObserver { ...@@ -126,7 +125,7 @@ class PLATFORM_EXPORT BlinkGCObserver {
ThreadState* thread_state_; ThreadState* thread_state_;
}; };
class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { class PLATFORM_EXPORT ThreadState final {
USING_FAST_MALLOC(ThreadState); USING_FAST_MALLOC(ThreadState);
public: public:
...@@ -253,7 +252,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -253,7 +252,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
void SchedulePreciseGC(); void SchedulePreciseGC();
void ScheduleIncrementalGC(BlinkGC::GCReason); void ScheduleIncrementalGC(BlinkGC::GCReason);
void ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType);
void ScheduleForcedGCForTesting();
void ScheduleGCIfNeeded();
void WillStartV8GC(BlinkGC::V8GCType);
@@ -386,9 +384,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
return current_gc_data_.visitor.get();
}
// Implementation for RAILModeObserver
void OnRAILModeChanged(RAILMode new_mode) override;
// Returns true if the marking verifier is enabled, false otherwise.
bool IsVerifyMarkingEnabled() const;
@@ -413,7 +408,7 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
intptr_t* end_of_stack);
ThreadState();
~ThreadState() override;
~ThreadState();
void EnterNoAllocationScope() { no_allocation_count_++; }
void LeaveNoAllocationScope() { no_allocation_count_--; }
@@ -502,33 +497,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
// See |DetachCurrentThread|.
void RunTerminationGC();
// ShouldForceConservativeGC
// implements the heuristics that are used to determine when to collect
// garbage.
// If shouldForceConservativeGC returns true, we force the garbage
// collection immediately. Otherwise, if should*GC returns true, we
// record that we should garbage collect the next time we return
// to the event loop. If both return false, we don't need to
// collect garbage at this point.
bool ShouldForceConservativeGC();
// V8 minor or major GC is likely to drop a lot of references to objects
// on Oilpan's heap. We give a chance to schedule a GC.
bool ShouldScheduleV8FollowupGC();
// Internal helpers to handle memory pressure conditions.
// Returns true if memory use is in a near-OOM state
// (aka being under "memory pressure".)
bool ShouldForceMemoryPressureGC();
size_t EstimatedLiveSize(size_t current_size, size_t size_at_last_gc);
size_t TotalMemorySize();
double HeapGrowingRate();
double PartitionAllocGrowingRate();
bool JudgeGCThreshold(size_t allocated_object_size_threshold,
size_t total_memory_size_threshold,
double heap_growing_rate_threshold);
void RunScheduledGC(BlinkGC::StackState);
void UpdateIncrementalMarkingStepDuration();
@@ -537,8 +505,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
void InvokePreFinalizers();
void ReportMemoryToV8();
// Adds the given observer to the ThreadState's observer list. This doesn't
// take ownership of the argument. The argument must not be null. The argument
// must not be registered before calling this.
@@ -613,7 +579,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
// have to clear before initiating LSan's leak detection.
HashSet<PersistentNode*> static_persistents_;
size_t reported_memory_to_v8_ = 0;
int gc_age_ = 0;
struct GCData {
...
@@ -29,14 +29,10 @@ constexpr BlinkGC::StackState ToBlinkGCStackState(
UnifiedHeapController::UnifiedHeapController(ThreadState* thread_state)
: thread_state_(thread_state) {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
thread_state->Heap().stats_collector()->RegisterObserver(this);
}
UnifiedHeapController::~UnifiedHeapController() {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling))
thread_state_->Heap().stats_collector()->UnregisterObserver(this);
}
@@ -85,14 +81,11 @@ void UnifiedHeapController::TraceEpilogue(
thread_state_->AtomicPauseSweepAndCompact(
BlinkGC::kIncrementalMarking, BlinkGC::kConcurrentAndLazySweeping);
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling)) {
ThreadHeapStatsCollector* const stats_collector =
thread_state_->Heap().stats_collector();
summary->allocated_size =
static_cast<size_t>(stats_collector->marked_bytes());
summary->time = stats_collector->marking_time_so_far().InMillisecondsF();
}
buffered_allocated_size_ = 0;
}
thread_state_->AtomicPauseEpilogue();
@@ -188,8 +181,6 @@ bool UnifiedHeapController::IsRootForNonTracingGC(
}
void UnifiedHeapController::ReportBufferedAllocatedSizeIfPossible() {
DCHECK(base::FeatureList::IsEnabled(
blink::features::kBlinkHeapUnifiedGCScheduling));
// Reported from a recursive sweeping call.
if (thread_state()->IsSweepingInProgress() &&
thread_state()->SweepForbidden()) {
...