Commit 360a25fc authored by Omer Katz, committed by Commit Bot

heap: Various marking tweaks

This CL does the following:
1) Move flushing of ephemeron pairs to the start of AdvanceMarking
   (also increase the flush frequency by 2x and make the step counters
   actually count marking steps).
2) Introduce additional (disabled) scopes for diagnostics.
3) Increase the frequency of deadline checks when draining worklists
   (by ~8x).
4) Revise worklist draining so that the deadline is checked before
   draining starts (see the sketch after this list).
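
For reference, items 3 and 4 combine into the countdown pattern sketched below. This is a minimal, self-contained illustration, not the CL's code verbatim: the worklist type and all names are simplified stand-ins for Blink's actual API.

```cpp
#include <chrono>
#include <cstddef>
#include <deque>

using Clock = std::chrono::steady_clock;

// Sketch of the revised drain loop. Starting the countdown at zero makes
// the unsigned post-decrement fire on the very first item, so the deadline
// is polled right away rather than only after a full interval of work;
// afterwards it is polled roughly every kDeadlineCheckInterval items.
template <size_t kDeadlineCheckInterval = 150, typename T, typename Callback>
bool DrainWithDeadline(Clock::time_point deadline,
                       std::deque<T>* worklist,
                       Callback callback) {
  size_t countdown = 0;
  while (!worklist->empty()) {
    callback(worklist->front());
    worklist->pop_front();
    if (countdown-- == 0) {  // wraps on 0, then is reset below
      if (Clock::now() >= deadline)
        return false;  // out of time; a later marking step resumes draining
      countdown = kDeadlineCheckInterval;
    }
  }
  return true;  // worklist fully drained
}
```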

Bug: 986235
Change-Id: Ie24827a52cbcb497426fd51bc052fb129c417702
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2283743
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#786229}
parent 44b1a751
@@ -268,11 +268,14 @@ void ThreadHeap::FlushNotFullyConstructedObjects() {
 
 void ThreadHeap::FlushEphemeronPairs(EphemeronProcessing ephemeron_processing) {
   if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
-    if (++steps_since_last_ephemeron_pairs_flush_ !=
+    if (steps_since_last_ephemeron_pairs_flush_ <
         kStepsBeforeEphemeronPairsFlush)
       return;
   }
 
+  ThreadHeapStatsCollector::Scope stats_scope(
+      stats_collector(), ThreadHeapStatsCollector::kMarkFlushEphemeronPairs);
+
   EphemeronPairsWorklist::View view(discovered_ephemeron_pairs_worklist_.get(),
                                     WorklistTaskId::MutatorThread);
   if (!view.IsLocalViewEmpty()) {
@@ -302,22 +305,26 @@ void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
 namespace {
 
-template <typename Worklist, typename Callback>
+static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
+static constexpr size_t kDefaultConcurrentDeadlineCheckInterval =
+    5 * kDefaultDeadlineCheckInterval;
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+          typename Worklist,
+          typename Callback>
 bool DrainWorklistWithDeadline(base::TimeTicks deadline,
                                Worklist* worklist,
                                Callback callback,
                                int task_id) {
-  const size_t kDeadlineCheckInterval = 1250;
   size_t processed_callback_count = 0;
   typename Worklist::EntryType item;
   while (worklist->Pop(task_id, &item)) {
     callback(item);
-    if (++processed_callback_count == kDeadlineCheckInterval) {
+    if (processed_callback_count-- == 0) {
       if (deadline <= base::TimeTicks::Now()) {
         return false;
       }
-      processed_callback_count = 0;
+      processed_callback_count = kDeadlineCheckInterval;
     }
   }
   return true;
@@ -330,7 +337,7 @@ bool ThreadHeap::InvokeEphemeronCallbacks(
     MarkingVisitor* visitor,
     base::TimeTicks deadline) {
   if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
-    if (++steps_since_last_ephemeron_processing_ !=
+    if (steps_since_last_ephemeron_processing_ <
         kStepsBeforeEphemeronProcessing) {
       // Returning "no more work" to avoid excessive processing. The fixed
       // point computation in the atomic pause takes care of correctness.
@@ -338,10 +345,12 @@ bool ThreadHeap::InvokeEphemeronCallbacks(
     }
   }
 
+  FlushEphemeronPairs(EphemeronProcessing::kFullProcessing);
+
   steps_since_last_ephemeron_processing_ = 0;
 
   // Mark any strong pointers that have now become reachable in ephemeron maps.
-  ThreadHeapStatsCollector::Scope stats_scope(
+  ThreadHeapStatsCollector::EnabledScope stats_scope(
       stats_collector(),
       ThreadHeapStatsCollector::kMarkInvokeEphemeronCallbacks);
@@ -364,23 +373,32 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
                                 EphemeronProcessing ephemeron_processing) {
   DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
 
+  ++steps_since_last_ephemeron_pairs_flush_;
+  ++steps_since_last_ephemeron_processing_;
+
   bool finished;
   bool processed_ephemerons = false;
+  FlushEphemeronPairs(ephemeron_processing);
+
   // Ephemeron fixed point loop.
   do {
     {
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking worklist.
       ThreadHeapStatsCollector::Scope stats_scope(
-          stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklist);
+          stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklists);
 
       // Start with mutator-thread-only worklists (not fully constructed).
       // If time runs out, concurrent markers can take care of the rest.
       {
-        ThreadHeapStatsCollector::EnabledScope bailout_scope(
+        ThreadHeapStatsCollector::EnabledScope inner_scope(
            stats_collector(), ThreadHeapStatsCollector::kMarkBailOutObjects);
-        finished = DrainWorklistWithDeadline(
+        // Items in the bailout worklist are only collection backing stores.
+        // These items could take a long time to process, so we should check
+        // the deadline more often (backing stores and large items can also be
+        // found in the regular marking worklist, but those are interleaved
+        // with smaller objects).
+        finished = DrainWorklistWithDeadline<kDefaultDeadlineCheckInterval / 3>(
            deadline, not_safe_to_concurrently_trace_worklist_.get(),
            [visitor](const MarkingItem& item) {
              item.callback(visitor, item.base_object_payload);
@@ -390,53 +408,72 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
           break;
       }
 
-      finished = FlushV8References(deadline);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkFlushV8References);
+        finished = FlushV8References(deadline);
+        if (!finished)
+          break;
+      }
 
-      // Convert |previously_not_fully_constructed_worklist_| to
-      // |marking_worklist_|. This merely re-adds items with the proper
-      // callbacks.
-      finished = DrainWorklistWithDeadline(
-          deadline, previously_not_fully_constructed_worklist_.get(),
-          [visitor](NotFullyConstructedItem& item) {
-            visitor->DynamicallyMarkAddress(
-                reinterpret_cast<ConstAddress>(item));
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessNotFullyconstructeddWorklist);
+        // Convert |previously_not_fully_constructed_worklist_| to
+        // |marking_worklist_|. This merely re-adds items with the proper
+        // callbacks.
+        finished = DrainWorklistWithDeadline(
+            deadline, previously_not_fully_constructed_worklist_.get(),
+            [visitor](NotFullyConstructedItem& item) {
+              visitor->DynamicallyMarkAddress(
+                  reinterpret_cast<ConstAddress>(item));
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
 
-      finished = DrainWorklistWithDeadline(
-          deadline, marking_worklist_.get(),
-          [visitor](const MarkingItem& item) {
-            HeapObjectHeader* header =
-                HeapObjectHeader::FromPayload(item.base_object_payload);
-            DCHECK(!header->IsInConstruction());
-            item.callback(visitor, item.base_object_payload);
-            visitor->AccountMarkedBytes(header);
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessMarkingWorklist);
+        finished = DrainWorklistWithDeadline(
+            deadline, marking_worklist_.get(),
+            [visitor](const MarkingItem& item) {
+              HeapObjectHeader* header =
+                  HeapObjectHeader::FromPayload(item.base_object_payload);
+              DCHECK(!header->IsInConstruction());
+              item.callback(visitor, item.base_object_payload);
+              visitor->AccountMarkedBytes(header);
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
 
-      finished = DrainWorklistWithDeadline(
-          deadline, write_barrier_worklist_.get(),
-          [visitor](HeapObjectHeader* header) {
-            DCHECK(!header->IsInConstruction());
-            GCInfo::From(header->GcInfoIndex())
-                .trace(visitor, header->Payload());
-            visitor->AccountMarkedBytes(header);
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessWriteBarrierWorklist);
+        finished = DrainWorklistWithDeadline(
+            deadline, write_barrier_worklist_.get(),
+            [visitor](HeapObjectHeader* header) {
+              DCHECK(!header->IsInConstruction());
+              GCInfo::From(header->GcInfoIndex())
+                  .trace(visitor, header->Payload());
+              visitor->AccountMarkedBytes(header);
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
     }
 
     if ((ephemeron_processing == EphemeronProcessing::kFullProcessing) ||
         !processed_ephemerons) {
       processed_ephemerons = true;
-      FlushEphemeronPairs(ephemeron_processing);
       finished =
           InvokeEphemeronCallbacks(ephemeron_processing, visitor, deadline);
       if (!finished)
@@ -463,18 +500,21 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     // Convert |previously_not_fully_constructed_worklist_| to
     // |marking_worklist_|. This merely re-adds items with the proper
     // callbacks.
-    finished = DrainWorklistWithDeadline(
-        deadline, previously_not_fully_constructed_worklist_.get(),
-        [visitor](NotFullyConstructedItem& item) {
-          visitor->DynamicallyMarkAddress(reinterpret_cast<ConstAddress>(item));
-        },
-        visitor->task_id());
+    finished =
+        DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
+            deadline, previously_not_fully_constructed_worklist_.get(),
+            [visitor](NotFullyConstructedItem& item) {
+              visitor->DynamicallyMarkAddress(
+                  reinterpret_cast<ConstAddress>(item));
+            },
+            visitor->task_id());
     if (!finished)
       break;
 
     // Iteratively mark all objects that are reachable from the objects
    // currently pushed onto the marking worklist.
-    finished = DrainWorklistWithDeadline(
+    finished = DrainWorklistWithDeadline<
+        kDefaultConcurrentDeadlineCheckInterval>(
         deadline, marking_worklist_.get(),
         [visitor](const MarkingItem& item) {
           HeapObjectHeader* header =
@@ -490,7 +530,8 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     if (!finished)
       break;
 
-    finished = DrainWorklistWithDeadline(
+    finished = DrainWorklistWithDeadline<
+        kDefaultConcurrentDeadlineCheckInterval>(
         deadline, write_barrier_worklist_.get(),
         [visitor](HeapObjectHeader* header) {
           PageFromObject(header)->SynchronizedLoad();
@@ -513,13 +554,14 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
       // Callbacks found by the concurrent marking will be flushed eventually
       // by the mutator thread and then invoked either concurrently or by the
       // mutator thread (in the atomic pause at latest).
-      finished = DrainWorklistWithDeadline(
-          deadline, ephemeron_pairs_to_process_worklist_.get(),
-          [visitor](EphemeronPairItem& item) {
-            visitor->VisitEphemeron(item.key, item.value,
-                                    item.value_trace_callback);
-          },
-          visitor->task_id());
+      finished =
+          DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
+              deadline, ephemeron_pairs_to_process_worklist_.get(),
+              [visitor](EphemeronPairItem& item) {
+                visitor->VisitEphemeron(item.key, item.value,
+                                        item.value_trace_callback);
+              },
+              visitor->task_id());
       if (!finished)
         break;
     }
......
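
A side note on the template change in DrainWorklistWithDeadline above: a defaulted non-type parameter may precede type parameters as long as the latter are deducible from the call, which is what lets call sites either take the default interval or override it (as in `DrainWorklistWithDeadline<kDefaultDeadlineCheckInterval / 3>`). A freestanding sketch with illustrative names:

```cpp
#include <cstddef>
#include <vector>

// Defaulted non-type parameter first; Container and Callback are deduced.
template <size_t kInterval = 150, typename Container, typename Callback>
void ForEachWithPolling(Container& items, Callback cb) {
  size_t n = 0;
  for (auto& item : items) {
    cb(item);
    if (++n % kInterval == 0) {
      // poll a deadline (or other cheap check) here
    }
  }
}

int main() {
  std::vector<int> v{1, 2, 3};
  ForEachWithPolling(v, [](int) {});      // default interval: 150
  ForEachWithPolling<50>(v, [](int) {});  // ~3x more frequent polling
  return 0;
}
```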
@@ -459,7 +459,7 @@ class PLATFORM_EXPORT ThreadHeap {
   static ThreadHeap* main_thread_heap_;
 
-  static constexpr size_t kStepsBeforeEphemeronPairsFlush = 8u;
+  static constexpr size_t kStepsBeforeEphemeronPairsFlush = 4u;
   size_t steps_since_last_ephemeron_pairs_flush_ = 0;
   static constexpr size_t kStepsBeforeEphemeronProcessing = 16u;
   size_t steps_since_last_ephemeron_processing_ = 0;
......
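
A plausible reading of why the gates in heap.cc flipped from `++counter != kThreshold` to `counter < kThreshold` (with the increments hoisted into AdvanceMarking, per item 1 of the CL description): an equality test only fires when an increment lands exactly on the threshold, so a counter that overshoots never triggers again, while a comparison fires for any value at or past the threshold. A hypothetical sketch contrasting the two gates:

```cpp
#include <cstddef>

constexpr size_t kThreshold = 4;

// Old-style gate: fires only when the increment lands exactly on
// kThreshold; if the counter is ever bumped past it elsewhere, the
// equality is skipped and the flush never happens.
bool ShouldFlushEquality(size_t& counter) {
  return ++counter == kThreshold;
}

// New-style gate: the caller increments once per marking step, and any
// value at or past the threshold triggers the flush.
bool ShouldFlushComparison(size_t counter) {
  return counter >= kThreshold;
}
```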
@@ -158,7 +158,7 @@ base::TimeDelta ThreadHeapStatsCollector::Event::incremental_marking_time()
 base::TimeDelta
 ThreadHeapStatsCollector::Event::worklist_processing_time_foreground() const {
-  return scope_data[kMarkProcessWorklist];
+  return scope_data[kMarkProcessWorklists];
 }
 
 base::TimeDelta ThreadHeapStatsCollector::Event::atomic_marking_time() const {
......
@@ -40,31 +40,37 @@ class PLATFORM_EXPORT ThreadHeapStatsObserver {
   virtual void DecreaseAllocatedObjectSize(size_t) = 0;
 };
 
-#define FOR_ALL_SCOPES(V)              \
-  V(AtomicPauseCompaction)             \
-  V(AtomicPauseMarkEpilogue)           \
-  V(AtomicPauseMarkPrologue)           \
-  V(AtomicPauseMarkRoots)              \
-  V(AtomicPauseMarkTransitiveClosure)  \
-  V(AtomicPauseSweepAndCompact)        \
-  V(CompleteSweep)                     \
-  V(IncrementalMarkingFinalize)        \
-  V(IncrementalMarkingStartMarking)    \
-  V(IncrementalMarkingStep)            \
-  V(InvokePreFinalizers)               \
-  V(LazySweepInIdle)                   \
-  V(LazySweepOnAllocation)             \
-  V(MarkBailOutObjects)                \
-  V(MarkInvokeEphemeronCallbacks)      \
-  V(MarkProcessWorklist)               \
-  V(MarkNotFullyConstructedObjects)    \
-  V(MarkWeakProcessing)                \
-  V(UnifiedMarkingStep)                \
-  V(VisitCrossThreadPersistents)       \
-  V(VisitPersistentRoots)              \
-  V(VisitPersistents)                  \
-  V(VisitRoots)                        \
-  V(VisitStackRoots)                   \
+#define FOR_ALL_SCOPES(V)                      \
+  V(AtomicPauseCompaction)                     \
+  V(AtomicPauseMarkEpilogue)                   \
+  V(AtomicPauseMarkPrologue)                   \
+  V(AtomicPauseMarkRoots)                      \
+  V(AtomicPauseMarkTransitiveClosure)          \
+  V(AtomicPauseSweepAndCompact)                \
+  V(CompleteSweep)                             \
+  V(IncrementalMarkingFinalize)                \
+  V(IncrementalMarkingStartMarking)            \
+  V(IncrementalMarkingStep)                    \
+  V(IncrementalMarkingWithDeadline)            \
+  V(InvokePreFinalizers)                       \
+  V(LazySweepInIdle)                           \
+  V(LazySweepOnAllocation)                     \
+  V(MarkBailOutObjects)                        \
+  V(MarkInvokeEphemeronCallbacks)              \
+  V(MarkFlushV8References)                     \
+  V(MarkFlushEphemeronPairs)                   \
+  V(MarkProcessWorklists)                      \
+  V(MarkProcessMarkingWorklist)                \
+  V(MarkProcessWriteBarrierWorklist)           \
+  V(MarkProcessNotFullyconstructeddWorklist)   \
+  V(MarkNotFullyConstructedObjects)            \
+  V(MarkWeakProcessing)                        \
+  V(UnifiedMarkingStep)                        \
+  V(VisitCrossThreadPersistents)               \
+  V(VisitPersistentRoots)                      \
+  V(VisitPersistents)                          \
+  V(VisitRoots)                                \
+  V(VisitStackRoots)                           \
   V(VisitRememberedSets)
 
 #define FOR_ALL_CONCURRENT_SCOPES(V) \
......
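
For readers unfamiliar with the FOR_ALL_SCOPES construct above: it is an X-macro, where each V(...) entry expands once per use site. A sketch of the typical consumption pattern (illustrative names; the real enum and tracing tables live in ThreadHeapStatsCollector):

```cpp
// Each V(name) line expands to one enumerator (kMarkProcessWorklists,
// kMarkFlushEphemeronPairs, ...) wherever the list macro is applied.
#define MY_SCOPES(V)        \
  V(MarkProcessWorklists)   \
  V(MarkFlushEphemeronPairs)

enum ScopeId {
#define DECLARE_ENUM(name) k##name,
  MY_SCOPES(DECLARE_ENUM)
#undef DECLARE_ENUM
  kNumScopeIds
};
```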
@@ -1180,7 +1180,6 @@ void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
     skip_incremental_marking_for_testing_ = false;
   } else {
     complete = MarkPhaseAdvanceMarking(
-        base::TimeTicks::Now() +
         marking_scheduling_->GetNextIncrementalStepDurationForTask(
             Heap().stats_collector()->object_size_in_bytes()),
         EphemeronProcessing::kPartialProcessing);
@@ -1345,7 +1344,8 @@ void ThreadState::AtomicPauseMarkTransitiveClosure() {
       Heap().stats_collector(),
       ThreadHeapStatsCollector::kAtomicPauseMarkTransitiveClosure, "epoch",
       gc_age_, "forced", IsForcedGC(current_gc_data_.reason));
-  CHECK(MarkPhaseAdvanceMarking(base::TimeTicks::Max(),
+  // base::TimeTicks::Now() + base::TimeDelta::Max() == base::TimeTicks::Max()
+  CHECK(MarkPhaseAdvanceMarking(base::TimeDelta::Max(),
                                 EphemeronProcessing::kFullProcessing));
 }
@@ -1588,20 +1588,23 @@ bool ThreadState::MarkPhaseAdvanceMarkingBasedOnSchedule(
     base::TimeDelta max_deadline,
     EphemeronProcessing ephemeron_processing) {
   return MarkPhaseAdvanceMarking(
-      base::TimeTicks::Now() +
-          std::min(max_deadline,
-                   marking_scheduling_->GetNextIncrementalStepDurationForTask(
-                       Heap().stats_collector()->object_size_in_bytes())),
+      std::min(max_deadline,
+               marking_scheduling_->GetNextIncrementalStepDurationForTask(
+                   Heap().stats_collector()->object_size_in_bytes())),
       ephemeron_processing);
 }
 
 bool ThreadState::MarkPhaseAdvanceMarking(
-    base::TimeTicks deadline,
+    base::TimeDelta deadline,
     EphemeronProcessing ephemeron_processing) {
   MarkingVisitor* visitor = current_gc_data_.visitor.get();
-  const bool finished =
-      Heap().AdvanceMarking(reinterpret_cast<MarkingVisitor*>(visitor),
-                            deadline, ephemeron_processing);
+  ThreadHeapStatsCollector::Scope deadline_scope(
+      Heap().stats_collector(),
+      ThreadHeapStatsCollector::kIncrementalMarkingWithDeadline, "deadline_ms",
+      deadline.InMillisecondsF());
+  const bool finished = Heap().AdvanceMarking(
+      reinterpret_cast<MarkingVisitor*>(visitor),
+      base::TimeTicks::Now() + deadline, ephemeron_processing);
   // visitor->marked_bytes() can also include bytes marked during roots
   // visitation which is not counted in worklist_processing_time_foreground.
   // Since the size of the roots is usually small relative to the size of
......
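
The `base::TimeTicks::Now() + base::TimeDelta::Max() == base::TimeTicks::Max()` comment in the atomic-pause hunk relies on base/time's saturating arithmetic, which is what makes passing `base::TimeDelta::Max()` equivalent to the old unbounded `base::TimeTicks::Max()` deadline. A freestanding sketch of the same clamping idea (hypothetical helper, not Chromium's implementation):

```cpp
#include <cstdint>
#include <limits>

// Saturating add on microsecond counts: a finite "now" plus a maximal
// delta clamps to the maximum instead of overflowing, mirroring how
// TimeTicks + TimeDelta behaves in base/time.
int64_t SaturatedAddMicros(int64_t now_us, int64_t delta_us) {
  // Assumes non-negative operands, as with a monotonic clock reading
  // plus a duration.
  if (now_us > std::numeric_limits<int64_t>::max() - delta_us)
    return std::numeric_limits<int64_t>::max();
  return now_us + delta_us;
}
```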
@@ -516,7 +516,7 @@ class PLATFORM_EXPORT ThreadState final {
   void MarkPhaseVisitNotFullyConstructedObjects();
   bool MarkPhaseAdvanceMarkingBasedOnSchedule(base::TimeDelta,
                                               EphemeronProcessing);
-  bool MarkPhaseAdvanceMarking(base::TimeTicks, EphemeronProcessing);
+  bool MarkPhaseAdvanceMarking(base::TimeDelta, EphemeronProcessing);
   void VerifyMarking(BlinkGC::MarkingType);
 
   // Visit the stack after pushing registers onto the stack.
......