Commit fe1dd740 authored by Omer Katz's avatar Omer Katz Committed by Commit Bot

heap: Reduce frequency of ephemeron processing

Instead of processing ephemerons during each incremental step on the
mutator thread, this CL establishes the following:
* Ephemerons are fully processed during every concurrent marking step.
* Incremental marking on the mutator thread flushes ephemeron pairs
  every few steps. These steps also perform a single iteration of
  ephemeron processing every few steps (lower frequency than flushing
  to allow concurrent markers to do most of the work). Fixed-point
  processing during these tasks is not considered cost-effective.
* Marking during the atomic pause performs full fixed-point processing.

Bug: 986235, 1099415
Change-Id: Id2980ef467b393b4062c5a1206ac1212233e6658
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2260015
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#784417}
parent 7b7c74a4
......@@ -266,7 +266,13 @@ void ThreadHeap::FlushNotFullyConstructedObjects() {
DCHECK(view.IsLocalViewEmpty());
}
void ThreadHeap::FlushEphemeronPairs() {
void ThreadHeap::FlushEphemeronPairs(EphemeronProcessing ephemeron_processing) {
if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
if (++steps_since_last_ephemeron_pairs_flush_ !=
kStepsBeforeEphemeronPairsFlush)
return;
}
EphemeronPairsWorklist::View view(discovered_ephemeron_pairs_worklist_.get(),
WorklistTaskId::MutatorThread);
if (!view.IsLocalViewEmpty()) {
......@@ -274,6 +280,8 @@ void ThreadHeap::FlushEphemeronPairs() {
ephemeron_pairs_to_process_worklist_->MergeGlobalPool(
discovered_ephemeron_pairs_worklist_.get());
}
steps_since_last_ephemeron_pairs_flush_ = 0;
}
void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
......@@ -317,9 +325,20 @@ bool DrainWorklistWithDeadline(base::TimeTicks deadline,
} // namespace
bool ThreadHeap::InvokeEphemeronCallbacks(MarkingVisitor* visitor,
base::TimeTicks deadline) {
FlushEphemeronPairs();
bool ThreadHeap::InvokeEphemeronCallbacks(
EphemeronProcessing ephemeron_processing,
MarkingVisitor* visitor,
base::TimeTicks deadline) {
if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
if (++steps_since_last_ephemeron_processing_ !=
kStepsBeforeEphemeronProcessing) {
// Returning "no more work" to avoid excessive processing. The fixed
// point computation in the atomic pause takes care of correctness.
return true;
}
}
steps_since_last_ephemeron_processing_ = 0;
// Mark any strong pointers that have now become reachable in ephemeron maps.
ThreadHeapStatsCollector::Scope stats_scope(
......@@ -341,10 +360,12 @@ bool ThreadHeap::InvokeEphemeronCallbacks(MarkingVisitor* visitor,
}
bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
base::TimeTicks deadline) {
base::TimeTicks deadline,
EphemeronProcessing ephemeron_processing) {
DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
bool finished;
bool processed_ephemerons = false;
// Ephemeron fixed point loop.
do {
{
......@@ -412,9 +433,15 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
break;
}
finished = InvokeEphemeronCallbacks(visitor, deadline);
if (!finished)
break;
if ((ephemeron_processing == EphemeronProcessing::kFullProcessing) ||
!processed_ephemerons) {
processed_ephemerons = true;
FlushEphemeronPairs(ephemeron_processing);
finished =
InvokeEphemeronCallbacks(ephemeron_processing, visitor, deadline);
if (!finished)
break;
}
// Rerun loop if ephemeron processing queued more objects for tracing.
} while (!marking_worklist_->IsLocalViewEmpty(WorklistTaskId::MutatorThread));
......@@ -425,7 +452,8 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
bool ThreadHeap::HasWorkForConcurrentMarking() const {
return !marking_worklist_->IsGlobalPoolEmpty() ||
!write_barrier_worklist_->IsGlobalPoolEmpty() ||
!previously_not_fully_constructed_worklist_->IsGlobalPoolEmpty();
!previously_not_fully_constructed_worklist_->IsGlobalPoolEmpty() ||
!ephemeron_pairs_to_process_worklist_->IsGlobalPoolEmpty();
}
bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
......@@ -475,6 +503,27 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
visitor->task_id());
if (!finished)
break;
{
ThreadHeapStatsCollector::ConcurrentScope stats_scope(
stats_collector(),
ThreadHeapStatsCollector::kConcurrentMarkInvokeEphemeronCallbacks);
// Then we iterate over the new ephemerons found by the marking visitor.
// Callbacks found by the concurrent marking will be flushed eventually
// by the mutator thread and then invoked either concurrently or by the
// mutator thread (in the atomic pause at latest).
finished = DrainWorklistWithDeadline(
deadline, ephemeron_pairs_to_process_worklist_.get(),
[visitor](EphemeronPairItem& item) {
visitor->VisitEphemeron(item.key, item.value,
item.value_trace_callback);
},
visitor->task_id());
if (!finished)
break;
}
} while (HasWorkForConcurrentMarking());
return finished;
......
......@@ -197,6 +197,8 @@ struct IsGarbageCollectedContainer<
class PLATFORM_EXPORT ThreadHeap {
USING_FAST_MALLOC(ThreadHeap);
using EphemeronProcessing = ThreadState::EphemeronProcessing;
public:
explicit ThreadHeap(ThreadState*);
~ThreadHeap();
......@@ -279,12 +281,12 @@ class PLATFORM_EXPORT ThreadHeap {
// Moves ephemeron pairs from |discovered_ephemeron_pairs_worklist_| to
// |ephemeron_pairs_to_process_worklist_|
void FlushEphemeronPairs();
void FlushEphemeronPairs(EphemeronProcessing);
// Marks not fully constructed objects.
void MarkNotFullyConstructedObjects(MarkingVisitor*);
// Marks the transitive closure including ephemerons.
bool AdvanceMarking(MarkingVisitor*, base::TimeTicks deadline);
bool AdvanceMarking(MarkingVisitor*, base::TimeTicks, EphemeronProcessing);
void VerifyMarking();
// Returns true if concurrent markers will have work to steal
......@@ -383,7 +385,9 @@ class PLATFORM_EXPORT ThreadHeap {
void DestroyMarkingWorklists(BlinkGC::StackState);
void DestroyCompactionWorklists();
bool InvokeEphemeronCallbacks(MarkingVisitor*, base::TimeTicks);
bool InvokeEphemeronCallbacks(EphemeronProcessing,
MarkingVisitor*,
base::TimeTicks);
bool FlushV8References(base::TimeTicks);
......@@ -450,6 +454,11 @@ class PLATFORM_EXPORT ThreadHeap {
static ThreadHeap* main_thread_heap_;
static constexpr size_t kStepsBeforeEphemeronPairsFlush = 8u;
size_t steps_since_last_ephemeron_pairs_flush_ = 0;
static constexpr size_t kStepsBeforeEphemeronProcessing = 16u;
size_t steps_since_last_ephemeron_processing_ = 0;
friend class incremental_marking_test::IncrementalMarkingScopeBase;
template <typename T>
friend class Member;
......
......@@ -67,8 +67,9 @@ class PLATFORM_EXPORT ThreadHeapStatsObserver {
V(VisitStackRoots) \
V(VisitRememberedSets)
#define FOR_ALL_CONCURRENT_SCOPES(V) \
V(ConcurrentMarkingStep) \
#define FOR_ALL_CONCURRENT_SCOPES(V) \
V(ConcurrentMarkInvokeEphemeronCallbacks) \
V(ConcurrentMarkingStep) \
V(ConcurrentSweepingStep)
// Manages counters and statistics across garbage collection cycles.
......
......@@ -1181,8 +1181,9 @@ void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
} else {
complete = MarkPhaseAdvanceMarking(
base::TimeTicks::Now() +
marking_scheduling_->GetNextIncrementalStepDurationForTask(
Heap().stats_collector()->object_size_in_bytes()));
marking_scheduling_->GetNextIncrementalStepDurationForTask(
Heap().stats_collector()->object_size_in_bytes()),
EphemeronProcessing::kPartialProcessing);
}
if (base::FeatureList::IsEnabled(
......@@ -1344,7 +1345,8 @@ void ThreadState::AtomicPauseMarkTransitiveClosure() {
Heap().stats_collector(),
ThreadHeapStatsCollector::kAtomicPauseMarkTransitiveClosure, "epoch",
gc_age_, "forced", IsForcedGC(current_gc_data_.reason));
CHECK(MarkPhaseAdvanceMarking(base::TimeTicks::Max()));
CHECK(MarkPhaseAdvanceMarking(base::TimeTicks::Max(),
EphemeronProcessing::kFullProcessing));
}
void ThreadState::AtomicPauseMarkEpilogue(BlinkGC::MarkingType marking_type) {
......@@ -1583,18 +1585,23 @@ void ThreadState::MarkPhaseVisitRoots() {
}
bool ThreadState::MarkPhaseAdvanceMarkingBasedOnSchedule(
base::TimeDelta max_deadline) {
base::TimeDelta max_deadline,
EphemeronProcessing ephemeron_processing) {
return MarkPhaseAdvanceMarking(
base::TimeTicks::Now() +
std::min(max_deadline,
marking_scheduling_->GetNextIncrementalStepDurationForTask(
Heap().stats_collector()->object_size_in_bytes())));
std::min(max_deadline,
marking_scheduling_->GetNextIncrementalStepDurationForTask(
Heap().stats_collector()->object_size_in_bytes())),
ephemeron_processing);
}
bool ThreadState::MarkPhaseAdvanceMarking(base::TimeTicks deadline) {
bool ThreadState::MarkPhaseAdvanceMarking(
base::TimeTicks deadline,
EphemeronProcessing ephemeron_processing) {
MarkingVisitor* visitor = current_gc_data_.visitor.get();
const bool finished = Heap().AdvanceMarking(
reinterpret_cast<MarkingVisitor*>(visitor), deadline);
const bool finished =
Heap().AdvanceMarking(reinterpret_cast<MarkingVisitor*>(visitor),
deadline, ephemeron_processing);
// visitor->marked_bytes() can also include bytes marked during roots
// visitation which is not counted in worklist_processing_time_foreground.
// Since the size of the roots is usually small relative to the size of
......
......@@ -179,6 +179,13 @@ class PLATFORM_EXPORT ThreadState final {
kSweeping,
};
// Controls how much ephemeron work a marking step performs. Partial
// processing is used by incremental steps on the mutator thread (which rely
// on concurrent markers and the atomic pause for completeness); full
// processing is used in the atomic pause, where fixed-point iteration is
// required for correctness.
enum class EphemeronProcessing {
kPartialProcessing, // Perform one ephemeron processing iteration every
// few steps.
kFullProcessing // Perform full fixed-point ephemeron processing on each
// step.
};
class AtomicPauseScope;
class GCForbiddenScope;
class LsanDisabledScope;
......@@ -507,8 +514,9 @@ class PLATFORM_EXPORT ThreadState final {
void MarkPhaseEpilogue(BlinkGC::MarkingType);
void MarkPhaseVisitRoots();
void MarkPhaseVisitNotFullyConstructedObjects();
bool MarkPhaseAdvanceMarkingBasedOnSchedule(base::TimeDelta max_deadline);
bool MarkPhaseAdvanceMarking(base::TimeTicks deadline);
bool MarkPhaseAdvanceMarkingBasedOnSchedule(base::TimeDelta,
EphemeronProcessing);
bool MarkPhaseAdvanceMarking(base::TimeTicks, EphemeronProcessing);
void VerifyMarking(BlinkGC::MarkingType);
// Visit the stack after pushing registers onto the stack.
......
......@@ -139,7 +139,8 @@ bool UnifiedHeapController::AdvanceTracing(double deadline_in_ms) {
ThreadState::AtomicPauseScope atomic_pause_scope(thread_state_);
ScriptForbiddenScope script_forbidden_scope;
is_tracing_done_ = thread_state_->MarkPhaseAdvanceMarkingBasedOnSchedule(
base::TimeDelta::FromMillisecondsD(deadline_in_ms));
base::TimeDelta::FromMillisecondsD(deadline_in_ms),
ThreadState::EphemeronProcessing::kPartialProcessing);
if (!is_tracing_done_) {
if (base::FeatureList::IsEnabled(
blink::features::kBlinkHeapConcurrentMarking)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment