Commit 360a25fc authored by Omer Katz, committed by Commit Bot

heap: Various marking tweaks

This CL does the following:
1) Move flushing of ephemeron pairs to the start of AdvanceMarking
   (also double the flush frequency and make the step counters
   actually count marking steps).
2) Introduce additional (disabled) scopes for diagnostics.
3) Increase the frequency of deadline checks when draining worklists
   (by roughly 8x).
4) Revise worklist draining so that the deadline is already checked
   on the first item processed, instead of only after a full check
   interval.

Bug: 986235
Change-Id: Ie24827a52cbcb497426fd51bc052fb129c417702
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2283743
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#786229}
parent 44b1a751
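Before the diff itself, the central mechanism in this CL is the countdown-style deadline check inside DrainWorklistWithDeadline, with the check interval promoted to a compile-time template parameter so each call site can tune it. Below is a minimal, self-contained sketch of that pattern; `DrainWithDeadline`, the std::deque worklist, and the std::chrono clock are illustrative stand-ins, not the actual Blink types:

```cpp
#include <chrono>
#include <cstddef>
#include <deque>

using Clock = std::chrono::steady_clock;

// Sketch of the countdown-style drain loop. kCheckInterval defaults to a
// small value (the CL uses 150 for mutator-thread draining, roughly 8x more
// frequent than the previous hard-coded 1250).
template <size_t kCheckInterval = 150, typename Item, typename Callback>
bool DrainWithDeadline(Clock::time_point deadline,
                       std::deque<Item>* worklist,
                       Callback callback) {
  size_t countdown = 0;  // 0 => the very first item triggers a deadline check.
  while (!worklist->empty()) {
    Item item = worklist->front();
    worklist->pop_front();
    callback(item);
    // Unsigned wrap-around on 0-- is well defined; the counter is reset
    // immediately after the check, so the wrap is never observed.
    if (countdown-- == 0) {
      if (Clock::now() >= deadline)
        return false;  // Deadline hit; the caller resumes in a later step.
      countdown = kCheckInterval;
    }
  }
  return true;  // Worklist fully drained within the deadline.
}
```

Because the counter starts at zero, a step that is already out of budget bails out after a single item instead of after a full interval. Call sites pick the interval at compile time, e.g. `DrainWithDeadline<50>(deadline, &worklist, cb)`; the CL uses kDefaultDeadlineCheckInterval / 3 for the bailout worklist (expensive collection backing stores) and 5 * kDefaultDeadlineCheckInterval for concurrent markers, where deadline pressure is lower.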
@@ -268,11 +268,14 @@ void ThreadHeap::FlushNotFullyConstructedObjects() {
 void ThreadHeap::FlushEphemeronPairs(EphemeronProcessing ephemeron_processing) {
   if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
-    if (++steps_since_last_ephemeron_pairs_flush_ !=
+    if (steps_since_last_ephemeron_pairs_flush_ <
         kStepsBeforeEphemeronPairsFlush)
       return;
   }
 
+  ThreadHeapStatsCollector::Scope stats_scope(
+      stats_collector(), ThreadHeapStatsCollector::kMarkFlushEphemeronPairs);
+
   EphemeronPairsWorklist::View view(discovered_ephemeron_pairs_worklist_.get(),
                                     WorklistTaskId::MutatorThread);
   if (!view.IsLocalViewEmpty()) {
@@ -302,22 +305,26 @@ void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
 namespace {
 
-template <typename Worklist, typename Callback>
+static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
+static constexpr size_t kDefaultConcurrentDeadlineCheckInterval =
+    5 * kDefaultDeadlineCheckInterval;
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+          typename Worklist,
+          typename Callback>
 bool DrainWorklistWithDeadline(base::TimeTicks deadline,
                                Worklist* worklist,
                                Callback callback,
                                int task_id) {
-  const size_t kDeadlineCheckInterval = 1250;
   size_t processed_callback_count = 0;
   typename Worklist::EntryType item;
   while (worklist->Pop(task_id, &item)) {
     callback(item);
-    if (++processed_callback_count == kDeadlineCheckInterval) {
+    if (processed_callback_count-- == 0) {
       if (deadline <= base::TimeTicks::Now()) {
         return false;
       }
-      processed_callback_count = 0;
+      processed_callback_count = kDeadlineCheckInterval;
     }
   }
   return true;
@@ -330,7 +337,7 @@ bool ThreadHeap::InvokeEphemeronCallbacks(
     MarkingVisitor* visitor,
     base::TimeTicks deadline) {
   if (ephemeron_processing == EphemeronProcessing::kPartialProcessing) {
-    if (++steps_since_last_ephemeron_processing_ !=
+    if (steps_since_last_ephemeron_processing_ <
         kStepsBeforeEphemeronProcessing) {
       // Returning "no more work" to avoid excessive processing. The fixed
       // point computation in the atomic pause takes care of correctness.
@@ -338,10 +345,12 @@ bool ThreadHeap::InvokeEphemeronCallbacks(
     }
   }
 
+  FlushEphemeronPairs(EphemeronProcessing::kFullProcessing);
+
   steps_since_last_ephemeron_processing_ = 0;
 
   // Mark any strong pointers that have now become reachable in ephemeron maps.
-  ThreadHeapStatsCollector::Scope stats_scope(
+  ThreadHeapStatsCollector::EnabledScope stats_scope(
       stats_collector(),
       ThreadHeapStatsCollector::kMarkInvokeEphemeronCallbacks);
@@ -364,23 +373,32 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
                                 EphemeronProcessing ephemeron_processing) {
   DCHECK_EQ(WorklistTaskId::MutatorThread, visitor->task_id());
 
+  ++steps_since_last_ephemeron_pairs_flush_;
+  ++steps_since_last_ephemeron_processing_;
+
   bool finished;
   bool processed_ephemerons = false;
+  FlushEphemeronPairs(ephemeron_processing);
   // Ephemeron fixed point loop.
   do {
     {
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking worklist.
       ThreadHeapStatsCollector::Scope stats_scope(
-          stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklist);
+          stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklists);
 
       // Start with mutator-thread-only worklists (not fully constructed).
       // If time runs out, concurrent markers can take care of the rest.
       {
-        ThreadHeapStatsCollector::EnabledScope bailout_scope(
+        ThreadHeapStatsCollector::EnabledScope inner_scope(
             stats_collector(), ThreadHeapStatsCollector::kMarkBailOutObjects);
-        finished = DrainWorklistWithDeadline(
+        // Items in the bailout worklist are only collection backing stores.
+        // These items could take a long time to process, so we should check
+        // the deadline more often (backing stores and large items can also be
+        // found in the regular marking worklist, but those are interleaved
+        // with smaller objects).
+        finished = DrainWorklistWithDeadline<kDefaultDeadlineCheckInterval / 3>(
            deadline, not_safe_to_concurrently_trace_worklist_.get(),
            [visitor](const MarkingItem& item) {
              item.callback(visitor, item.base_object_payload);
@@ -390,53 +408,72 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
           break;
         }
       }
 
-      finished = FlushV8References(deadline);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkFlushV8References);
+        finished = FlushV8References(deadline);
+        if (!finished)
+          break;
+      }
 
-      // Convert |previously_not_fully_constructed_worklist_| to
-      // |marking_worklist_|. This merely re-adds items with the proper
-      // callbacks.
-      finished = DrainWorklistWithDeadline(
-          deadline, previously_not_fully_constructed_worklist_.get(),
-          [visitor](NotFullyConstructedItem& item) {
-            visitor->DynamicallyMarkAddress(
-                reinterpret_cast<ConstAddress>(item));
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessNotFullyconstructeddWorklist);
+        // Convert |previously_not_fully_constructed_worklist_| to
+        // |marking_worklist_|. This merely re-adds items with the proper
+        // callbacks.
+        finished = DrainWorklistWithDeadline(
+            deadline, previously_not_fully_constructed_worklist_.get(),
+            [visitor](NotFullyConstructedItem& item) {
+              visitor->DynamicallyMarkAddress(
+                  reinterpret_cast<ConstAddress>(item));
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
 
-      finished = DrainWorklistWithDeadline(
-          deadline, marking_worklist_.get(),
-          [visitor](const MarkingItem& item) {
-            HeapObjectHeader* header =
-                HeapObjectHeader::FromPayload(item.base_object_payload);
-            DCHECK(!header->IsInConstruction());
-            item.callback(visitor, item.base_object_payload);
-            visitor->AccountMarkedBytes(header);
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessMarkingWorklist);
+        finished = DrainWorklistWithDeadline(
+            deadline, marking_worklist_.get(),
+            [visitor](const MarkingItem& item) {
+              HeapObjectHeader* header =
+                  HeapObjectHeader::FromPayload(item.base_object_payload);
+              DCHECK(!header->IsInConstruction());
+              item.callback(visitor, item.base_object_payload);
+              visitor->AccountMarkedBytes(header);
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
 
-      finished = DrainWorklistWithDeadline(
-          deadline, write_barrier_worklist_.get(),
-          [visitor](HeapObjectHeader* header) {
-            DCHECK(!header->IsInConstruction());
-            GCInfo::From(header->GcInfoIndex())
-                .trace(visitor, header->Payload());
-            visitor->AccountMarkedBytes(header);
-          },
-          WorklistTaskId::MutatorThread);
-      if (!finished)
-        break;
+      {
+        ThreadHeapStatsCollector::Scope inner_scope(
+            stats_collector(),
+            ThreadHeapStatsCollector::kMarkProcessWriteBarrierWorklist);
+        finished = DrainWorklistWithDeadline(
+            deadline, write_barrier_worklist_.get(),
+            [visitor](HeapObjectHeader* header) {
+              DCHECK(!header->IsInConstruction());
+              GCInfo::From(header->GcInfoIndex())
+                  .trace(visitor, header->Payload());
+              visitor->AccountMarkedBytes(header);
+            },
+            WorklistTaskId::MutatorThread);
+        if (!finished)
+          break;
+      }
     }
 
     if ((ephemeron_processing == EphemeronProcessing::kFullProcessing) ||
        !processed_ephemerons) {
      processed_ephemerons = true;
-      FlushEphemeronPairs(ephemeron_processing);
      finished =
          InvokeEphemeronCallbacks(ephemeron_processing, visitor, deadline);
      if (!finished)
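The hunks above also show how the step counters were made to "actually count steps": AdvanceMarking now increments both counters exactly once at the top of each step, the gates in FlushEphemeronPairs and InvokeEphemeronCallbacks compare with `<` instead of incrementing their own counter with `!=`, and the counter is reset once the gated work runs. Previously each gate bumped its counter on every call, so what was counted were calls, not marking steps. A minimal sketch of the corrected bookkeeping, where the class is a stand-in for ThreadHeap and the member names mirror the diff:

```cpp
#include <cstddef>

// Sketch of the step-counter gating after this CL.
class StepGate {
 public:
  // Called once at the start of every AdvanceMarking step.
  void OnStepStarted() { ++steps_since_last_flush_; }

  // May be called several times within a step; only does work once
  // enough whole steps have elapsed since the last flush.
  bool ShouldFlush() {
    if (steps_since_last_flush_ < kStepsBeforeFlush)
      return false;  // Too early; skip the (comparatively expensive) flush.
    steps_since_last_flush_ = 0;  // Reset only when the work actually runs.
    return true;
  }

 private:
  // The CL halves the ephemeron-pair threshold from 8 to 4 steps,
  // doubling the flush frequency.
  static constexpr size_t kStepsBeforeFlush = 4;
  size_t steps_since_last_flush_ = 0;
};
```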
@@ -463,18 +500,21 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
       // Convert |previously_not_fully_constructed_worklist_| to
       // |marking_worklist_|. This merely re-adds items with the proper
       // callbacks.
-      finished = DrainWorklistWithDeadline(
-          deadline, previously_not_fully_constructed_worklist_.get(),
-          [visitor](NotFullyConstructedItem& item) {
-            visitor->DynamicallyMarkAddress(reinterpret_cast<ConstAddress>(item));
-          },
-          visitor->task_id());
+      finished =
+          DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
+              deadline, previously_not_fully_constructed_worklist_.get(),
+              [visitor](NotFullyConstructedItem& item) {
+                visitor->DynamicallyMarkAddress(
+                    reinterpret_cast<ConstAddress>(item));
+              },
+              visitor->task_id());
       if (!finished)
         break;
 
       // Iteratively mark all objects that are reachable from the objects
       // currently pushed onto the marking worklist.
-      finished = DrainWorklistWithDeadline(
+      finished = DrainWorklistWithDeadline<
+          kDefaultConcurrentDeadlineCheckInterval>(
           deadline, marking_worklist_.get(),
           [visitor](const MarkingItem& item) {
             HeapObjectHeader* header =
@@ -490,7 +530,8 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
       if (!finished)
         break;
 
-      finished = DrainWorklistWithDeadline(
+      finished = DrainWorklistWithDeadline<
+          kDefaultConcurrentDeadlineCheckInterval>(
           deadline, write_barrier_worklist_.get(),
           [visitor](HeapObjectHeader* header) {
             PageFromObject(header)->SynchronizedLoad();
@@ -513,13 +554,14 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
       // Callbacks found by the concurrent marking will be flushed eventually
       // by the mutator thread and then invoked either concurrently or by the
       // mutator thread (in the atomic pause at latest).
-      finished = DrainWorklistWithDeadline(
-          deadline, ephemeron_pairs_to_process_worklist_.get(),
-          [visitor](EphemeronPairItem& item) {
-            visitor->VisitEphemeron(item.key, item.value,
-                                    item.value_trace_callback);
-          },
-          visitor->task_id());
+      finished =
+          DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
+              deadline, ephemeron_pairs_to_process_worklist_.get(),
+              [visitor](EphemeronPairItem& item) {
+                visitor->VisitEphemeron(item.key, item.value,
+                                        item.value_trace_callback);
+              },
+              visitor->task_id());
       if (!finished)
         break;
     }
...
@@ -459,7 +459,7 @@ class PLATFORM_EXPORT ThreadHeap {
   static ThreadHeap* main_thread_heap_;
 
-  static constexpr size_t kStepsBeforeEphemeronPairsFlush = 8u;
+  static constexpr size_t kStepsBeforeEphemeronPairsFlush = 4u;
   size_t steps_since_last_ephemeron_pairs_flush_ = 0;
   static constexpr size_t kStepsBeforeEphemeronProcessing = 16u;
   size_t steps_since_last_ephemeron_processing_ = 0;
...
@@ -158,7 +158,7 @@ base::TimeDelta ThreadHeapStatsCollector::Event::incremental_marking_time()
 
 base::TimeDelta
 ThreadHeapStatsCollector::Event::worklist_processing_time_foreground() const {
-  return scope_data[kMarkProcessWorklist];
+  return scope_data[kMarkProcessWorklists];
 }
 
 base::TimeDelta ThreadHeapStatsCollector::Event::atomic_marking_time() const {
...
@@ -40,31 +40,37 @@ class PLATFORM_EXPORT ThreadHeapStatsObserver {
   virtual void DecreaseAllocatedObjectSize(size_t) = 0;
 };
 
 #define FOR_ALL_SCOPES(V)                    \
   V(AtomicPauseCompaction)                   \
   V(AtomicPauseMarkEpilogue)                 \
   V(AtomicPauseMarkPrologue)                 \
   V(AtomicPauseMarkRoots)                    \
   V(AtomicPauseMarkTransitiveClosure)        \
   V(AtomicPauseSweepAndCompact)              \
   V(CompleteSweep)                           \
   V(IncrementalMarkingFinalize)              \
   V(IncrementalMarkingStartMarking)          \
   V(IncrementalMarkingStep)                  \
+  V(IncrementalMarkingWithDeadline)          \
   V(InvokePreFinalizers)                     \
   V(LazySweepInIdle)                         \
   V(LazySweepOnAllocation)                   \
   V(MarkBailOutObjects)                      \
   V(MarkInvokeEphemeronCallbacks)            \
-  V(MarkProcessWorklist)                     \
+  V(MarkFlushV8References)                   \
+  V(MarkFlushEphemeronPairs)                 \
+  V(MarkProcessWorklists)                    \
+  V(MarkProcessMarkingWorklist)              \
+  V(MarkProcessWriteBarrierWorklist)         \
+  V(MarkProcessNotFullyconstructeddWorklist) \
   V(MarkNotFullyConstructedObjects)          \
   V(MarkWeakProcessing)                      \
   V(UnifiedMarkingStep)                      \
   V(VisitCrossThreadPersistents)             \
   V(VisitPersistentRoots)                    \
   V(VisitPersistents)                        \
   V(VisitRoots)                              \
   V(VisitStackRoots)                         \
   V(VisitRememberedSets)
 
 #define FOR_ALL_CONCURRENT_SCOPES(V) \
...
@@ -1180,7 +1180,6 @@ void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
     skip_incremental_marking_for_testing_ = false;
   } else {
     complete = MarkPhaseAdvanceMarking(
-        base::TimeTicks::Now() +
         marking_scheduling_->GetNextIncrementalStepDurationForTask(
             Heap().stats_collector()->object_size_in_bytes()),
         EphemeronProcessing::kPartialProcessing);
@@ -1345,7 +1344,8 @@ void ThreadState::AtomicPauseMarkTransitiveClosure() {
       Heap().stats_collector(),
       ThreadHeapStatsCollector::kAtomicPauseMarkTransitiveClosure, "epoch",
       gc_age_, "forced", IsForcedGC(current_gc_data_.reason));
-  CHECK(MarkPhaseAdvanceMarking(base::TimeTicks::Max(),
+  // base::TimeTicks::Now() + base::TimeDelta::Max() == base::TimeTicks::Max()
+  CHECK(MarkPhaseAdvanceMarking(base::TimeDelta::Max(),
                                 EphemeronProcessing::kFullProcessing));
 }
@@ -1588,20 +1588,23 @@ bool ThreadState::MarkPhaseAdvanceMarkingBasedOnSchedule(
     base::TimeDelta max_deadline,
     EphemeronProcessing ephemeron_processing) {
   return MarkPhaseAdvanceMarking(
-      base::TimeTicks::Now() +
-          std::min(max_deadline,
-                   marking_scheduling_->GetNextIncrementalStepDurationForTask(
-                       Heap().stats_collector()->object_size_in_bytes())),
+      std::min(max_deadline,
+               marking_scheduling_->GetNextIncrementalStepDurationForTask(
+                   Heap().stats_collector()->object_size_in_bytes())),
       ephemeron_processing);
 }
 
 bool ThreadState::MarkPhaseAdvanceMarking(
-    base::TimeTicks deadline,
+    base::TimeDelta deadline,
     EphemeronProcessing ephemeron_processing) {
   MarkingVisitor* visitor = current_gc_data_.visitor.get();
-  const bool finished =
-      Heap().AdvanceMarking(reinterpret_cast<MarkingVisitor*>(visitor),
-                            deadline, ephemeron_processing);
+  ThreadHeapStatsCollector::Scope deadline_scope(
+      Heap().stats_collector(),
+      ThreadHeapStatsCollector::kIncrementalMarkingWithDeadline, "deadline_ms",
+      deadline.InMillisecondsF());
+  const bool finished = Heap().AdvanceMarking(
+      reinterpret_cast<MarkingVisitor*>(visitor),
+      base::TimeTicks::Now() + deadline, ephemeron_processing);
   // visitor->marked_bytes() can also include bytes marked during roots
   // visitation which is not counted in worklist_processing_time_foreground.
   // Since the size of the roots is usually small relative to the size of
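The signature change from base::TimeTicks to base::TimeDelta means callers now hand MarkPhaseAdvanceMarking a time budget, and the absolute deadline is computed once, right where marking actually starts. The comment in the hunk above relies on Chromium's saturating time arithmetic (Now() + TimeDelta::Max() == TimeTicks::Max()), which lets the atomic pause pass an "infinite" budget. Here is a sketch of the idea using std::chrono, which does not saturate, so the clamp is done by hand; ToDeadline and the function shape are illustrative, not the Blink code:

```cpp
#include <chrono>

using Clock = std::chrono::steady_clock;

// Convert a time budget into an absolute deadline, saturating at
// time_point::max() the way base::TimeTicks arithmetic does.
Clock::time_point ToDeadline(Clock::duration budget) {
  const Clock::time_point now = Clock::now();
  if (budget >= Clock::time_point::max() - now)
    return Clock::time_point::max();  // "No deadline": the atomic-pause case.
  return now + budget;
}

bool MarkPhaseAdvanceMarking(Clock::duration budget) {
  // Converting late keeps the budget accurate even if scheduling work
  // (e.g. computing the step duration) ran before this call.
  const Clock::time_point deadline = ToDeadline(budget);
  // ... drain the marking worklists until |deadline| ...
  return Clock::now() < deadline;
}
```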
...
@@ -516,7 +516,7 @@ class PLATFORM_EXPORT ThreadState final {
   void MarkPhaseVisitNotFullyConstructedObjects();
   bool MarkPhaseAdvanceMarkingBasedOnSchedule(base::TimeDelta,
                                               EphemeronProcessing);
-  bool MarkPhaseAdvanceMarking(base::TimeTicks, EphemeronProcessing);
+  bool MarkPhaseAdvanceMarking(base::TimeDelta, EphemeronProcessing);
   void VerifyMarking(BlinkGC::MarkingType);
 
   // Visit the stack after pushing registers onto the stack.
...