Commit 66b80e0b authored by Michael Lippautz, committed by Commit Bot

heap: Add flushing of not fully constructed objects

Upon entering an incremental marking step without stack, such objects can
be flushed to a worklist that is processed in the regular marking loop, as
the objects are definitely fully constructed at this point.

Bug: 843903
Change-Id: Ie4acbb7a58e7bedaee3b7806bdd18ca306ff14d7
Reviewed-on: https://chromium-review.googlesource.com/c/1350751
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#612780}
parent eade5334
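
Note (illustration only, not part of the change): the following is a minimal, self-contained toy sketch of the scheme the commit message describes. It uses made-up stand-in types (ToyHeap, Object, plain std::deque queues) instead of Blink's real worklists and visitors; only the two function names FlushNotFullyConstructedObjects and AdvanceMarking mirror the patch below.

// Toy model of the flushing scheme: objects discovered while still in their
// constructors are parked on one worklist; when a marking step starts without
// a stack scan, they are flushed to a second worklist that the regular
// marking loop drains.
#include <deque>
#include <iostream>

struct Object {
  bool in_construction = false;
  bool marked = false;
};

struct ToyHeap {
  std::deque<Object*> marking_worklist;
  std::deque<Object*> not_fully_constructed;
  std::deque<Object*> previously_not_fully_constructed;

  // Mirrors the role of ThreadHeap::FlushNotFullyConstructedObjects(): only
  // legal when no constructor frames can be live on the stack.
  void FlushNotFullyConstructedObjects() {
    while (!not_fully_constructed.empty()) {
      previously_not_fully_constructed.push_back(not_fully_constructed.front());
      not_fully_constructed.pop_front();
    }
  }

  // Mirrors the drain step in AdvanceMarking(): both worklists are processed
  // by the regular marking loop.
  void AdvanceMarking() {
    for (auto* list : {&marking_worklist, &previously_not_fully_constructed}) {
      while (!list->empty()) {
        Object* obj = list->front();
        list->pop_front();
        obj->marked = true;  // stands in for dynamically looked-up tracing
      }
    }
  }
};

int main() {
  ToyHeap heap;
  Object partially_built;
  partially_built.in_construction = true;
  heap.not_fully_constructed.push_back(&partially_built);

  // An incremental step is reached without heap pointers on the stack: the
  // object is guaranteed to be fully constructed by now, so flush and mark it.
  partially_built.in_construction = false;
  heap.FlushNotFullyConstructedObjects();
  heap.AdvanceMarking();

  std::cout << "marked: " << partially_built.marked << "\n";  // prints 1
  return 0;
}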
@@ -180,6 +180,8 @@ void ThreadHeap::RegisterWeakTable(void* table,
void ThreadHeap::CommitCallbackStacks() {
marking_worklist_.reset(new MarkingWorklist());
not_fully_constructed_worklist_.reset(new NotFullyConstructedWorklist());
previously_not_fully_constructed_worklist_.reset(
new NotFullyConstructedWorklist());
weak_callback_worklist_.reset(new WeakCallbackWorklist());
DCHECK(ephemeron_callbacks_.IsEmpty());
}
@@ -187,6 +189,7 @@ void ThreadHeap::CommitCallbackStacks() {
void ThreadHeap::DecommitCallbackStacks() {
marking_worklist_.reset(nullptr);
not_fully_constructed_worklist_.reset(nullptr);
previously_not_fully_constructed_worklist_.reset(nullptr);
weak_callback_worklist_.reset(nullptr);
ephemeron_callbacks_.clear();
}
@@ -207,6 +210,15 @@ void ThreadHeap::RegisterMovingObjectCallback(MovableReference* slot,
Compaction()->RegisterMovingObjectCallback(slot, callback, callback_data);
}
void ThreadHeap::FlushNotFullyConstructedObjects() {
if (!not_fully_constructed_worklist_->IsGlobalEmpty()) {
not_fully_constructed_worklist_->FlushToGlobal(WorklistTaskId::MainThread);
previously_not_fully_constructed_worklist_->MergeGlobalPool(
not_fully_constructed_worklist_.get());
}
DCHECK(not_fully_constructed_worklist_->IsGlobalEmpty());
}
void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
DCHECK(!thread_state_->IsIncrementalMarking());
ThreadHeapStatsCollector::Scope stats_scope(
@@ -245,9 +257,35 @@ void ThreadHeap::InvokeEphemeronCallbacks(Visitor* visitor) {
ephemeron_callbacks_ = std::move(final_set);
}
namespace {
template <typename Worklist, typename Callback>
bool DrainWorklistWithDeadline(TimeTicks deadline,
Worklist* worklist,
Callback callback) {
const size_t kDeadlineCheckInterval = 2500;
size_t processed_callback_count = 0;
typename Worklist::EntryType item;
while (worklist->Pop(WorklistTaskId::MainThread, &item)) {
callback(item);
if (++processed_callback_count == kDeadlineCheckInterval) {
if (deadline <= CurrentTimeTicks()) {
return false;
}
processed_callback_count = 0;
}
}
return true;
}
} // namespace
bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor, TimeTicks deadline) {
// const size_t kDeadlineCheckInterval = 2500;
// size_t processed_callback_count = 0;
bool finished;
// Ephemeron fixed point loop.
do {
{
@@ -255,16 +293,25 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor, TimeTicks deadline) {
// currently pushed onto the marking worklist.
ThreadHeapStatsCollector::Scope stats_scope(
stats_collector(), ThreadHeapStatsCollector::kMarkProcessWorklist);
MarkingItem item;
while (marking_worklist_->Pop(WorklistTaskId::MainThread, &item)) {
item.callback(visitor, item.object);
processed_callback_count++;
if (processed_callback_count % kDeadlineCheckInterval == 0) {
if (deadline <= CurrentTimeTicks()) {
return false;
}
}
}
finished =
DrainWorklistWithDeadline(deadline, marking_worklist_.get(),
[visitor](const MarkingItem& item) {
item.callback(visitor, item.object);
});
if (!finished)
return false;
// Iteratively mark all objects that were previously discovered while
// being in construction. The objects can be processed incrementally once
// a safepoint was reached.
finished = DrainWorklistWithDeadline(
deadline, previously_not_fully_constructed_worklist_.get(),
[visitor](const NotFullyConstructedItem& item) {
visitor->DynamicallyMarkAddress(reinterpret_cast<Address>(item));
});
if (!finished)
return false;
}
InvokeEphemeronCallbacks(visitor);
......
@@ -284,6 +284,11 @@ class PLATFORM_EXPORT ThreadHeap {
void WeakProcessing(Visitor*);
// Moves not fully constructed objects to previously not fully constructed
// objects. Such objects can be iterated using the Trace() method and do
// not need to rely on conservative handling.
void FlushNotFullyConstructedObjects();
// Marks not fully constructed objects.
void MarkNotFullyConstructedObjects(MarkingVisitor*);
// Marks the transitive closure including ephemerons.
@@ -436,9 +441,29 @@ class PLATFORM_EXPORT ThreadHeap {
std::unique_ptr<RegionTree> region_tree_;
std::unique_ptr<AddressCache> address_cache_;
std::unique_ptr<PagePool> free_page_pool_;
// All objects on this worklist have been fully initialized and assigned a
// trace callback for iterating the body of the object. This worklist should
// contain almost all objects.
std::unique_ptr<MarkingWorklist> marking_worklist_;
// Objects on this worklist were observed to be in construction (in their
// constructor) and thus have been delayed for processing. They have not yet
// been assigned a valid header and trace callback.
std::unique_ptr<NotFullyConstructedWorklist> not_fully_constructed_worklist_;
// Objects on this worklist were previously in construction but have been
// moved here upon observing a safepoint, i.e., processing without stack. They
// have not yet been assigned a valid header and trace callback but are fully
// specified and can thus be iterated using the trace callback (which can be
// looked up dynamically).
std::unique_ptr<NotFullyConstructedWorklist>
previously_not_fully_constructed_worklist_;
// Worklist of weak callbacks accumulated for objects. Such callbacks are
// processed after finishing marking objects.
std::unique_ptr<WeakCallbackWorklist> weak_callback_worklist_;
// No duplicates allowed for ephemeron callbacks. Hence, we use a hashmap
// with the key being the HashTable.
WTF::HashMap<void*, EphemeronCallback> ephemeron_callbacks_;
......
@@ -39,6 +39,19 @@ MarkingVisitor::MarkingVisitor(ThreadState* state, MarkingMode marking_mode)
MarkingVisitor::~MarkingVisitor() = default;
void MarkingVisitor::DynamicallyMarkAddress(Address address) {
BasePage* const page = PageFromObject(address);
HeapObjectHeader* const header =
page->IsLargeObjectPage()
? static_cast<LargeObjectPage*>(page)->ObjectHeader()
: static_cast<NormalPage*>(page)->FindHeaderFromAddress(address);
DCHECK(header);
DCHECK(!header->IsInConstruction());
const GCInfo* gc_info =
GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex());
MarkHeader(header, gc_info->trace_);
}
void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
Address address) {
#if DCHECK_IS_ON()
......
@@ -59,6 +59,10 @@ class PLATFORM_EXPORT MarkingVisitor : public Visitor {
MarkedPointerCallbackForTesting);
#endif // DCHECK_IS_ON()
// Marks an object dynamically using any address within its body and adds a
// tracing callback for processing of the object.
void DynamicallyMarkAddress(Address);
// Marks an object and adds a tracing callback for processing of the object.
inline void MarkHeader(HeapObjectHeader*, TraceCallback);
......
@@ -932,7 +932,7 @@ void ThreadState::RunScheduledGC(BlinkGC::StackState stack_state) {
// Idle time GC will be scheduled by Blink Scheduler.
break;
case kIncrementalMarkingStepScheduled:
IncrementalMarkingStep();
IncrementalMarkingStep(stack_state);
break;
case kIncrementalMarkingFinalizeScheduled:
IncrementalMarkingFinalize();
@@ -1455,7 +1455,7 @@ void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
}
}
void ThreadState::IncrementalMarkingStep() {
void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
DCHECK(IsMarkingInProgress());
ThreadHeapStatsCollector::EnabledScope stats_scope(
@@ -1465,6 +1465,9 @@ void ThreadState::IncrementalMarkingStep() {
<< "IncrementalMarking: Step "
<< "Reason: " << GcReasonString(current_gc_data_.reason);
AtomicPauseScope atomic_pause_scope(this);
if (stack_state == BlinkGC::kNoHeapPointersOnStack) {
Heap().FlushNotFullyConstructedObjects();
}
const bool complete = MarkPhaseAdvanceMarking(
CurrentTimeTicks() + next_incremental_marking_step_duration_);
if (complete) {
@@ -1715,6 +1718,10 @@ void ThreadState::AtomicPausePrologue(BlinkGC::StackState stack_state,
if (isolate_ && perform_cleanup_)
perform_cleanup_(isolate_);
if (stack_state == BlinkGC::kNoHeapPointersOnStack) {
Heap().FlushNotFullyConstructedObjects();
}
DCHECK(InAtomicMarkingPause());
Heap().MakeConsistentForGC();
Heap().ClearArenaAges();
......
@@ -300,7 +300,7 @@ class PLATFORM_EXPORT ThreadState final
void ScheduleIncrementalMarkingFinalize();
void IncrementalMarkingStart(BlinkGC::GCReason);
void IncrementalMarkingStep();
void IncrementalMarkingStep(BlinkGC::StackState);
void IncrementalMarkingFinalize();
bool FinishIncrementalMarkingIfRunning(BlinkGC::StackState,
BlinkGC::MarkingType,
......
@@ -30,12 +30,14 @@ namespace blink {
//
// Work stealing is best effort, i.e., there is no way to inform other tasks
// of the need for items.
template <typename EntryType, int segment_size, int max_tasks = 1>
template <typename _EntryType, int segment_size, int max_tasks = 1>
class Worklist {
USING_FAST_MALLOC(Worklist);
using WorklistType = Worklist<EntryType, segment_size, max_tasks>;
using WorklistType = Worklist<_EntryType, segment_size, max_tasks>;
public:
using EntryType = _EntryType;
class View {
public:
View(WorklistType* worklist, int task_id)
......
@@ -159,7 +159,7 @@ TEST(LifecycleContextTest, ShouldNotHitCFICheckOnIncrementalMarking) {
while (thread_state->GetGCState() ==
ThreadState::kIncrementalMarkingStepScheduled)
thread_state->IncrementalMarkingStep();
thread_state->IncrementalMarkingStep(BlinkGC::kNoHeapPointersOnStack);
thread_state->IncrementalMarkingFinalize();
RuntimeEnabledFeatures::SetHeapIncrementalMarkingEnabled(was_enabled);
......