Commit 1bb45b23 authored by Michael Lippautz, committed by Commit Bot

heap: Further optimize write barrier path during GC

- Avoid the GCInfo lookup and the extra word read from memory when
  pushing an object that was discovered through the write barrier (see
  the sketch below).
- Use the already existing regular/large-object branch to compute the
  object size, assuming that the object will be marked.
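
The hot-path change is easiest to see in isolation. Below is a condensed,
self-contained C++ sketch of the old and new push paths; the names mirror
the Blink types in the diff (MarkingItem, HeapObjectHeader, GCInfoTable),
but everything else is simplified scaffolding, not the actual Blink API.

```cpp
#include <vector>

// Stand-ins for Blink types; simplified for illustration only.
struct HeapObjectHeader;
using TraceCallback = void (*)(void* visitor, void* object);

// Old barrier path: building a MarkingItem eagerly resolves the trace
// callback, which costs a GCInfo-index read from the header plus a
// dependent load from the GCInfo table on every barrier hit.
struct MarkingItem {
  void* object;
  TraceCallback callback;
};

// New barrier path: only the header pointer is pushed; the GCInfo lookup
// is deferred to the drain phase (AdvanceMarking), which runs much less
// frequently than the barrier itself.
std::vector<HeapObjectHeader*> write_barrier_worklist;

void PushFromWriteBarrier(HeapObjectHeader* header) {
  write_barrier_worklist.push_back(header);  // no GCInfo/trace lookup here
}
```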

Perf-test output on a Z840 (relative slowdown is down from ~17x):
[ RUN      ] WriteBarrierPerfTest.MemberWritePerformance
*RESULT WriteBarrierPerfTest writes during GC: = 36403.349108117945 writes/ms
*RESULT WriteBarrierPerfTest writes outside GC: = 476190.4761904762 writes/ms
*RESULT WriteBarrierPerfTest relative speed difference: = 13.08095238095238 times
[       OK ] WriteBarrierPerfTest.MemberWritePerformance (29 ms)
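(The relative speed difference is just the ratio of the two throughputs:
476190.48 / 36403.35 ≈ 13.08.)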

Bug: 1014414
Change-Id: I2c915edd1607bc839ea0478891971d23496b2678
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1865344
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#707469}
parent cf4a16f0
@@ -98,13 +98,6 @@ ThreadHeap::ThreadHeap(ThreadState* thread_state)
address_cache_(std::make_unique<AddressCache>()),
free_page_pool_(std::make_unique<PagePool>()),
process_heap_reporter_(std::make_unique<ProcessHeapReporter>()),
marking_worklist_(nullptr),
not_fully_constructed_worklist_(nullptr),
weak_callback_worklist_(nullptr),
movable_reference_worklist_(nullptr),
weak_table_worklist_(nullptr),
backing_store_callback_worklist_(nullptr),
v8_references_worklist_(nullptr),
vector_backing_arena_index_(BlinkGC::kVector1ArenaIndex),
current_arena_ages_(0) {
if (ThreadState::Current()->IsMainThread())
@@ -158,6 +151,7 @@ Address ThreadHeap::CheckAndMarkPointer(MarkingVisitor* visitor,
void ThreadHeap::SetupWorklists() {
marking_worklist_.reset(new MarkingWorklist());
write_barrier_worklist_.reset(new WriteBarrierWorklist());
not_fully_constructed_worklist_.reset(new NotFullyConstructedWorklist());
previously_not_fully_constructed_worklist_.reset(
new NotFullyConstructedWorklist());
@@ -171,6 +165,7 @@ void ThreadHeap::SetupWorklists() {
void ThreadHeap::DestroyMarkingWorklists(BlinkGC::StackState stack_state) {
marking_worklist_.reset(nullptr);
write_barrier_worklist_.reset(nullptr);
previously_not_fully_constructed_worklist_.reset(nullptr);
weak_callback_worklist_.reset(nullptr);
weak_table_worklist_.reset();
@@ -326,6 +321,18 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
if (!finished)
break;
finished = DrainWorklistWithDeadline(
deadline, write_barrier_worklist_.get(),
[visitor](HeapObjectHeader* header) {
DCHECK(!header->IsInConstruction());
GCInfoTable::Get()
.GCInfoFromIndex(header->GcInfoIndex())
->trace(visitor, header->Payload());
},
WorklistTaskId::MutatorThread);
if (!finished)
break;
// Iteratively mark all objects that were previously discovered while
// being in construction. The objects can be processed incrementally once
// a safepoint was reached.
@@ -351,17 +358,29 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
base::TimeTicks deadline) {
bool finished = false;
// Iteratively mark all objects that are reachable from the objects
// currently pushed onto the marking worklist.
return DrainWorklistWithDeadline(
finished = DrainWorklistWithDeadline(
deadline, marking_worklist_.get(),
[visitor](const MarkingItem& item) {
DCHECK(
!HeapObjectHeader::FromPayload(item.object)
->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(!HeapObjectHeader::FromPayload(item.object)->IsInConstruction());
item.callback(visitor, item.object);
},
visitor->task_id());
if (!finished)
return false;
finished = DrainWorklistWithDeadline(
deadline, write_barrier_worklist_.get(),
[visitor](HeapObjectHeader* header) {
DCHECK(!header->IsInConstruction());
GCInfoTable::Get()
.GCInfoFromIndex(header->GcInfoIndex())
->trace(visitor, header->Payload());
},
visitor->task_id());
return finished;
}
void ThreadHeap::WeakProcessing(MarkingVisitor* visitor) {
@@ -82,6 +82,7 @@ using V8Reference = const TraceWrapperV8Reference<v8::Value>*;
// Segment size of 512 entries necessary to avoid throughput regressions. Since
// the work list is currently a temporary object this is not a problem.
using MarkingWorklist = Worklist<MarkingItem, 512 /* local entries */>;
using WriteBarrierWorklist = Worklist<HeapObjectHeader*, 256>;
using NotFullyConstructedWorklist =
Worklist<NotFullyConstructedItem, 16 /* local entries */>;
using WeakCallbackWorklist =
@@ -227,6 +228,10 @@ class PLATFORM_EXPORT ThreadHeap {
return marking_worklist_.get();
}
WriteBarrierWorklist* GetWriteBarrierWorklist() const {
return write_barrier_worklist_.get();
}
NotFullyConstructedWorklist* GetNotFullyConstructedWorklist() const {
return not_fully_constructed_worklist_.get();
}
@@ -438,6 +443,11 @@ class PLATFORM_EXPORT ThreadHeap {
// contain almost all objects.
std::unique_ptr<MarkingWorklist> marking_worklist_;
// Objects on this worklist have been collected in the write barrier. The
// worklist is different from |marking_worklist_| to minimize execution in the
// path where a write barrier is executed.
std::unique_ptr<WriteBarrierWorklist> write_barrier_worklist_;
// Objects on this worklist were observed to be in construction (in their
// constructor) and thus have been delayed for processing. They have not yet
// been assigned a valid header and trace callback.
@@ -111,12 +111,14 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
: IncrementalMarkingScopeBase(thread_state),
gc_forbidden_scope_(thread_state),
marking_worklist_(heap_.GetMarkingWorklist()),
write_barrier_worklist_(heap_.GetWriteBarrierWorklist()),
not_fully_constructed_worklist_(
heap_.GetNotFullyConstructedWorklist()) {
thread_state_->SetGCPhase(ThreadState::GCPhase::kMarking);
ThreadState::AtomicPauseScope atomic_pause_scope_(thread_state_);
ScriptForbiddenScope script_forbidden_scope;
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
EXPECT_TRUE(not_fully_constructed_worklist_->IsGlobalEmpty());
thread_state->EnableIncrementalMarkingBarrier();
thread_state->current_gc_data_.visitor = std::make_unique<MarkingVisitor>(
@@ -125,6 +127,7 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
~IncrementalMarkingScope() {
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
EXPECT_TRUE(not_fully_constructed_worklist_->IsGlobalEmpty());
thread_state_->DisableIncrementalMarkingBarrier();
// Need to clear out unused worklists that might have been polluted during
@@ -135,6 +138,9 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
}
MarkingWorklist* marking_worklist() const { return marking_worklist_; }
WriteBarrierWorklist* write_barrier_worklist() const {
return write_barrier_worklist_;
}
NotFullyConstructedWorklist* not_fully_constructed_worklist() const {
return not_fully_constructed_worklist_;
}
@@ -142,6 +148,7 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
protected:
ThreadState::GCForbiddenScope gc_forbidden_scope_;
MarkingWorklist* const marking_worklist_;
WriteBarrierWorklist* const write_barrier_worklist_;
NotFullyConstructedWorklist* const not_fully_constructed_worklist_;
};
@@ -156,6 +163,7 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
objects_(objects),
backing_visitor_(thread_state_, &objects_) {
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
for (void* object : objects_) {
// Ensure that the object is in the normal arena so we can ignore backing
// objects on the marking stack.
@@ -168,9 +176,8 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
}
~ExpectWriteBarrierFires() {
EXPECT_FALSE(marking_worklist_->IsGlobalEmpty());
// All objects watched should be on the marking or write barrier worklist.
MarkingItem item;
// All objects watched should be on the marking stack.
while (marking_worklist_->Pop(WorklistTaskId::MutatorThread, &item)) {
// Inspect backing stores to allow specifying objects that are only
// reachable through a backing store.
@@ -184,6 +191,21 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
if (objects_.end() != pos)
objects_.erase(pos);
}
HeapObjectHeader* header;
while (
write_barrier_worklist_->Pop(WorklistTaskId::MutatorThread, &header)) {
// Inspect backing stores to allow specifying objects that are only
// reachable through a backing store.
if (!ThreadHeap::IsNormalArenaIndex(
PageFromObject(header->Payload())->Arena()->ArenaIndex())) {
backing_visitor_.ProcessBackingStore(header);
continue;
}
auto** pos =
std::find(objects_.begin(), objects_.end(), header->Payload());
if (objects_.end() != pos)
objects_.erase(pos);
}
EXPECT_TRUE(objects_.IsEmpty());
// All headers of objects watched should be marked at this point.
for (HeapObjectHeader* header : headers_) {
@@ -191,6 +213,7 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
header->Unmark();
}
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
}
private:
@@ -208,6 +231,7 @@ class ExpectNoWriteBarrierFires : public IncrementalMarkingScope {
std::initializer_list<void*> objects)
: IncrementalMarkingScope(thread_state) {
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
for (void* object : objects_) {
HeapObjectHeader* header = HeapObjectHeader::FromPayload(object);
headers_.push_back(std::make_pair(header, header->IsMarked()));
@@ -216,6 +240,7 @@ class ExpectNoWriteBarrierFires : public IncrementalMarkingScope {
~ExpectNoWriteBarrierFires() {
EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
EXPECT_TRUE(write_barrier_worklist_->IsGlobalEmpty());
for (const auto& pair : headers_) {
EXPECT_EQ(pair.second, pair.first->IsMarked());
pair.first->Unmark();
@@ -1442,7 +1467,6 @@ TEST_F(IncrementalMarkingTest, WriteBarrierDuringMixinConstruction) {
// Clear any objects that have been added to the regular marking worklist in
// the process of calling the constructor.
EXPECT_FALSE(scope.marking_worklist()->IsGlobalEmpty());
MarkingItem marking_item;
while (scope.marking_worklist()->Pop(WorklistTaskId::MutatorThread,
&marking_item)) {
@@ -1452,6 +1476,14 @@ TEST_F(IncrementalMarkingTest, WriteBarrierDuringMixinConstruction) {
header->Unmark();
}
EXPECT_TRUE(scope.marking_worklist()->IsGlobalEmpty());
// Clear any write barriers so far.
HeapObjectHeader* header;
while (scope.write_barrier_worklist()->Pop(WorklistTaskId::MutatorThread,
&header)) {
if (header->IsMarked())
header->Unmark();
}
EXPECT_TRUE(scope.write_barrier_worklist()->IsGlobalEmpty());
EXPECT_FALSE(scope.not_fully_constructed_worklist()->IsGlobalEmpty());
NotFullyConstructedItem partial_item;
@@ -95,12 +95,16 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
return false;
HeapObjectHeader* header;
size_t size;
if (LIKELY(!base_page->IsLargeObjectPage())) {
header = reinterpret_cast<HeapObjectHeader*>(
static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
reinterpret_cast<Address>(value)));
size = header->size();
} else {
header = static_cast<LargeObjectPage*>(base_page)->ObjectHeader();
LargeObjectPage* large_page = static_cast<LargeObjectPage*>(base_page);
header = large_page->ObjectHeader();
size = large_page->size();
}
DCHECK(header->IsValid());
@@ -117,10 +121,8 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
}
MarkingVisitor* visitor = thread_state->CurrentVisitor();
visitor->AccountMarkedBytes(header);
visitor->marking_worklist_.Push(
{header->Payload(),
GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex())->trace});
visitor->AccountMarkedBytes(size);
visitor->write_barrier_worklist_.Push(header);
return true;
}
@@ -144,7 +146,9 @@ void MarkingVisitor::TraceMarkedBackingStoreSlow(void* value) {
}
MarkingVisitor::MarkingVisitor(ThreadState* state, MarkingMode marking_mode)
: MarkingVisitorBase(state, marking_mode, WorklistTaskId::MutatorThread) {
: MarkingVisitorBase(state, marking_mode, WorklistTaskId::MutatorThread),
write_barrier_worklist_(Heap().GetWriteBarrierWorklist(),
WorklistTaskId::MutatorThread) {
DCHECK(state->InAtomicMarkingPause());
DCHECK(state->CheckThread());
}
@@ -125,9 +125,9 @@ class PLATFORM_EXPORT MarkingVisitorBase : public Visitor {
// marked upon calling.
inline bool MarkHeaderNoTracing(HeapObjectHeader*);
// Account for an object's live bytes. Should only be adjusted when
// Account for |size| live bytes. Should only be adjusted when
// transitioning an object from unmarked to marked state.
ALWAYS_INLINE void AccountMarkedBytes(HeapObjectHeader*);
ALWAYS_INLINE void AccountMarkedBytes(size_t size);
void RegisterBackingStoreReference(void** slot);
@@ -142,12 +142,8 @@ class PLATFORM_EXPORT MarkingVisitorBase : public Visitor {
int task_id_;
};
ALWAYS_INLINE void MarkingVisitorBase::AccountMarkedBytes(
HeapObjectHeader* header) {
marked_bytes_ +=
header->IsLargeObject()
? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
: header->size();
ALWAYS_INLINE void MarkingVisitorBase::AccountMarkedBytes(size_t size) {
marked_bytes_ += size;
}
inline bool MarkingVisitorBase::MarkHeaderNoTracing(HeapObjectHeader* header) {
@@ -161,7 +157,11 @@ inline bool MarkingVisitorBase::MarkHeaderNoTracing(HeapObjectHeader* header) {
DCHECK(!header->IsFree());
if (header->TryMark<HeapObjectHeader::AccessMode::kAtomic>()) {
AccountMarkedBytes(header);
const size_t size =
header->IsLargeObject()
? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
: header->size();
AccountMarkedBytes(size);
return true;
}
return false;
@@ -216,6 +216,8 @@ class PLATFORM_EXPORT MarkingVisitor : public MarkingVisitorBase {
// Exact version of the marking write barriers.
static bool WriteBarrierSlow(void*);
static void TraceMarkedBackingStoreSlow(void*);
WriteBarrierWorklist::View write_barrier_worklist_;
};
ALWAYS_INLINE bool MarkingVisitor::WriteBarrier(void* value) {