Commit f0798027 authored by Michael Lippautz, committed by Commit Bot

heap: Account marked bytes when processing an object

During marking, (a) marking an object and (b) processing it currently both
happen on the main thread. In the future, concurrent marking will move both
steps to helper tasks.

Even with concurrent marking, the write barrier remains on the main
thread and will require at least (a) to be executed right away. It is
thus beneficial to increment the live-bytes counters in (b), which can be
offloaded to helper tasks.
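
To illustrate the split, here is a minimal sketch with made-up types (not
code from this change): (a) is a single atomic bit flip that can stay in the
barrier, while (b) traces the object and accounts its bytes and can run on a
helper task.

  #include <atomic>
  #include <cstddef>

  // Hypothetical, simplified object header for the sketch.
  struct SketchHeader {
    std::atomic<bool> mark{false};
    size_t size = 0;
    // (a) Mark: cheap enough to execute right away in the write barrier.
    bool TryMark() {
      bool expected = false;
      return mark.compare_exchange_strong(expected, true);
    }
  };

  // (b) Process: trace members, then account live bytes; offloadable.
  void ProcessObject(SketchHeader* header, size_t* marked_bytes) {
    // ... tracing of member references would happen here ...
    *marked_bytes += header->size;  // Accounting now lives in (b).
  }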

Output on a Z840 (relative speed difference down from ~13.1x):
[ RUN      ] WriteBarrierPerfTest.MemberWritePerformance
*RESULT WriteBarrierPerfTest writes during GC: = 46497.21016738996 writes/ms
*RESULT WriteBarrierPerfTest writes outside GC: = 541516.2454873646 writes/ms
*RESULT WriteBarrierPerfTest relative speed difference: = 11.646209386281587 times
[       OK ] WriteBarrierPerfTest.MemberWritePerformance (67 ms)
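
(The relative speed difference is the outside-GC rate divided by the
during-GC rate: 541516.25 / 46497.21 ≈ 11.65, i.e. the slowdown the barrier
imposes while marking is active.)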

Bug: 1014414
Change-Id: I5295e4738fce9410fbfbed51829c04b2e25f2d9e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1883719
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#710262}
parent 47ebf6af
@@ -313,9 +313,11 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
     finished = DrainWorklistWithDeadline(
         deadline, marking_worklist_.get(),
         [visitor](const MarkingItem& item) {
-          DCHECK(!HeapObjectHeader::FromPayload(item.object)
-                      ->IsInConstruction());
+          HeapObjectHeader* header =
+              HeapObjectHeader::FromPayload(item.object);
+          DCHECK(!header->IsInConstruction());
           item.callback(visitor, item.object);
+          visitor->AccountMarkedBytes(header);
         },
         WorklistTaskId::MutatorThread);
     if (!finished)
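
For context, DrainWorklistWithDeadline pops items until the worklist is
empty or the time budget runs out; it has roughly the following shape (a
self-contained approximation using std::chrono; the real helper uses
Chromium's time types and checks the deadline only periodically):

  #include <chrono>

  // Sketch: process items until the worklist drains or the deadline passes.
  template <typename Worklist, typename Item, typename Callback>
  bool DrainWithDeadlineSketch(std::chrono::steady_clock::time_point deadline,
                               Worklist* worklist, Callback callback,
                               int task_id) {
    Item item;
    while (worklist->Pop(task_id, &item)) {
      callback(item);  // E.g. trace the object, then account its bytes.
      if (std::chrono::steady_clock::now() >= deadline)
        return false;  // Budget exhausted; marking resumes in a later step.
    }
    return true;  // Fully drained.
  }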
@@ -328,14 +330,15 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
               GCInfoTable::Get()
                   .GCInfoFromIndex(header->GcInfoIndex())
                   ->trace(visitor, header->Payload());
+              visitor->AccountMarkedBytes(header);
             },
             WorklistTaskId::MutatorThread);
     if (!finished)
       break;
 
-    // Iteratively mark all objects that were previously discovered while
-    // being in construction. The objects can be processed incrementally once
-    // a safepoint was reached.
+    // Convert |previously_not_fully_constructed_worklist_| to
+    // |marking_worklist_|. This merely re-adds items with the proper
+    // callbacks.
     finished = DrainWorklistWithDeadline(
         deadline, previously_not_fully_constructed_worklist_.get(),
         [visitor](const NotFullyConstructedItem& item) {
@@ -364,8 +367,10 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     finished = DrainWorklistWithDeadline(
         deadline, marking_worklist_.get(),
         [visitor](const MarkingItem& item) {
-          DCHECK(!HeapObjectHeader::FromPayload(item.object)->IsInConstruction());
+          HeapObjectHeader* header = HeapObjectHeader::FromPayload(item.object);
+          DCHECK(!header->IsInConstruction());
           item.callback(visitor, item.object);
+          visitor->AccountMarkedBytes(header);
         },
         visitor->task_id());
     if (!finished)
@@ -378,6 +383,7 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
           GCInfoTable::Get()
               .GCInfoFromIndex(header->GcInfoIndex())
               ->trace(visitor, header->Payload());
+          visitor->AccountMarkedBytes(header);
         },
         visitor->task_id());
   return finished;
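
Each visitor owns a private marked_bytes_ counter, so the accounting added
above is an unsynchronized add even on concurrent marking tasks. A
hypothetical sketch of how per-task totals could be combined (names are made
up, not Oilpan's API):

  #include <atomic>
  #include <cstddef>

  std::atomic<size_t> g_total_marked_bytes{0};  // Hypothetical global total.

  class TaskLocalVisitorSketch {
   public:
    // Hot path: no locks or atomics, the counter is task-local.
    void AccountMarkedBytes(size_t size) { marked_bytes_ += size; }

    // Publish the task-local total once, when the task finishes.
    ~TaskLocalVisitorSketch() {
      g_total_marked_bytes.fetch_add(marked_bytes_, std::memory_order_relaxed);
    }

   private:
    size_t marked_bytes_ = 0;
  };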
@@ -14,8 +14,7 @@ struct BackingModifier {
   HeapObjectHeader* const header;
 };
 
-BackingModifier CanFreeOrShrinkBacking(ThreadState* const state,
-                                       void* address) {
+BackingModifier CanModifyBacking(ThreadState* const state, void* address) {
   // - |SweepForbidden| protects against modifying objects from destructors.
   // - |IsSweepingInProgress| protects against modifying objects while
   //   concurrent sweeping is in progress.
@@ -51,7 +50,7 @@ void HeapAllocator::BackingFree(void* address) {
     return;
 
   ThreadState* const state = ThreadState::Current();
-  BackingModifier result = CanFreeOrShrinkBacking(state, address);
+  BackingModifier result = CanModifyBacking(state, address);
   if (!result.can_modify)
     return;
 
@@ -78,8 +77,9 @@ bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
     return false;
 
   ThreadState* state = ThreadState::Current();
-  // Don't expand if concurrent sweeping is in progress.
-  if (state->SweepForbidden() || state->IsSweepingInProgress())
+  BackingModifier result = CanModifyBacking(state, address);
+  if (!result.can_modify)
     return false;
+
   DCHECK(!state->in_atomic_pause());
   DCHECK(state->IsAllocationAllowed());
@@ -93,14 +93,9 @@ bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
   HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
   NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
-  const size_t old_size = header->size();
   bool succeed = arena->ExpandObject(header, new_size);
   if (succeed) {
     state->Heap().AllocationPointAdjusted(arena->ArenaIndex());
-    if (header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>() &&
-        state->IsMarkingInProgress()) {
-      state->CurrentVisitor()->AdjustMarkedBytes(header, old_size);
-    }
   }
   return succeed;
 }
 
@@ -126,7 +121,7 @@ bool HeapAllocator::BackingShrink(void* address,
   DCHECK_LT(quantized_shrunk_size, quantized_current_size);
 
   ThreadState* const state = ThreadState::Current();
-  BackingModifier result = CanFreeOrShrinkBacking(state, address);
+  BackingModifier result = CanModifyBacking(state, address);
   if (!result.can_modify)
     return false;
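
The rename reflects that one guard now serves BackingFree, BackingExpand and
BackingShrink alike. A self-contained sketch of the check it centralizes
(simplified, made-up types):

  struct ThreadStateSketch {
    bool sweep_forbidden = false;       // E.g. running inside a destructor.
    bool sweeping_in_progress = false;  // Concurrent sweeper owns the pages.
  };

  struct GuardResultSketch {
    bool can_modify;
  };

  // One predicate instead of each call site re-deriving the sweep checks.
  GuardResultSketch CanModifyBackingSketch(const ThreadStateSketch& state) {
    if (state.sweep_forbidden || state.sweeping_in_progress)
      return {false};  // Hands off: the sweeper may still free this backing.
    return {true};
  }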
@@ -65,14 +65,6 @@ void MarkingVisitorCommon::RegisterBackingStoreCallback(
   }
 }
 
-void MarkingVisitorCommon::AdjustMarkedBytes(HeapObjectHeader* header,
-                                             size_t old_size) {
-  DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
-  // Currently, only expansion of an object is supported during marking.
-  DCHECK_GE(header->size(), old_size);
-  marked_bytes_ += header->size() - old_size;
-}
-
 // static
 bool MarkingVisitor::WriteBarrierSlow(void* value) {
   if (!value || IsHashTableDeleteValue(value))
@@ -88,33 +80,28 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
     return false;
 
   HeapObjectHeader* header;
-  size_t size;
   if (LIKELY(!base_page->IsLargeObjectPage())) {
     header = reinterpret_cast<HeapObjectHeader*>(
         static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
             reinterpret_cast<Address>(value)));
-    size = header->size();
   } else {
     LargeObjectPage* large_page = static_cast<LargeObjectPage*>(base_page);
     header = large_page->ObjectHeader();
-    size = large_page->size();
   }
   DCHECK(header->IsValid());
 
   if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
     return false;
 
+  MarkingVisitor* visitor = thread_state->CurrentVisitor();
   if (UNLIKELY(IsInConstruction(header))) {
     // It is assumed that objects on not_fully_constructed_worklist_ are not
     // marked.
     header->Unmark();
-    thread_state->CurrentVisitor()->not_fully_constructed_worklist_.Push(
-        header->Payload());
+    visitor->not_fully_constructed_worklist_.Push(header->Payload());
     return true;
   }
 
-  MarkingVisitor* visitor = thread_state->CurrentVisitor();
-  visitor->AccountMarkedBytes(size);
   visitor->write_barrier_worklist_.Push(header);
   return true;
 }
@@ -152,7 +139,10 @@ void MarkingVisitor::DynamicallyMarkAddress(Address address) {
   DCHECK(!IsInConstruction(header));
   const GCInfo* gc_info =
       GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex());
-  MarkHeader(header, gc_info->trace);
+  if (MarkHeaderNoTracing(header)) {
+    marking_worklist_.Push(
+        {reinterpret_cast<void*>(header->Payload()), gc_info->trace});
+  }
 }
 
 void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
@@ -168,7 +158,8 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
   if (!header || header->IsMarked())
     return;
 
-  // Simple case for fully constructed objects.
+  // Simple case for fully constructed objects. This just adds the object to the
+  // regular marking worklist.
   const GCInfo* gc_info =
       GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex());
   if (!IsInConstruction(header)) {
@@ -203,6 +194,7 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
     if (maybe_ptr)
       Heap().CheckAndMarkPointer(this, maybe_ptr);
   }
+  AccountMarkedBytes(header);
 }
 
 void MarkingVisitor::FlushMarkingWorklist() {
@@ -82,7 +82,9 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
     RegisterBackingStoreReference(object_slot);
     if (!object)
       return;
-    MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
+    HeapObjectHeader* header = HeapObjectHeader::FromPayload(object);
+    MarkHeaderNoTracing(header);
+    AccountMarkedBytes(header);
   }
 
   // This callback mechanism is needed to account for backing store objects
@@ -101,7 +103,10 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
   int task_id() const { return task_id_; }
 
-  void AdjustMarkedBytes(HeapObjectHeader*, size_t);
+  // Account for an object's live bytes. Should only be called when actually
+  // tracing through an already marked object. Logically, this means
+  // accounting for the bytes when transitioning from grey to black.
+  ALWAYS_INLINE void AccountMarkedBytes(HeapObjectHeader*);
 
  protected:
   MarkingVisitorCommon(ThreadState*, MarkingMode, int task_id);
@@ -111,10 +116,6 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
   // marked upon calling.
   inline bool MarkHeaderNoTracing(HeapObjectHeader*);
 
-  // Account for |size| live bytes. Should only be adjusted when
-  // transitioning an object from unmarked to marked state.
-  ALWAYS_INLINE void AccountMarkedBytes(size_t size);
-
   void RegisterBackingStoreReference(void** slot);
 
   MarkingWorklist::View marking_worklist_;
@@ -128,8 +129,12 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
   int task_id_;
 };
 
-ALWAYS_INLINE void MarkingVisitorCommon::AccountMarkedBytes(size_t size) {
-  marked_bytes_ += size;
+ALWAYS_INLINE void MarkingVisitorCommon::AccountMarkedBytes(
+    HeapObjectHeader* header) {
+  marked_bytes_ +=
+      header->IsLargeObject()
+          ? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
+          : header->size();
 }
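
For large objects, header->size() does not describe the whole allocation,
which is why the new overload derives the size from the object's page. The
same logic as a self-contained sketch with made-up types:

  #include <cstddef>

  struct PageSketch { size_t page_size; };
  struct HeaderSketch {
    bool is_large;
    size_t object_size;
    PageSketch* page;
  };

  // Mirrors the ternary above: large objects report their size via the page.
  size_t LiveBytesForSketch(const HeaderSketch& h) {
    return h.is_large ? h.page->page_size : h.object_size;
  }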
inline bool MarkingVisitorCommon::MarkHeaderNoTracing(
@@ -143,15 +148,7 @@ inline bool MarkingVisitorCommon::MarkHeaderNoTracing(
   // freed backing store.
   DCHECK(!header->IsFree());
 
-  if (header->TryMark<HeapObjectHeader::AccessMode::kAtomic>()) {
-    const size_t size =
-        header->IsLargeObject()
-            ? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
-            : header->size();
-    AccountMarkedBytes(size);
-    return true;
-  }
-  return false;
+  return header->TryMark<HeapObjectHeader::AccessMode::kAtomic>();
 }
 
 // Base visitor used to mark Oilpan objects on any thread.
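
With the byte accounting moved out, MarkHeaderNoTracing reduces to the atomic
mark-bit transition. A self-contained sketch of such a try-mark (the bit
layout is made up, not Oilpan's header encoding):

  #include <atomic>
  #include <cstdint>

  // Returns true only for the caller that flips unmarked -> marked; all
  // other callers observe an already-set bit and back off.
  class MarkBitSketch {
   public:
    bool TryMark() {
      uint8_t expected = bits_.load(std::memory_order_relaxed);
      do {
        if (expected & kMarkBit)
          return false;  // Lost the race, or the object was already marked.
      } while (!bits_.compare_exchange_weak(expected, expected | kMarkBit,
                                            std::memory_order_acq_rel));
      return true;
    }

   private:
    static constexpr uint8_t kMarkBit = 1;
    std::atomic<uint8_t> bits_{0};
  };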