Commit f0798027 authored by Michael Lippautz, committed by Commit Bot

heap: Account marked bytes when processing an object

During marking, (a) marking an object and (b) processing it currently
both happen on the main thread. In the future, concurrent marking will
move both steps to helper tasks.

Even with concurrent marking, the write barrier stays on the main
thread and requires at least (a) to be executed right away. It is
therefore beneficial to increment the live-bytes counter in (b), which
can be offloaded to helper tasks.

Perf test output on a Z840 (relative speed difference down from ~13.1x):
[ RUN      ] WriteBarrierPerfTest.MemberWritePerformance
*RESULT WriteBarrierPerfTest writes during GC: = 46497.21016738996 writes/ms
*RESULT WriteBarrierPerfTest writes outside GC: = 541516.2454873646 writes/ms
*RESULT WriteBarrierPerfTest relative speed difference: = 11.646209386281587 times
[       OK ] WriteBarrierPerfTest.MemberWritePerformance (67 ms)

Bug: 1014414
Change-Id: I5295e4738fce9410fbfbed51829c04b2e25f2d9e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1883719
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#710262}
parent 47ebf6af
@@ -313,9 +313,11 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
       finished = DrainWorklistWithDeadline(
           deadline, marking_worklist_.get(),
           [visitor](const MarkingItem& item) {
-            DCHECK(!HeapObjectHeader::FromPayload(item.object)
-                        ->IsInConstruction());
+            HeapObjectHeader* header =
+                HeapObjectHeader::FromPayload(item.object);
+            DCHECK(!header->IsInConstruction());
             item.callback(visitor, item.object);
+            visitor->AccountMarkedBytes(header);
           },
           WorklistTaskId::MutatorThread);
       if (!finished)
@@ -328,14 +330,15 @@ bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor,
             GCInfoTable::Get()
                 .GCInfoFromIndex(header->GcInfoIndex())
                 ->trace(visitor, header->Payload());
+            visitor->AccountMarkedBytes(header);
           },
           WorklistTaskId::MutatorThread);
       if (!finished)
         break;
 
-      // Iteratively mark all objects that were previously discovered while
-      // being in construction. The objects can be processed incrementally once
-      // a safepoint was reached.
+      // Convert |previously_not_fully_constructed_worklist_| to
+      // |marking_worklist_|. This merely re-adds items with the proper
+      // callbacks.
       finished = DrainWorklistWithDeadline(
           deadline, previously_not_fully_constructed_worklist_.get(),
           [visitor](const NotFullyConstructedItem& item) {
@@ -364,8 +367,10 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
       finished = DrainWorklistWithDeadline(
           deadline, marking_worklist_.get(),
           [visitor](const MarkingItem& item) {
-            DCHECK(!HeapObjectHeader::FromPayload(item.object)->IsInConstruction());
+            HeapObjectHeader* header = HeapObjectHeader::FromPayload(item.object);
+            DCHECK(!header->IsInConstruction());
             item.callback(visitor, item.object);
+            visitor->AccountMarkedBytes(header);
           },
           visitor->task_id());
       if (!finished)
@@ -378,6 +383,7 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
             GCInfoTable::Get()
                 .GCInfoFromIndex(header->GcInfoIndex())
                 ->trace(visitor, header->Payload());
+            visitor->AccountMarkedBytes(header);
           },
           visitor->task_id());
   return finished;
......
@@ -14,8 +14,7 @@ struct BackingModifier {
   HeapObjectHeader* const header;
 };
 
-BackingModifier CanFreeOrShrinkBacking(ThreadState* const state,
-                                       void* address) {
+BackingModifier CanModifyBacking(ThreadState* const state, void* address) {
   // - |SweepForbidden| protects against modifying objects from destructors.
   // - |IsSweepingInProgress| protects against modifying objects while
   //   concurrent sweeping is in progress.
@@ -51,7 +50,7 @@ void HeapAllocator::BackingFree(void* address) {
     return;
 
   ThreadState* const state = ThreadState::Current();
-  BackingModifier result = CanFreeOrShrinkBacking(state, address);
+  BackingModifier result = CanModifyBacking(state, address);
   if (!result.can_modify)
     return;
 
@@ -78,8 +77,9 @@ bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
     return false;
 
   ThreadState* state = ThreadState::Current();
-  // Don't expand if concurrent sweeping is in progress.
-  if (state->SweepForbidden() || state->IsSweepingInProgress())
+  BackingModifier result = CanModifyBacking(state, address);
+  if (!result.can_modify)
     return false;
+
   DCHECK(!state->in_atomic_pause());
   DCHECK(state->IsAllocationAllowed());
@@ -93,14 +93,9 @@ bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
   HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
   NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
-  const size_t old_size = header->size();
   bool succeed = arena->ExpandObject(header, new_size);
   if (succeed) {
     state->Heap().AllocationPointAdjusted(arena->ArenaIndex());
-    if (header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>() &&
-        state->IsMarkingInProgress()) {
-      state->CurrentVisitor()->AdjustMarkedBytes(header, old_size);
-    }
   }
   return succeed;
 }
 
@@ -126,7 +121,7 @@ bool HeapAllocator::BackingShrink(void* address,
   DCHECK_LT(quantized_shrunk_size, quantized_current_size);
 
   ThreadState* const state = ThreadState::Current();
-  BackingModifier result = CanFreeOrShrinkBacking(state, address);
+  BackingModifier result = CanModifyBacking(state, address);
   if (!result.can_modify)
     return false;
......
@@ -65,14 +65,6 @@ void MarkingVisitorCommon::RegisterBackingStoreCallback(
   }
 }
 
-void MarkingVisitorCommon::AdjustMarkedBytes(HeapObjectHeader* header,
-                                             size_t old_size) {
-  DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
-  // Currently, only expansion of an object is supported during marking.
-  DCHECK_GE(header->size(), old_size);
-  marked_bytes_ += header->size() - old_size;
-}
-
 // static
 bool MarkingVisitor::WriteBarrierSlow(void* value) {
   if (!value || IsHashTableDeleteValue(value))
@@ -88,33 +80,28 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
     return false;
 
   HeapObjectHeader* header;
-  size_t size;
   if (LIKELY(!base_page->IsLargeObjectPage())) {
     header = reinterpret_cast<HeapObjectHeader*>(
         static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
            reinterpret_cast<Address>(value)));
-    size = header->size();
   } else {
     LargeObjectPage* large_page = static_cast<LargeObjectPage*>(base_page);
     header = large_page->ObjectHeader();
-    size = large_page->size();
   }
   DCHECK(header->IsValid());
 
   if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
     return false;
 
+  MarkingVisitor* visitor = thread_state->CurrentVisitor();
   if (UNLIKELY(IsInConstruction(header))) {
     // It is assumed that objects on not_fully_constructed_worklist_ are not
     // marked.
     header->Unmark();
-    thread_state->CurrentVisitor()->not_fully_constructed_worklist_.Push(
-        header->Payload());
+    visitor->not_fully_constructed_worklist_.Push(header->Payload());
     return true;
   }
 
-  MarkingVisitor* visitor = thread_state->CurrentVisitor();
-  visitor->AccountMarkedBytes(size);
   visitor->write_barrier_worklist_.Push(header);
   return true;
 }
@@ -152,7 +139,10 @@ void MarkingVisitor::DynamicallyMarkAddress(Address address) {
   DCHECK(!IsInConstruction(header));
   const GCInfo* gc_info =
       GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex());
-  MarkHeader(header, gc_info->trace);
+  if (MarkHeaderNoTracing(header)) {
+    marking_worklist_.Push(
+        {reinterpret_cast<void*>(header->Payload()), gc_info->trace});
+  }
 }
void MarkingVisitor::ConservativelyMarkAddress(BasePage* page, void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
...@@ -168,7 +158,8 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page, ...@@ -168,7 +158,8 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
if (!header || header->IsMarked()) if (!header || header->IsMarked())
return; return;
// Simple case for fully constructed objects. // Simple case for fully constructed objects. This just adds the object to the
// regular marking worklist.
const GCInfo* gc_info = const GCInfo* gc_info =
GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex()); GCInfoTable::Get().GCInfoFromIndex(header->GcInfoIndex());
if (!IsInConstruction(header)) { if (!IsInConstruction(header)) {
@@ -203,6 +194,7 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
     if (maybe_ptr)
       Heap().CheckAndMarkPointer(this, maybe_ptr);
   }
+  AccountMarkedBytes(header);
 }
 
 void MarkingVisitor::FlushMarkingWorklist() {
......
@@ -82,7 +82,9 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
     RegisterBackingStoreReference(object_slot);
     if (!object)
       return;
-    MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
+    HeapObjectHeader* header = HeapObjectHeader::FromPayload(object);
+    MarkHeaderNoTracing(header);
+    AccountMarkedBytes(header);
   }
 
   // This callback mechanism is needed to account for backing store objects
@@ -101,7 +103,10 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
   int task_id() const { return task_id_; }
 
-  void AdjustMarkedBytes(HeapObjectHeader*, size_t);
+  // Account for object's live bytes. Should only be adjusted when
+  // actually tracing through an already marked object. Logically, this means
+  // accounting for the bytes when transitioning from grey to black.
+  ALWAYS_INLINE void AccountMarkedBytes(HeapObjectHeader*);
 
  protected:
   MarkingVisitorCommon(ThreadState*, MarkingMode, int task_id);
 
@@ -111,10 +116,6 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
   // marked upon calling.
   inline bool MarkHeaderNoTracing(HeapObjectHeader*);
 
-  // Account for |size| live bytes. Should only be adjusted when
-  // transitioning an object from unmarked to marked state.
-  ALWAYS_INLINE void AccountMarkedBytes(size_t size);
-
   void RegisterBackingStoreReference(void** slot);
 
   MarkingWorklist::View marking_worklist_;
...@@ -128,8 +129,12 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor { ...@@ -128,8 +129,12 @@ class PLATFORM_EXPORT MarkingVisitorCommon : public Visitor {
int task_id_; int task_id_;
}; };
ALWAYS_INLINE void MarkingVisitorCommon::AccountMarkedBytes(size_t size) { ALWAYS_INLINE void MarkingVisitorCommon::AccountMarkedBytes(
marked_bytes_ += size; HeapObjectHeader* header) {
marked_bytes_ +=
header->IsLargeObject()
? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
: header->size();
} }
inline bool MarkingVisitorCommon::MarkHeaderNoTracing( inline bool MarkingVisitorCommon::MarkHeaderNoTracing(
@@ -143,15 +148,7 @@ inline bool MarkingVisitorCommon::MarkHeaderNoTracing(
   // freed backing store.
   DCHECK(!header->IsFree());
 
-  if (header->TryMark<HeapObjectHeader::AccessMode::kAtomic>()) {
-    const size_t size =
-        header->IsLargeObject()
-            ? reinterpret_cast<LargeObjectPage*>(PageFromObject(header))->size()
-            : header->size();
-    AccountMarkedBytes(size);
-    return true;
-  }
-  return false;
+  return header->TryMark<HeapObjectHeader::AccessMode::kAtomic>();
 }
 
 // Base visitor used to mark Oilpan objects on any thread.
......