Commit 984762d2 authored by Michael Lippautz, committed by Commit Bot

[oilpan,wtf] Rework HashTable write barriers

Instead of emitting write barriers for single writes in HashTable,
conservatively emit a barrier for the backing store during rehashing.

This moves us in the tradeoff space:
- Removes 40KiB of binary bloat
- Avoids emitting barriers when re-inserting single elements, which
  improves throughput for the case where incremental marking is off
- Emits a single conservative barrier at the end of RehashTo (see the
  sketch below). This potentially means that we (re-)scan more objects
  when incremental marking is on
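
To make the tradeoff concrete, here is a minimal, self-contained sketch (toy code, not Blink's actual API; all names below are illustrative) contrasting a per-element write barrier with a single conservative backing-store barrier:

  #include <cstdio>
  #include <vector>

  // Stand-in for the incremental marker's worklist.
  static std::vector<const void*> g_marking_worklist;
  static bool g_incremental_marking_on = true;

  // Old scheme: a barrier on every single element write.
  void ElementWriteBarrier(const void* value) {
    if (!g_incremental_marking_on || !value)
      return;
    g_marking_worklist.push_back(value);  // One worklist push per element.
  }

  // New scheme: one conservative barrier on the whole backing store; the
  // marker later re-scans all of its contents.
  void BackingStoreBarrier(const void* backing) {
    if (!g_incremental_marking_on || !backing)
      return;
    g_marking_worklist.push_back(backing);  // Single push for the rehash.
  }

  int main() {
    int elements[100] = {};

    // Old scheme: N barrier calls during re-insertion.
    for (int& e : elements)
      ElementWriteBarrier(&e);
    std::printf("per-element barriers pushed: %zu\n",
                g_marking_worklist.size());

    // New scheme: move all elements barrier-free, then one barrier.
    g_marking_worklist.clear();
    BackingStoreBarrier(elements);
    std::printf("conservative barrier pushed: %zu\n",
                g_marking_worklist.size());
    return 0;
  }

The cost shows up only when incremental marking is on: the single push makes the marker revisit the whole backing store instead of just the freshly written slots.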

Bug: chromium:839514, chromium:757440
Change-Id: Ib5495c9e2210836424d7b26f2138d852f8dafec7
Reviewed-on: https://chromium-review.googlesource.com/1055400
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#558713}
parent 952c4e34
@@ -128,6 +128,10 @@ class PLATFORM_EXPORT HeapAllocator {
   static void FreeHashTableBacking(void* address, bool is_weak_table);
   static bool ExpandHashTableBacking(void*, size_t);
+  static void TraceMarkedBackingStore(void* address) {
+    MarkingVisitor::TraceMarkedBackingStore(address);
+  }
   static void BackingWriteBarrier(void* address) {
     MarkingVisitor::WriteBarrier(address);
   }
@@ -35,8 +35,17 @@ class PLATFORM_EXPORT MarkingVisitor final : public Visitor {
   static std::unique_ptr<MarkingVisitor> Create(ThreadState*, MarkingMode);

+  // Write barrier that adds |value| to the set of marked objects. The
+  // barrier bails out if marking is off or the object is already marked.
   inline static void WriteBarrier(void* value);

+  // Eagerly traces an already marked backing store, ensuring that all its
+  // children are discovered by the marker. The barrier bails out if marking
+  // is off, and skips individual reachable objects that are already marked.
+  // The barrier dispatches through the trace callback stored in GcInfo, so
+  // it will not inline any templated type-specific code.
+  inline static void TraceMarkedBackingStore(void* value);

   MarkingVisitor(ThreadState*, MarkingMode);
   ~MarkingVisitor() override;
@@ -186,6 +195,30 @@ inline void MarkingVisitor::WriteBarrier(void* value) {
 #endif  // BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
 }
+inline void MarkingVisitor::TraceMarkedBackingStore(void* value) {
+#if BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
+  if (!ThreadState::IsAnyIncrementalMarking() || !value)
+    return;
+  ThreadState* const thread_state = ThreadState::Current();
+  if (!thread_state->IsIncrementalMarking())
+    return;
+  // |value| is pointing to the start of a backing store.
+  HeapObjectHeader* header = HeapObjectHeader::FromPayload(value);
+  CHECK(header->IsMarked());
+  DCHECK(thread_state->CurrentVisitor());
+  // This check ensures that the visitor will not eagerly recurse into
+  // children but rather push all blink::GarbageCollected objects and only
+  // eagerly trace non-managed objects.
+  DCHECK(!thread_state->Heap().GetStackFrameDepth().IsEnabled());
+  // No weak handling for write barriers. Modifying weakly reachable objects
+  // strongifies them for the current cycle.
+  ThreadHeap::GcInfo(header->GcInfoIndex())
+      ->trace_(thread_state->CurrentVisitor(), value);
+#endif  // BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
+}

 }  // namespace blink

 #endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_MARKING_VISITOR_H_
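
For readers unfamiliar with the GcInfo dispatch used above, here is a hedged, self-contained sketch of the idea: a per-type trace function pointer stored in a table and looked up by index, so the caller never instantiates templated tracing code. The names below (TraceCallback, GcInfo, TraceViaGcInfo, Node) are illustrative stand-ins, not Blink's definitions:

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  struct Visitor;  // Forward declaration for the callback signature.
  using TraceCallback = void (*)(Visitor*, void* self);

  struct Visitor {
    std::vector<void*> discovered;  // Children found during the eager trace.
  };

  // One entry per garbage-collected type, looked up by a per-object index.
  struct GcInfo {
    TraceCallback trace;
  };

  struct Node {
    Node* next = nullptr;
  };

  // The type-specific trace function, reachable only through the table.
  void TraceNode(Visitor* visitor, void* self) {
    Node* node = static_cast<Node*>(self);
    if (node->next)
      visitor->discovered.push_back(node->next);
  }

  static const GcInfo g_gc_info_table[] = {{TraceNode}};

  // Analogous to ThreadHeap::GcInfo(header->GcInfoIndex())->trace_(...): the
  // caller only needs the object's gc-info index, not its static type.
  void TraceViaGcInfo(Visitor* visitor, size_t gc_info_index, void* object) {
    g_gc_info_table[gc_info_index].trace(visitor, object);
  }

  int main() {
    Node a, b;
    a.next = &b;
    Visitor visitor;
    TraceViaGcInfo(&visitor, 0, &a);
    std::printf("discovered %zu child(ren)\n", visitor.discovered.size());
    return 0;
  }

Because the call goes through the function pointer, every call site compiles down to one indirect call, which is where the binary-size saving in the commit message comes from.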
@@ -100,6 +100,7 @@ class WTF_EXPORT PartitionAllocator {
     Free(ptr);  // Not the system free, the one from this class.
   }
+  static void TraceMarkedBackingStore(void*) {}
   static void BackingWriteBarrier(void*) {}

   static bool IsAllocationAllowed() { return true; }
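
The empty stub above exists because the shared WTF::HashTable code calls Allocator::TraceMarkedBackingStore unconditionally; the non-garbage-collected allocator must therefore provide a no-op with the same signature. A minimal sketch of that pattern, with hypothetical allocator names standing in for HeapAllocator and PartitionAllocator:

  #include <cstdio>

  struct GCAllocator {
    static void TraceMarkedBackingStore(void* backing) {
      std::printf("re-scan backing store %p\n", backing);
    }
  };

  struct NonGCAllocator {
    static void TraceMarkedBackingStore(void*) {}  // No marker, nothing to do.
  };

  // Shared container code, parameterized over the allocator, mirroring
  // WTF::HashTable's RehashTo tail call.
  template <typename Allocator>
  void RehashTo(void* backing) {
    // ... re-insert all elements without per-element barriers ...
    Allocator::TraceMarkedBackingStore(backing);
  }

  int main() {
    int backing[4] = {};
    RehashTo<GCAllocator>(backing);     // Emits the conservative barrier.
    RehashTo<NonGCAllocator>(backing);  // The no-op compiles away entirely.
    return 0;
  }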
@@ -534,8 +534,7 @@ struct Mover {
   STATIC_ONLY(Mover);
   static void Move(T&& from, T& to) {
     to.~T();
-    ConstructTraits<T, Traits, Allocator>::ConstructAndNotifyElement(
-        &to, std::move(from));
+    new (NotNull, &to) T(std::move(from));
   }
 };
@@ -543,10 +542,9 @@ template <typename T, typename Allocator, typename Traits>
 struct Mover<T, Allocator, Traits, true> {
   STATIC_ONLY(Mover);
   static void Move(T&& from, T& to) {
-    to.~T();
     Allocator::EnterGCForbiddenScope();
-    ConstructTraits<T, Traits, Allocator>::ConstructAndNotifyElement(
-        &to, std::move(from));
+    to.~T();
+    new (NotNull, &to) T(std::move(from));
     Allocator::LeaveGCForbiddenScope();
   }
 };
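
The reworked Mover destroys the destination slot and placement-news the moved-from value into it; the GC-sensitive specialization additionally wraps both steps in a GC forbidden scope so that a collection cannot observe the slot between destruction and construction. A simplified, self-contained sketch of that pattern (plain placement new instead of WTF's NotNull overload; the RAII guard is a stand-in for Allocator::EnterGCForbiddenScope/LeaveGCForbiddenScope):

  #include <cstdio>
  #include <new>
  #include <string>
  #include <utility>

  static int g_gc_forbidden_depth = 0;  // Stand-in for the allocator's counter.

  struct GCForbiddenScope {
    GCForbiddenScope() { ++g_gc_forbidden_depth; }
    ~GCForbiddenScope() { --g_gc_forbidden_depth; }
  };

  // Moves |from| into the storage occupied by |to| without emitting any
  // per-element write barriers, mirroring the reworked Mover.
  template <typename T>
  void MoveIntoSlot(T&& from, T& to) {
    GCForbiddenScope scope;        // No GC may observe the destroyed slot.
    to.~T();                       // End the old object's lifetime.
    new (&to) T(std::move(from));  // Construct the new value in place.
  }

  int main() {
    std::string slot = "old value";
    std::string incoming = "new value";
    MoveIntoSlot(std::move(incoming), slot);
    std::printf("%s\n", slot.c_str());  // Prints "new value".
    return 0;
  }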
@@ -1758,6 +1756,10 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
       new_entry = reinserted_entry;
     }
   }
+  // Rescan the contents of the backing store as no write barriers were
+  // emitted during re-insertion. Traits::NeedsToForbidGCOnMove ensures that
+  // no garbage collection is triggered while elements are moved.
+  Allocator::TraceMarkedBackingStore(table_);

   deleted_count_ = 0;