Commit 84c90850 authored by Michael Lippautz, committed by Commit Bot

[oilpan] HashTable: Avoid marking in Trace call

Trace calls should just dispatch to the visitor and register callbacks
with it. This is in preparation for overriding visitation behavior.
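
As an illustration of the intended pattern (a minimal sketch with
hypothetical names, not the actual Blink classes): Trace forwards the
backing pointer to the visitor instead of touching mark bits itself, so
a visitor subclass can substitute its own visitation behavior.

    // Sketch only; Visitor and Table are stand-ins for the Blink types.
    class Visitor {
     public:
      virtual ~Visitor() = default;
      // A marking visitor sets the mark bit here; other visitors (e.g.
      // a compaction or verification visitor) can override this.
      virtual void VisitBacking(void* backing) = 0;
    };

    class Table {
     public:
      void Trace(Visitor* visitor) {
        // Dispatch only: no marking happens in Trace itself.
        visitor->VisitBacking(backing_);
      }

     private:
      void* backing_ = nullptr;
    };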

Bug: chromium:802273
Change-Id: I0d7d6f95a7adcf121123316af7cb5b837c9e262f
Reviewed-on: https://chromium-review.googlesource.com/923731
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#537603}
parent 4b1a0b12
@@ -287,6 +287,14 @@ class PLATFORM_EXPORT HeapAllocator {
     visitor->Trace(vector_backing);
   }
 
+  template <typename T, typename HashTable, typename VisitorDispatcher>
+  static void TraceHashTableBacking(VisitorDispatcher visitor, T* backing) {
+    HeapHashTableBacking<HashTable>* hashtable_backing =
+        reinterpret_cast<HeapHashTableBacking<HashTable>*>(backing);
+    // Backing store reference is registered by the caller.
+    visitor->Trace(hashtable_backing);
+  }
+
  private:
   static void BackingFree(void*);
   static bool BackingExpand(void*, size_t);
...
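For reference, TraceHashTableBacking above does no marking itself: it
reinterprets the raw bucket pointer as the HeapHashTableBacking object and
dispatches it to the visitor. A minimal self-contained sketch of this
cast-and-dispatch pattern follows; SketchVisitor, SketchBacking, and
TraceBackingSketch are hypothetical stand-ins, not Blink declarations.

    // Stand-ins for illustration; not the real Blink declarations.
    struct SketchVisitor {
      template <typename T>
      void Trace(T* object) {
        // A marking visitor would set the mark bit for 'object' here.
      }
    };

    template <typename Backing, typename Visitor, typename Bucket>
    void TraceBackingSketch(Visitor* visitor, Bucket* buckets) {
      // The backing object and its bucket array start at the same address,
      // so the cast recovers the object the visitor knows how to process.
      visitor->Trace(reinterpret_cast<Backing*>(buckets));
    }

    // Usage: TraceBackingSketch<SketchBacking>(&visitor, buckets);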
@@ -2095,82 +2095,56 @@ template <typename VisitorDispatcher, typename A>
 std::enable_if_t<A::kIsGarbageCollected>
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     Trace(VisitorDispatcher visitor) {
-#if DUMP_HASHTABLE_STATS_PER_TABLE
-  // XXX: this will simply crash.
-  // Allocator::MarkNoTracing(visitor, stats_);
-#endif
-  // If someone else already marked the backing and queued up the trace and/or
-  // weak callback then we are done. This optimization does not happen for
-  // ListHashSet since its iterator does not point at the backing.
-  if (!table_ || Allocator::IsHeapObjectAlive(table_))
+  if (!table_)
     return;
-  // Normally, we mark the backing store without performing trace. This means
-  // it is marked live, but the pointers inside it are not marked. Instead we
-  // will mark the pointers below. However, for backing stores that contain
-  // weak pointers the handling is rather different. We don't mark the
-  // backing store here, so the marking GC will leave the backing unmarked. If
-  // the backing is found in any other way than through its HashTable (ie from
-  // an iterator) then the mark bit will be set and the pointers will be
-  // marked strongly, avoiding problems with iterating over things that
-  // disappear due to weak processing while we are iterating over them. We
-  // register the backing store pointer for delayed marking which will take
-  // place after we know if the backing is reachable from elsewhere. We also
-  // register a weakProcessing callback which will perform weak processing if
-  // needed.
+  // Backing store can be moved during compaction.
+  Allocator::RegisterBackingStoreReference(visitor, &table_);
   if (Traits::kWeakHandlingFlag == kNoWeakHandling) {
-    Allocator::MarkNoTracing(visitor, table_);
+    // Strong HashTable.
+    DCHECK(IsTraceableInCollectionTrait<Traits>::value);
+    Allocator::template TraceHashTableBacking<ValueType, HashTable>(visitor,
+                                                                    table_);
   } else {
+    // Weak HashTable. The HashTable may be held alive strongly from somewhere
+    // else, e.g., an iterator.
+    // Marking of the table is delayed because the backing store is potentially
+    // held alive strongly by other objects. Delayed marking happens after
+    // regular marking.
     Allocator::RegisterDelayedMarkNoTracing(visitor, table_);
-    // Since we're delaying marking this HashTable, it is possible that the
-    // registerWeakMembers is called multiple times (in rare
-    // cases). However, it shouldn't cause any issue.
+    // It is safe to register the table multiple times.
     Allocator::RegisterWeakMembers(
         visitor, this,
        WeakProcessingHashTableHelper<Traits::kWeakHandlingFlag, Key, Value,
                                       Extractor, HashFunctions, Traits,
                                       KeyTraits, Allocator>::Process);
-  }
-  // If the backing store will be moved by sweep compaction, register the
-  // table reference pointing to the backing store object, so that the
-  // reference is updated upon object relocation. A no-op if not enabled
-  // by the visitor.
-  Allocator::RegisterBackingStoreReference(visitor, &table_);
-  if (!IsTraceableInCollectionTrait<Traits>::value)
-    return;
-  if (Traits::kWeakHandlingFlag == kWeakHandling) {
-    // If we have both strong and weak pointers in the collection then
-    // we queue up the collection for fixed point iteration a la
-    // Ephemerons:
-    // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also
-    // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
+
+    if (IsTraceableInCollectionTrait<Traits>::value) {
 #if DCHECK_IS_ON()
-    DCHECK(!Enqueued() || Allocator::WeakTableRegistered(visitor, this));
+      DCHECK(!Allocator::WeakTableRegistered(visitor, this) || Enqueued());
+      DCHECK(!Enqueued() || Allocator::WeakTableRegistered(visitor, this));
 #endif
-    if (!Enqueued()) {
-      Allocator::RegisterWeakTable(
-          visitor, this,
-          WeakProcessingHashTableHelper<
-              Traits::kWeakHandlingFlag, Key, Value, Extractor, HashFunctions,
-              Traits, KeyTraits, Allocator>::EphemeronIteration,
-          WeakProcessingHashTableHelper<
-              Traits::kWeakHandlingFlag, Key, Value, Extractor, HashFunctions,
-              Traits, KeyTraits, Allocator>::EphemeronIterationDone);
-      SetEnqueued();
+      // Mix of strong and weak fields. We use an approach similar to ephemeron
+      // marking to find a fixed point, c.f.:
+      // - http://dl.acm.org/citation.cfm?doid=263698.263733
+      // - http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak
+      // Adding the table for ephemeron marking delays marking any elements in
+      // the backing until regular marking is finished.
+      if (!Enqueued()) {
+        Allocator::RegisterWeakTable(
+            visitor, this,
+            WeakProcessingHashTableHelper<
+                Traits::kWeakHandlingFlag, Key, Value, Extractor, HashFunctions,
+                Traits, KeyTraits, Allocator>::EphemeronIteration,
+            WeakProcessingHashTableHelper<
+                Traits::kWeakHandlingFlag, Key, Value, Extractor, HashFunctions,
+                Traits, KeyTraits, Allocator>::EphemeronIterationDone);
+        SetEnqueued();
+      }
     }
-    // We don't need to trace the elements here, since registering as a
-    // weak table above will cause them to be traced (perhaps several
-    // times). It's better to wait until everything else is traced
-    // before tracing the elements for the first time; this may reduce
-    // (by one) the number of iterations needed to get to a fixed point.
-    return;
-  }
-  for (ValueType* element = table_ + table_size_ - 1; element >= table_;
-       element--) {
-    if (!IsEmptyOrDeletedBucket(*element))
-      Allocator::template Trace<VisitorDispatcher, ValueType, Traits>(visitor,
-                                                                      *element);
   }
 }
...
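For reference, the ephemeron comments in the hunk above refer to iterating
marking to a fixed point: an entry's strong parts are traced only after its
weak key is known to be live, which can in turn make further keys live. A
minimal sketch of such a fixed-point loop follows; WeakTable and
EphemeronFixedPoint are hypothetical names, not the Blink implementation.

    #include <vector>

    // Hypothetical table type; a real implementation would trace the strong
    // parts of entries whose keys became marked since the previous pass.
    struct WeakTable {
      bool TraceStrongPartsOfNewlyLiveEntries() {
        // Stub: return true only if at least one new object was marked.
        return false;
      }
    };

    // Re-trace every registered table until none marks anything new, i.e.
    // until marking reaches a fixed point.
    void EphemeronFixedPoint(const std::vector<WeakTable*>& tables) {
      bool progress = true;
      while (progress) {
        progress = false;
        for (WeakTable* table : tables)
          progress |= table->TraceStrongPartsOfNewlyLiveEntries();
      }
    }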