Commit 470bf888 authored by Omer Katz, committed by Commit Bot

heap: Resolve data race during addition to HashTable

During insertion the HashTable might need to expand. If that happens,
a new backing store is allocated and all existing entries are moved
over to the new store.
Updating the backing store pointer can race with reading it during
tracing.
To resolve the race, the read in Trace and the write in RehashTo need
to be atomic.

Bug: 986235
Change-Id: I0d6d4350026144a1e1675313ec7b193fd58beaca
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1943141
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#720957}
parent d065c828
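The shape of the race, and of the fix the patch below applies, can be illustrated with a minimal standalone sketch. This is hypothetical code, not Blink's: only the reinterpret_cast-to-atomic idiom that the patch introduces is taken from the change itself.

#include <atomic>
#include <thread>

// Same idiom the patch adds: view a plain field through an atomic lens.
template <typename T>
std::atomic<T>& AsAtomic(T& t) {
  return reinterpret_cast<std::atomic<T>&>(t);
}

int small_backing[4] = {};
int large_backing[8] = {};
int* table_ = small_backing;  // stand-in for HashTable::table_

int main() {
  // Concurrent marker: repeatedly reads the backing-store pointer, as
  // Trace() does. With a plain read this would be a data race.
  std::thread tracer([] {
    for (int i = 0; i < 1000; ++i)
      (void)AsAtomic(table_).load(std::memory_order_relaxed);
  });
  // Mutator: expands the table and publishes the new backing store, as
  // RehashTo() does. The pointer only needs to be written indivisibly,
  // which is why the patch can use relaxed ordering.
  AsAtomic(table_).store(large_backing, std::memory_order_relaxed);
  tracer.join();
  return 0;
}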
@@ -92,6 +92,13 @@
 #endif
 #endif
 
+namespace {
+template <typename T>
+ALWAYS_INLINE std::atomic<T>& AsAtomic(T& t) {
+  return reinterpret_cast<std::atomic<T>&>(t);
+}
+}  // namespace
+
 namespace WTF {
 
 // This is for tracing inside collections that have special support for weak
@@ -1786,8 +1793,8 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
 #endif
 
-  table_ = new_table;
-  Allocator::template BackingWriteBarrierForHashTable<HashTable>(table_);
+  AsAtomic<ValueType*>(table_).store(new_table, std::memory_order_relaxed);
+  Allocator::template BackingWriteBarrierForHashTable<HashTable>(new_table);
   table_size_ = new_table_size;
 
   Value* new_entry = nullptr;
@@ -1805,7 +1812,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
 
   // Rescan the contents of the backing store as no write barriers were emitted
   // during re-insertion. Traits::NeedsToForbidGCOnMove ensures that no
   // garbage collection is triggered during moving.
-  Allocator::TraceMarkedBackingStore(table_);
+  Allocator::TraceMarkedBackingStore(new_table);
 
   deleted_count_ = 0;
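Condensed, the writer side now publishes the new store in three steps: move the entries, store the pointer atomically, then run the barrier and rescan against new_table rather than re-reading the now-racy table_ field. A simplified sketch of that ordering, with the Blink-internal Allocator hooks stubbed out as comments and all other names invented for illustration:

#include <atomic>
#include <cstddef>
#include <utility>

template <typename T>
std::atomic<T>& AsAtomic(T& t) {
  return reinterpret_cast<std::atomic<T>&>(t);
}

struct Entry { int key = 0; };

struct Table {
  Entry* table_ = nullptr;
  size_t table_size_ = 0;

  void RehashTo(Entry* new_table, size_t new_table_size) {
    // Moving entries emits no write barriers (simplified: the real code
    // re-inserts by hash rather than copying slot-for-slot).
    for (size_t i = 0; i < table_size_; ++i)
      new_table[i] = std::move(table_[i]);
    // Publish the new backing store atomically; a concurrent Trace() may
    // read table_ at any point and must see either the old or the new
    // pointer, never a torn value.
    AsAtomic(table_).store(new_table, std::memory_order_relaxed);
    table_size_ = new_table_size;
    // The real code follows with BackingWriteBarrierForHashTable(new_table)
    // and TraceMarkedBackingStore(new_table) to rescan the moved entries.
  }
};

int main() {
  Entry old_backing[2] = {{1}, {2}};
  Entry new_backing[4] = {};
  Table t{old_backing, 2};
  t.RehashTo(new_backing, 4);
  return t.table_ == new_backing ? 0 : 1;
}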
@@ -2094,10 +2101,12 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   static_assert(WTF::IsWeak<ValueType>::value ||
                     IsTraceableInCollectionTrait<Traits>::value,
                 "Value should not be traced");
+  ValueType* table =
+      AsAtomic<ValueType*>(table_).load(std::memory_order_relaxed);
   if (!WTF::IsWeak<ValueType>::value) {
     // Strong HashTable.
     Allocator::template TraceHashTableBackingStrongly<ValueType, HashTable>(
-        visitor, table_, &table_);
+        visitor, table, &table_);
   } else {
     // Weak HashTable. The HashTable may be held alive strongly from somewhere
     // else, e.g., an iterator.
@@ -2112,12 +2121,12 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     // backing store is marked but only contains empty/deleted buckets as all
     // non-empty/deleted buckets have been moved to the new backing store.
     Allocator::template TraceHashTableBackingOnly<ValueType, HashTable>(
-        visitor, table_, &table_);
+        visitor, table, &table_);
     // Trace the table weakly. For marking this will result in delaying the
     // processing until the end of the atomic pause. It is safe to trace
     // weakly multiple times.
     Allocator::template TraceHashTableBackingWeakly<ValueType, HashTable>(
-        visitor, table_, &table_,
+        visitor, table, &table_,
         WeakProcessingHashTableHelper<WeakHandlingTrait<ValueType>::value, Key,
                                       Value, Extractor, HashFunctions, Traits,
                                       KeyTraits, Allocator>::Process,
...
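The reader side is the mirror image of RehashTo: Trace() takes one atomic snapshot of table_ and passes that same local value to every backing-store trace call, so all of them operate on a single consistent pointer even if a rehash races in between. A minimal sketch of that snapshot pattern, with the Visitor and its trace hook as hypothetical stand-ins for the Blink-internal Allocator calls:

#include <atomic>

template <typename T>
std::atomic<T>& AsAtomic(T& t) {
  return reinterpret_cast<std::atomic<T>&>(t);
}

struct Entry { int key = 0; };

// Hypothetical stand-in for the visitor/allocator trace hooks.
struct Visitor {
  Entry* last_traced = nullptr;
  void TraceBacking(Entry* backing) { last_traced = backing; }
};

struct Table {
  Entry* table_ = nullptr;

  void Trace(Visitor* visitor) {
    // One atomic snapshot; re-reading table_ for each call below could
    // observe two different backing stores mid-rehash.
    Entry* table = AsAtomic(table_).load(std::memory_order_relaxed);
    visitor->TraceBacking(table);  // strong/weak variants all receive `table`
  }
};

int main() {
  Entry backing[2] = {};
  Table t{backing};
  Visitor v;
  t.Trace(&v);
  return v.last_traced == backing ? 0 : 1;
}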