Commit 22d3fb09 authored by Anton Bikineev, committed by Commit Bot

heap: Don't trace hash table elements on reallocation

This CL augments 69488b17 for data structures based on HashTable.
On backing reallocation, it reorders the resetting of the backing
store pointer and the moving of the elements.

There is no reason to invoke the write barrier or to eagerly trace
elements that are just being moved, since the newly allocated backing
is still white (not yet reached by the marker).

Bug: 1021889
Change-Id: I553c95f59e92fd6654d8b666ade0938f4f34cd27
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1954464
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#725580}
parent b2c424e3
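To make the reasoning in the commit message concrete, here is a minimal, self-contained C++ sketch of the new ordering. All names (Backing, TraceIfMarked, Rehash) are illustrative, not Blink's actual API: elements are moved into a backing that has not been published to the marker yet, the backing is rescanned only if it was already marked, and only then is the pointer stored atomically.

#include <atomic>
#include <cstddef>
#include <utility>
#include <vector>

// Stand-in for a heap backing store with a mark bit
// (analogous to HeapObjectHeader::IsMarked()).
struct Backing {
  std::vector<int> slots;
  bool marked = false;
};

void TraceIfMarked(Backing* b) {
  if (b->marked) {
    // A marked backing may hide moved-in, untraced elements: rescan it.
    // A white (unmarked) backing needs nothing; regular marking will
    // visit it in full later.
  }
}

void Rehash(std::atomic<Backing*>& table, Backing* old_backing,
            std::size_t new_size) {
  Backing* fresh = new Backing;        // freshly allocated => white
  fresh->slots.reserve(new_size);
  for (int& v : old_backing->slots)    // plain moves, no write barriers
    fresh->slots.push_back(std::move(v));
  TraceIfMarked(fresh);                // rescan only if already marked
  table.store(fresh, std::memory_order_relaxed);  // publish atomically
  delete old_backing;                  // release old storage afterwards
}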
@@ -125,8 +125,11 @@ class PLATFORM_EXPORT HeapAllocator {
   static void FreeHashTableBacking(void* address);
   static bool ExpandHashTableBacking(void*, size_t);
-  static void TraceMarkedBackingStore(void* address) {
-    MarkingVisitor::TraceMarkedBackingStore(address);
+  static void TraceBackingStoreIfMarked(void* address) {
+    // Trace backing store elements only if backing store was marked.
+    if (HeapObjectHeader::FromPayload(address)->IsMarked()) {
+      MarkingVisitor::TraceMarkedBackingStore(address);
+    }
   }
   static void BackingWriteBarrier(void* address) {
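The rename makes the contract explicit: tracing happens only when the backing's own header is already marked. A compact sketch of that gate, with hypothetical types standing in for the Blink internals:

// Only an already-marked backing can hide elements the marker will
// never revisit; an unmarked (white) backing is traced in full when
// the marker first reaches it, so it needs no rescan here.
struct HeaderSketch {
  bool marked = false;
  bool IsMarked() const { return marked; }
};

void TraceBackingIfMarkedSketch(HeaderSketch* header) {
  if (header->IsMarked()) {
    // Rescan elements: no write barriers ran while they were moved in.
  }
  // else: nothing to do; regular marking will visit this backing later.
}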
@@ -76,7 +76,7 @@ class WTF_EXPORT PartitionAllocator {
     Free(ptr);  // Not the system free, the one from this class.
   }
-  static void TraceMarkedBackingStore(void*) {}
+  static void TraceBackingStoreIfMarked(void*) {}
   static void BackingWriteBarrier(void*) {}
   template <typename>
   static void BackingWriteBarrierForHashTable(void*) {}
@@ -945,6 +945,22 @@ class HashTable final
   void ClearEnqueued() { queue_flag_ = false; }
   bool Enqueued() { return queue_flag_; }

+  // Constructor for hash tables with raw storage.
+  struct RawStorageTag {};
+  HashTable(RawStorageTag, ValueType* table, unsigned size)
+      : table_(table),
+        table_size_(size),
+        key_count_(0),
+        deleted_count_(0),
+        queue_flag_(0)
+#if DCHECK_IS_ON()
+        ,
+        access_forbidden_(0),
+        modifications_(0)
+#endif
+  {
+  }
+
   ValueType* table_;
   unsigned table_size_;
   unsigned key_count_;
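The RawStorageTag constructor uses the common C++ tag-dispatch idiom: an empty struct type selects the overload that adopts caller-provided storage instead of allocating its own. A generic, self-contained sketch (names are illustrative, not WTF's):

template <typename T>
class TableSketch {
 public:
  struct AdoptStorageTag {};  // empty tag type, exists only to pick overloads
  TableSketch() : data_(nullptr), size_(0) {}           // normal path
  TableSketch(AdoptStorageTag, T* data, unsigned size)  // adopting path
      : data_(data), size_(size) {}

 private:
  T* data_;
  unsigned size_;
};

// Usage: TableSketch<int> t(TableSketch<int>::AdoptStorageTag{}, buf, 16);

The tag costs nothing at runtime and keeps the adopting constructor from being confused with any converting constructor.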
@@ -1753,10 +1769,6 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   }
   new_entry = RehashTo(original_table, new_table_size, new_entry);

-  EnterAccessForbiddenScope();
-  DeleteAllBucketsAndDeallocate(temporary_table, old_table_size);
-  LeaveAccessForbiddenScope();
-
   return new_entry;
 }
@@ -1770,41 +1782,50 @@ template <typename Key,
 Value*
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     RehashTo(ValueType* new_table, unsigned new_table_size, Value* entry) {
-  unsigned old_table_size = table_size_;
-  ValueType* old_table = table_;
-
 #if DUMP_HASHTABLE_STATS
-  if (old_table_size != 0) {
+  if (table_size_ != 0) {
     HashTableStats::instance().numRehashes.fetch_add(1,
                                                      std::memory_order_relaxed);
   }
 #endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
-  if (old_table_size != 0)
+  if (table_size_ != 0)
     stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
 #endif

-  AsAtomicPtr(&table_)->store(new_table, std::memory_order_relaxed);
-  Allocator::template BackingWriteBarrierForHashTable<HashTable>(new_table);
-  table_size_ = new_table_size;
+  HashTable new_hash_table(RawStorageTag{}, new_table, new_table_size);

   Value* new_entry = nullptr;
-  for (unsigned i = 0; i != old_table_size; ++i) {
-    if (IsEmptyOrDeletedBucket(old_table[i])) {
-      DCHECK_NE(&old_table[i], entry);
+  for (unsigned i = 0; i != table_size_; ++i) {
+    if (IsEmptyOrDeletedBucket(table_[i])) {
+      DCHECK_NE(&table_[i], entry);
       continue;
     }
-    Value* reinserted_entry = Reinsert(std::move(old_table[i]));
-    if (&old_table[i] == entry) {
+    Value* reinserted_entry = new_hash_table.Reinsert(std::move(table_[i]));
+    if (&table_[i] == entry) {
       DCHECK(!new_entry);
       new_entry = reinserted_entry;
     }
   }

   // Rescan the contents of the backing store as no write barriers were emitted
   // during re-insertion. Traits::NeedsToForbidGCOnMove ensures that no
   // garbage collection is triggered during moving.
-  Allocator::TraceMarkedBackingStore(new_table);
+  Allocator::TraceBackingStoreIfMarked(new_hash_table.table_);
+
+  ValueType* old_table = table_;
+  unsigned old_table_size = table_size_;
+  // This swaps the newly allocated buffer with the current one. The store to
+  // the current table has to be atomic to prevent races with the concurrent
+  // marker.
+  AsAtomicPtr(&table_)->store(new_hash_table.table_, std::memory_order_relaxed);
+  Allocator::template BackingWriteBarrierForHashTable<HashTable>(table_);
+  table_size_ = new_table_size;
+
+  new_hash_table.table_ = old_table;
+  new_hash_table.table_size_ = old_table_size;
+  // Explicitly clear since garbage collected HashTables don't do this on
+  // destruction.
+  new_hash_table.clear();

   deleted_count_ = 0;
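Condensed, the new RehashTo follows a swap-through-a-temporary pattern. A hedged sketch of that shape (hypothetical names; the real code publishes table_ with an atomic store rather than std::swap, so a concurrent marker never observes a torn update):

#include <utility>

struct TableView {
  int* backing = nullptr;
  unsigned size = 0;
};

void RehashToSketch(TableView& self, TableView fresh) {
  // 1. Reinsert all live entries from `self` into the unpublished `fresh`.
  // 2. Rescan `fresh` only if its backing was already marked.
  std::swap(self, fresh);  // 3. Adopt new storage; `fresh` now holds the old.
  // 4. Explicitly clear `fresh`, since garbage-collected HashTables do not
  //    release their backing in the destructor.
}

Handing the old backing to the temporary and clearing it there is what lets the callers below drop their explicit DeleteAllBucketsAndDeallocate calls.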
@@ -1827,7 +1848,6 @@ Value*
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     Rehash(unsigned new_table_size, Value* entry) {
   unsigned old_table_size = table_size_;
-  ValueType* old_table = table_;

 #if DUMP_HASHTABLE_STATS
   if (old_table_size != 0) {
@@ -1854,10 +1874,6 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   ValueType* new_table = AllocateTable(new_table_size);
   Value* new_entry = RehashTo(new_table, new_table_size, entry);

-  EnterAccessForbiddenScope();
-  DeleteAllBucketsAndDeallocate(old_table, old_table_size);
-  LeaveAccessForbiddenScope();
-
   return new_entry;
 }