Commit 5ffb1011 authored by Jeremy Roman, committed by Commit Bot

Use std::atomic for WTF hash table stats.

Ordering guarantees required here are very weak, as it merely gathers aggregate
stats about hash table use and external synchronization is expected if precise
results are required.

Did a quick local test to confirm that it still works correctly.

Bug: 736037
Change-Id: Icc780ad38cd62474f2c21a973dfc393d5aa1ab75
Reviewed-on: https://chromium-review.googlesource.com/c/1352577
Commit-Queue: Yuta Kitamura <yutak@chromium.org>
Reviewed-by: Yuta Kitamura <yutak@chromium.org>
Cr-Commit-Position: refs/heads/master@{#612059}
parent e1fc8f1b
......@@ -37,10 +37,10 @@ HashTableStats& HashTableStats::instance() {
}
void HashTableStats::copy(const HashTableStats* other) {
numAccesses = other->numAccesses;
numRehashes = other->numRehashes;
numRemoves = other->numRemoves;
numReinserts = other->numReinserts;
numAccesses = other->numAccesses.load(std::memory_order_relaxed);
numRehashes = other->numRehashes.load(std::memory_order_relaxed);
numRemoves = other->numRemoves.load(std::memory_order_relaxed);
numReinserts = other->numReinserts.load(std::memory_order_relaxed);
maxCollisions = other->maxCollisions;
numCollisions = other->numCollisions;
......
......@@ -42,7 +42,7 @@
#endif
#if DUMP_HASHTABLE_STATS
#include "third_party/blink/renderer/platform/wtf/atomics.h"
#include <atomic>
#include "third_party/blink/renderer/platform/wtf/threading.h"
#endif
......@@ -59,17 +59,19 @@
HashTableStats::instance().recordCollisionAtCount(probeCount); \
++perTableProbeCount; \
stats_->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
AtomicIncrement(&HashTableStats::instance().numAccesses); \
int probeCount = 0; \
++stats_->numAccesses; \
#define UPDATE_ACCESS_COUNTS() \
HashTableStats::instance().numAccesses.fetch_add(1, \
std::memory_order_relaxed); \
int probeCount = 0; \
stats_->numAccesses.fetch_add(1, std::memory_order_relaxed); \
int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
++probeCount; \
HashTableStats::instance().recordCollisionAtCount(probeCount)
#define UPDATE_ACCESS_COUNTS() \
AtomicIncrement(&HashTableStats::instance().numAccesses); \
#define UPDATE_ACCESS_COUNTS() \
HashTableStats::instance().numAccesses.fetch_add(1, \
std::memory_order_relaxed); \
int probeCount = 0
#endif
#else
......@@ -77,8 +79,8 @@
#define UPDATE_PROBE_COUNTS() \
++perTableProbeCount; \
stats_->recordCollisionAtCount(perTableProbeCount)
#define UPDATE_ACCESS_COUNTS() \
++stats_->numAccesses; \
#define UPDATE_ACCESS_COUNTS() \
stats_->numAccesses.fetch_add(1, std::memory_order_relaxed); \
int perTableProbeCount = 0
#else
#define UPDATE_PROBE_COUNTS() \
......@@ -117,10 +119,10 @@ struct WTF_EXPORT HashTableStats {
collisionGraph() {}
// The following variables are all atomically incremented when modified.
int numAccesses;
int numRehashes;
int numRemoves;
int numReinserts;
std::atomic_int numAccesses;
std::atomic_int numRehashes;
std::atomic_int numRemoves;
std::atomic_int numReinserts;
// The following variables are only modified in the recordCollisionAtCount
// method within a mutex.
......@@ -1441,10 +1443,11 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
DCHECK(
!IsDeletedBucket(*(LookupForWriting(Extractor::Extract(entry)).first)));
#if DUMP_HASHTABLE_STATS
AtomicIncrement(&HashTableStats::instance().numReinserts);
HashTableStats::instance().numReinserts.fetch_add(1,
std::memory_order_relaxed);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
++stats_->numReinserts;
stats_->numReinserts.fetch_add(1, std::memory_order_relaxed);
#endif
Value* new_entry = LookupForWriting(Extractor::Extract(entry)).first;
Mover<ValueType, Allocator, Traits,
......@@ -1536,10 +1539,10 @@ void HashTable<Key,
Allocator>::erase(const ValueType* pos) {
RegisterModification();
#if DUMP_HASHTABLE_STATS
AtomicIncrement(&HashTableStats::instance().numRemoves);
HashTableStats::instance().numRemoves.fetch_add(1, std::memory_order_relaxed);
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
++stats_->numRemoves;
stats_->numRemoves.fetch_add(1, std::memory_order_relaxed);
#endif
EnterAccessForbiddenScope();
......@@ -1769,13 +1772,15 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
ValueType* old_table = table_;
#if DUMP_HASHTABLE_STATS
if (old_table_size != 0)
AtomicIncrement(&HashTableStats::instance().numRehashes);
if (old_table_size != 0) {
HashTableStats::instance().numRehashes.fetch_add(1,
std::memory_order_relaxed);
}
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
if (old_table_size != 0)
++stats_->numRehashes;
stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
#endif
table_ = new_table;
......@@ -1823,13 +1828,15 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
ValueType* old_table = table_;
#if DUMP_HASHTABLE_STATS
if (old_table_size != 0)
AtomicIncrement(&HashTableStats::instance().numRehashes);
if (old_table_size != 0) {
HashTableStats::instance().numRehashes.fetch_add(1,
std::memory_order_relaxed);
}
#endif
#if DUMP_HASHTABLE_STATS_PER_TABLE
if (old_table_size != 0)
++stats_->numRehashes;
stats_->numRehashes.fetch_add(1, std::memory_order_relaxed);
#endif
// The Allocator::kIsGarbageCollected check is not needed. The check is just
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment