Commit 093e4778 authored by Adenilson Cavalcanti, committed by Commit Bot

Reactivate HashTable stats feature

This feature collects usage data from HashTables (e.g. rehashes,
reads, number of collisions).

This change:
- fixes the functionality for the global HashTable stats
- fixes the per-table stats as well

It also introduces a public member function (guarded by a macro),
DumpStats(), that dumps a HashMap's stats (a usage sketch follows the
commit metadata below).

Bug: 735663
Change-Id: I86f826c3d23f02f0d06dba7e0a5a157601bbd4ce
Reviewed-on: https://chromium-review.googlesource.com/556919
Commit-Queue: Adenilson Cavalcanti <cavalcantii@chromium.org>
Reviewed-by: Yuta Kitamura <yutak@chromium.org>
Cr-Commit-Position: refs/heads/master@{#488777}
parent 01db9e6d
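
As a quick illustration of the new API (not part of the change itself):
a minimal sketch of dumping per-table stats from a HashMap, assuming the
build defines DUMP_HASHTABLE_STATS_PER_TABLE=1. The insert/at calls
reflect the WTF::HashMap API around this revision; the function and
local names are purely illustrative.

// Sketch: exercise a HashMap so its counters accrue, then dump them.
#include "platform/wtf/HashMap.h"

void ExerciseHashMapStats() {
  WTF::HashMap<int, int> map;
  // Inserts and lookups update the access/collision/rehash counters.
  for (int i = 1; i <= 1000; ++i)
    map.insert(i, i * i);
  for (int i = 1; i <= 1000; ++i)
    map.at(i);
#if DUMP_HASHTABLE_STATS_PER_TABLE
  // New in this change: forwards to HashTable::DumpStats() on impl_.
  map.DumpStats();
#endif
}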
platform/wtf/HashMap.h

@@ -91,6 +91,10 @@ class HashMap {
         "Cannot put raw pointers to garbage-collected classes into "
         "an off-heap HashMap. Use HeapHashMap<> instead.");
   }
+#if DUMP_HASHTABLE_STATS_PER_TABLE
+  void DumpStats() { impl_.DumpStats(); }
+#endif
+
   HashMap(const HashMap&) = default;
   HashMap& operator=(const HashMap&) = default;
   HashMap(HashMap&&) = default;
...
platform/wtf/HashTable.cpp

@@ -19,7 +19,7 @@
 #include "platform/wtf/HashTable.h"
 
-#if DUMP_HASHTABLE_STATS
+#if DUMP_HASHTABLE_STATS || DUMP_HASHTABLE_STATS_PER_TABLE
 
 #include "platform/wtf/DataLog.h"
 #include "platform/wtf/ThreadingPrimitives.h"
@@ -62,27 +62,27 @@ void HashTableStats::recordCollisionAtCount(int count) {
   hashTableStatsMutex().unlock();
 }
 
-void HashTableStats::dumpStats() {
+void HashTableStats::DumpStats() {
   // Lock the global hash table singleton while dumping.
   bool isGlobalSingleton = this == &instance();
   if (isGlobalSingleton)
     hashTableStatsMutex().lock();
 
-  dataLogF("\nWTF::HashTable statistics\n\n");
-  dataLogF("%d accesses\n", numAccesses);
-  dataLogF("%d total collisions, average %.2f probes per access\n",
+  DataLogF("\nWTF::HashTable statistics\n\n");
+  DataLogF("%d accesses\n", numAccesses);
+  DataLogF("%d total collisions, average %.2f probes per access\n",
            numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
-  dataLogF("longest collision chain: %d\n", maxCollisions);
+  DataLogF("longest collision chain: %d\n", maxCollisions);
   for (int i = 1; i <= maxCollisions; i++) {
-    dataLogF(
+    DataLogF(
         " %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this "
         "many or more)\n",
         collisionGraph[i], i,
         100.0 * (collisionGraph[i] - collisionGraph[i + 1]) / numAccesses,
         100.0 * collisionGraph[i] / numAccesses);
   }
-  dataLogF("%d rehashes\n", numRehashes);
-  dataLogF("%d reinserts\n", numReinserts);
+  DataLogF("%d rehashes\n", numRehashes);
+  DataLogF("%d reinserts\n", numReinserts);
 
   if (isGlobalSingleton)
     hashTableStatsMutex().unlock();
...
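
A note on the arithmetic in the dump above: the "average probes per
access" figure is (numAccesses + numCollisions) / numAccesses, since
every access costs at least one probe and each recorded collision adds
one more. For example, 10,000 accesses with 2,500 total collisions would
print an average of 1.25 probes per access. Also note that DumpStats()
takes the global stats mutex only when invoked on the shared singleton,
so dumping an individual table's stats does not serialize against the
global dump.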
platform/wtf/HashTable.h

@@ -32,8 +32,13 @@
 #include "platform/wtf/allocator/PartitionAllocator.h"
 #include <memory>
 
+#if !defined(DUMP_HASHTABLE_STATS)
 #define DUMP_HASHTABLE_STATS 0
+#endif
+
+#if !defined(DUMP_HASHTABLE_STATS_PER_TABLE)
 #define DUMP_HASHTABLE_STATS_PER_TABLE 0
+#endif
 
 #if DUMP_HASHTABLE_STATS
 #include "platform/wtf/Atomics.h"
@@ -54,7 +59,7 @@
     ++perTableProbeCount;                                   \
     stats_->recordCollisionAtCount(perTableProbeCount)
 #define UPDATE_ACCESS_COUNTS()                              \
-  atomicIncrement(&HashTableStats::instance().numAccesses); \
+  AtomicIncrement(&HashTableStats::instance().numAccesses); \
   int probeCount = 0;                                       \
   ++stats_->numAccesses;                                    \
   int perTableProbeCount = 0
@@ -63,7 +68,7 @@
     ++probeCount;                                           \
     HashTableStats::instance().recordCollisionAtCount(probeCount)
 #define UPDATE_ACCESS_COUNTS()                              \
-  atomicIncrement(&HashTableStats::instance().numAccesses); \
+  AtomicIncrement(&HashTableStats::instance().numAccesses); \
   int probeCount = 0
 #endif
 #else
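
With the new #if !defined(...) guards above, both macros default to 0
only when the build has not already set them, so the feature can now be
enabled without editing this header. A hypothetical way to turn it on
(the -D flags name the real macros; where exactly to put them in the
build files is an assumption):

  # e.g. on the compiler command line (hypothetical placement):
  -DDUMP_HASHTABLE_STATS=1 -DDUMP_HASHTABLE_STATS_PER_TABLE=1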
@@ -99,7 +104,7 @@ template <WeakHandlingFlag weakHandlingFlag,
           typename Traits>
 struct TraceInCollectionTrait;
 
-#if DUMP_HASHTABLE_STATS
+#if DUMP_HASHTABLE_STATS || DUMP_HASHTABLE_STATS_PER_TABLE
 
 struct WTF_EXPORT HashTableStats {
   HashTableStats()
       : numAccesses(0),
@@ -124,7 +129,7 @@ struct WTF_EXPORT HashTableStats {
   void copy(const HashTableStats* other);
   void recordCollisionAtCount(int count);
-  void dumpStats();
+  void DumpStats();
 
   static HashTableStats& instance();
@@ -133,7 +138,7 @@ struct WTF_EXPORT HashTableStats {
 };
 
 #if DUMP_HASHTABLE_STATS_PER_TABLE
-template <typename Allocator, bool isGCType = Allocator::isGarbageCollected>
+template <typename Allocator, bool isGCType = Allocator::kIsGarbageCollected>
 class HashTableStatsPtr;
 
 template <typename Allocator>
@@ -141,15 +146,15 @@ class HashTableStatsPtr<Allocator, false> final {
   STATIC_ONLY(HashTableStatsPtr);
 
 public:
-  static std::unique_ptr<HashTableStats> create() {
-    return WTF::wrapUnique(new HashTableStats);
+  static std::unique_ptr<HashTableStats> Create() {
+    return base::MakeUnique<HashTableStats>();
   }
 
   static std::unique_ptr<HashTableStats> copy(
       const std::unique_ptr<HashTableStats>& other) {
     if (!other)
       return nullptr;
-    return WTF::wrapUnique(new HashTableStats(*other));
+    return base::MakeUnique<HashTableStats>(*other);
   }
 
   static void swap(std::unique_ptr<HashTableStats>& stats,
@@ -163,20 +168,15 @@ class HashTableStatsPtr<Allocator, true> final {
   STATIC_ONLY(HashTableStatsPtr);
 
 public:
-  static HashTableStats* create() {
-    // Resort to manually allocating this POD on the vector
-    // backing heap, as blink::GarbageCollected<> isn't in scope
-    // in WTF.
-    void* storage = reinterpret_cast<void*>(
-        Allocator::template allocateVectorBacking<unsigned char>(
-            sizeof(HashTableStats)));
-    return new (storage) HashTableStats;
+  static HashTableStats* Create() {
+    // TODO(cavalcantii): fix this.
+    return new HashTableStats;
   }
 
   static HashTableStats* copy(const HashTableStats* other) {
     if (!other)
       return nullptr;
-    HashTableStats* obj = create();
+    HashTableStats* obj = Create();
     obj->copy(other);
     return obj;
   }
@@ -907,9 +907,14 @@ class HashTable final
 #if DUMP_HASHTABLE_STATS_PER_TABLE
  public:
   mutable
-      typename std::conditional<Allocator::isGarbageCollected,
+      typename std::conditional<Allocator::kIsGarbageCollected,
                                 HashTableStats*,
                                 std::unique_ptr<HashTableStats>>::type stats_;
+  void DumpStats() {
+    if (stats_) {
+      stats_->DumpStats();
+    }
+  }
 #endif
 
 template <WeakHandlingFlag x,
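
A toy illustration (self-contained, not Blink code) of how the stats_
member above picks its type at compile time: std::conditional selects a
raw pointer for GC allocators and unique_ptr otherwise, and the new
DumpStats() null-checks before forwarding because stats_ is created
lazily.

#include <memory>
#include <type_traits>

struct Stats {
  void DumpStats() {}
};

template <typename Allocator>
class TableSketch {
 public:
  mutable typename std::conditional<Allocator::kIsGarbageCollected,
                                    Stats*,
                                    std::unique_ptr<Stats>>::type stats_ =
      nullptr;
  void DumpStats() {
    if (stats_)  // stats_ may still be null before the first rehash
      stats_->DumpStats();
  }
};

struct NonGC { static constexpr bool kIsGarbageCollected = false; };
// TableSketch<NonGC> t; t.DumpStats();  // safe no-op while stats_ is null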
@@ -1380,7 +1385,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   DCHECK(
       !IsDeletedBucket(*(LookupForWriting(Extractor::Extract(entry)).first)));
 #if DUMP_HASHTABLE_STATS
-  atomicIncrement(&HashTableStats::instance().numReinserts);
+  AtomicIncrement(&HashTableStats::instance().numReinserts);
 #endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
   ++stats_->numReinserts;
@@ -1475,7 +1480,7 @@ void HashTable<Key,
                Allocator>::erase(const ValueType* pos) {
   RegisterModification();
 #if DUMP_HASHTABLE_STATS
-  atomicIncrement(&HashTableStats::instance().numRemoves);
+  AtomicIncrement(&HashTableStats::instance().numRemoves);
 #endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
   ++stats_->numRemoves;
@@ -1707,12 +1712,12 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   ValueType* old_table = table_;
 #if DUMP_HASHTABLE_STATS
-  if (oldTableSize != 0)
-    atomicIncrement(&HashTableStats::instance().numRehashes);
+  if (old_table_size != 0)
+    AtomicIncrement(&HashTableStats::instance().numRehashes);
 #endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
-  if (oldTableSize != 0)
+  if (old_table_size != 0)
     ++stats_->numRehashes;
 #endif
@@ -1736,7 +1741,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
 #if DUMP_HASHTABLE_STATS_PER_TABLE
   if (!stats_)
-    stats_ = HashTableStatsPtr<Allocator>::create();
+    stats_ = HashTableStatsPtr<Allocator>::Create();
 #endif
 
   return new_entry;
@@ -1756,16 +1761,16 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
   ValueType* old_table = table_;
 #if DUMP_HASHTABLE_STATS
-  if (oldTableSize != 0)
-    atomicIncrement(&HashTableStats::instance().numRehashes);
+  if (old_table_size != 0)
+    AtomicIncrement(&HashTableStats::instance().numRehashes);
 #endif
 #if DUMP_HASHTABLE_STATS_PER_TABLE
-  if (oldTableSize != 0)
+  if (old_table_size != 0)
     ++stats_->numRehashes;
 #endif
 
-  // The Allocator::isGarbageCollected check is not needed. The check is just
+  // The Allocator::kIsGarbageCollected check is not needed. The check is just
   // a static hint for a compiler to indicate that Base::expandBuffer returns
   // false if Allocator is a PartitionAllocator.
   if (Allocator::kIsGarbageCollected && new_table_size > old_table_size) {
@@ -2069,7 +2074,8 @@ void HashTable<Key,
                KeyTraits,
                Allocator>::Trace(VisitorDispatcher visitor) {
 #if DUMP_HASHTABLE_STATS_PER_TABLE
-  Allocator::markNoTracing(visitor, stats_);
+  // XXX: this will simply crash.
+  // Allocator::MarkNoTracing(visitor, stats_);
 #endif
   // If someone else already marked the backing and queued up the trace and/or
...