Commit 31f93aa2 authored by Omer Katz, committed by Commit Bot

heap: Check empty or deleted keys on local copy

Tracing a HashTable involves checking whether keys are empty or
deleted. This can result in data races with the mutator if the
HashTable is modified while it is traced.
To avoid such data races, we create a local copy of the key on the
concurrent thread and perform the check on the copy.

Using the local copy on the mutator thread as well would cause significant
regressions. Local measurements on the facebook_infinite_scroll:2018 story
from the v8.browsing_desktop benchmark showed an increase ranging from 70%
to 110% in the time it takes to process ephemerons (this measurement
doesn't cover all HashTables, just the ephemeron HashTable).
To avoid these regressions, the local copy is used only for concurrent
visitors.
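
As a rough illustration of the check-on-a-local-copy idea (a minimal,
hypothetical sketch, not the actual Blink code; the Bucket type and its
0/-1 sentinel key values are made up):

  #include <atomic>

  // Hypothetical bucket layout: key == 0 means "empty", key == -1 means
  // "deleted".
  struct Bucket { int key; };

  // Concurrent-safe check: take one relaxed atomic snapshot of the key and
  // evaluate both predicates on the local copy, so a mutator writing the
  // bucket at the same time cannot race with the check itself.
  bool IsEmptyOrDeletedBucketConcurrent(const Bucket* bucket) {
    const int key = reinterpret_cast<const std::atomic<int>*>(&bucket->key)
                        ->load(std::memory_order_relaxed);
    return key == 0 || key == -1;
  }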

For further details see
https://drive.google.com/open?id=13DTRcF3xfVjJ2QaKzAOYFzqvTfGRNiX6WWL3CCACSoM

Bug: 986235
Change-Id: Ie135c1076bd1834ae7f8b97ca6446890fc30a02c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2012961
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#735833}
parent 02d52420
......@@ -244,6 +244,8 @@ class PLATFORM_EXPORT ConcurrentMarkingVisitor
// Concurrent variant of MarkingVisitorCommon::AccountMarkedBytes.
void AccountMarkedBytesSafe(HeapObjectHeader*);
bool IsConcurrent() const override { return true; }
bool ConcurrentTracingBailOut(TraceDescriptor desc) override {
not_safe_to_concurrently_trace_worklist_.Push(desc);
return true;
......
......@@ -483,7 +483,19 @@ struct TraceHashTableBackingInCollectionTrait {
// Use the payload size as recorded by the heap to determine how many
// elements to trace.
size_t length = header->PayloadSize() / sizeof(Value);
const bool is_concurrent = visitor->IsConcurrent();
    for (size_t i = 0; i < length; ++i) {
      // If tracing concurrently, use a concurrent-safe version of
      // IsEmptyOrDeletedBucket (check performed on a local copy instead
      // of directly on the bucket).
      if (is_concurrent) {
        if (!HashTableHelper<Value, typename Table::ExtractorType,
                             typename Table::KeyTraitsType>::
                IsEmptyOrDeletedBucketSafe(array[i])) {
          blink::TraceCollectionIfEnabled<WeakHandling, Value, Traits>::Trace(
              visitor, &array[i]);
        }
        continue;
      }
      if (!HashTableHelper<Value, typename Table::ExtractorType,
                           typename Table::KeyTraitsType>::
              IsEmptyOrDeletedBucket(array[i])) {
......@@ -494,7 +506,6 @@ struct TraceHashTableBackingInCollectionTrait {
return false;
}
};
template <typename Table>
struct TraceInCollectionTrait<kNoWeakHandling,
blink::HeapHashTableBacking<Table>,
......@@ -504,7 +515,6 @@ struct TraceInCollectionTrait<kNoWeakHandling,
Table>::Trace(visitor, self);
}
};
template <typename Table>
struct TraceInCollectionTrait<kWeakHandling,
blink::HeapHashTableBacking<Table>,
......@@ -550,11 +560,21 @@ struct TraceInCollectionTrait<
blink::HeapObjectHeader* header =
blink::HeapObjectHeader::FromPayload(self);
size_t length = header->PayloadSize() / sizeof(Node*);
const bool is_concurrent = visitor->IsConcurrent();
for (size_t i = 0; i < length; ++i) {
if (!HashTableHelper<Node*, typename Table::ExtractorType,
typename Table::KeyTraitsType>::
IsEmptyOrDeletedBucket(array[i])) {
visitor->Trace(array[i]);
Node* node;
if (is_concurrent) {
        // If tracing concurrently, checking IsEmptyOrDeletedBucket directly
        // on array[i] can race with the mutator. array[i] is of type Node*,
        // so it can simply be loaded atomically to avoid the race.
node = AsAtomicPtr(&array[i])->load(std::memory_order_relaxed);
} else {
node = array[i];
}
if (!HashTableHelper<
Node*, typename Table::ExtractorType,
typename Table::KeyTraitsType>::IsEmptyOrDeletedBucket(node)) {
visitor->Trace(node);
}
}
return false;
......
......@@ -292,6 +292,8 @@ class PLATFORM_EXPORT Visitor {
// are aware of custom weakness and won't resize their backings.
virtual void RegisterWeakCallback(WeakCallback callback, void* parameter) = 0;
virtual bool IsConcurrent() const { return false; }
// TODO(crbug/986235): ConcurrentTracingBailOut is part of a temporary
// bailout mechanism to avoid tracing collections on concurrent threads.
// This method and any usage of it will be removed as soon as making all
......
......@@ -249,6 +249,7 @@ jumbo_source_set("wtf_unittests_sources") {
testonly = true
sources = [
"allocator/atomic_memcpy_test.cc",
"allocator/partitions_test.cc",
"ascii_ctype_test.cc",
"assertions_test.cc",
......
......@@ -18,3 +18,22 @@ static_assert(WTF::IsStackAllocatedType<StackAllocatedType>::value,
"Failed to detect STACK_ALLOCATED macro.");
} // namespace
namespace WTF {
void AtomicMemcpy(void* to, const void* from, size_t bytes) {
size_t* sizet_to = reinterpret_cast<size_t*>(to);
const size_t* sizet_from = reinterpret_cast<const size_t*>(from);
for (; bytes > sizeof(size_t);
bytes -= sizeof(size_t), ++sizet_to, ++sizet_from) {
*sizet_to = AsAtomicPtr(sizet_from)->load(std::memory_order_relaxed);
}
uint8_t* uint8t_to = reinterpret_cast<uint8_t*>(sizet_to);
const uint8_t* uint8t_from = reinterpret_cast<const uint8_t*>(sizet_from);
for (; bytes > 0; bytes -= sizeof(uint8_t), ++uint8t_to, ++uint8t_from) {
*uint8t_to = AsAtomicPtr(uint8t_from)->load(std::memory_order_relaxed);
}
DCHECK_EQ(0u, bytes);
}
} // namespace WTF
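
A caller-side sketch of how AtomicMemcpy is intended to be used to snapshot
a value larger than a single word (the BigKey type and KeyIsUnset function
below are made-up examples; only WTF::AtomicMemcpy comes from this change,
and atomicity holds per word, not for the copy as a whole):

  #include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"

  // Hypothetical two-word key.
  struct BigKey {
    void* first;
    void* second;
  };

  // Copy the key out of the (possibly concurrently mutated) table slot with
  // per-word atomic reads, then run the check on the local copy.
  bool KeyIsUnset(const BigKey& key_in_table) {
    alignas(BigKey) char buf[sizeof(BigKey)];
    WTF::AtomicMemcpy<sizeof(BigKey)>(buf, &key_in_table);
    const BigKey& key = *reinterpret_cast<const BigKey*>(buf);
    return !key.first && !key.second;
  }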
......@@ -7,6 +7,7 @@
#include <atomic>
#include "build/build_config.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/type_traits.h"
......@@ -160,6 +161,59 @@ ALWAYS_INLINE const std::atomic<T>* AsAtomicPtr(const T* t) {
return reinterpret_cast<const std::atomic<T>*>(t);
}
// Load |bytes| bytes from |from| to |to| using atomic reads. Assumes |to| is
// size_t-aligned and points to a buffer of size at least |bytes|. Note that
// atomicity is guaranteed only per word, not for the entire |bytes| bytes as
// a whole.
WTF_EXPORT void AtomicMemcpy(void* to, const void* from, size_t bytes);
template <size_t bytes>
ALWAYS_INLINE void AtomicMemcpy(void* to, const void* from) {
AtomicMemcpy(to, from, bytes);
}
// AtomicMemcpy specializations:
#if defined(ARCH_CPU_X86_64)
template <>
ALWAYS_INLINE void AtomicMemcpy<sizeof(uint32_t)>(void* to, const void* from) {
*reinterpret_cast<uint32_t*>(to) =
AsAtomicPtr(reinterpret_cast<const uint32_t*>(from))
->load(std::memory_order_relaxed);
}
#endif // ARCH_CPU_X86_64
template <>
ALWAYS_INLINE void AtomicMemcpy<sizeof(size_t)>(void* to, const void* from) {
*reinterpret_cast<size_t*>(to) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from))
->load(std::memory_order_relaxed);
}
template <>
ALWAYS_INLINE void AtomicMemcpy<2 * sizeof(size_t)>(void* to,
const void* from) {
*reinterpret_cast<size_t*>(to) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from))
->load(std::memory_order_relaxed);
*(reinterpret_cast<size_t*>(to) + 1) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 1)
->load(std::memory_order_relaxed);
}
template <>
ALWAYS_INLINE void AtomicMemcpy<3 * sizeof(size_t)>(void* to,
const void* from) {
*reinterpret_cast<size_t*>(to) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from))
->load(std::memory_order_relaxed);
*(reinterpret_cast<size_t*>(to) + 1) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 1)
->load(std::memory_order_relaxed);
*(reinterpret_cast<size_t*>(to) + 2) =
AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 2)
->load(std::memory_order_relaxed);
}
} // namespace WTF
// This version of placement new omits a 0 check.
......
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace WTF {
class AtomicMemcpyTest : public ::testing::Test {};
template <size_t buffer_size>
void TestAtomicMemcpy() {
unsigned char src[buffer_size];
  for (size_t i = 0; i < buffer_size; ++i)
    src[i] = static_cast<char>(i);
// Allocating extra memory before and after the buffer to make sure the
// atomic memcpy doesn't exceed the buffer in any direction.
unsigned char tgt[buffer_size + (2 * sizeof(size_t))];
memset(tgt, 0, buffer_size + (2 * sizeof(size_t)));
AtomicMemcpy<buffer_size>(tgt + sizeof(size_t), src);
// Check nothing before the buffer was changed
EXPECT_EQ(0u, *reinterpret_cast<size_t*>(&tgt[0]));
// Check buffer was copied correctly
EXPECT_TRUE(!memcmp(src, tgt + sizeof(size_t), buffer_size));
// Check nothing after the buffer was changed
EXPECT_EQ(0u, *reinterpret_cast<size_t*>(&tgt[sizeof(size_t) + buffer_size]));
}
TEST_F(AtomicMemcpyTest, UINT8T) {
TestAtomicMemcpy<sizeof(uint8_t)>();
}
TEST_F(AtomicMemcpyTest, UINT16T) {
TestAtomicMemcpy<sizeof(uint16_t)>();
}
TEST_F(AtomicMemcpyTest, UINT32T) {
TestAtomicMemcpy<sizeof(uint32_t)>();
}
TEST_F(AtomicMemcpyTest, UINT64T) {
TestAtomicMemcpy<sizeof(uint64_t)>();
}
// Tests for sizes that don't match a specific primitive type:
TEST_F(AtomicMemcpyTest, 17Bytes) {
TestAtomicMemcpy<17>();
}
TEST_F(AtomicMemcpyTest, 34Bytes) {
TestAtomicMemcpy<34>();
}
TEST_F(AtomicMemcpyTest, 68Bytes) {
TestAtomicMemcpy<68>();
}
TEST_F(AtomicMemcpyTest, 127Bytes) {
TestAtomicMemcpy<127>();
}
} // namespace WTF
......@@ -22,6 +22,7 @@
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_MAP_H_
#include <initializer_list>
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partition_allocator.h"
#include "third_party/blink/renderer/platform/wtf/construct_traits.h"
#include "third_party/blink/renderer/platform/wtf/hash_table.h"
......@@ -43,6 +44,12 @@ struct KeyValuePairKeyExtractor {
static const typename T::KeyType& Extract(const T& p) {
return p.key;
}
  // Assumes |out| points to a buffer of at least sizeof(T::KeyType) bytes.
template <typename T>
static const typename T::KeyType& ExtractSafe(const T& p, void* out) {
AtomicMemcpy<sizeof(typename T::KeyType)>(out, &p.key);
return *reinterpret_cast<typename T::KeyType*>(out);
}
};
// Note: empty or deleted key values are not allowed, using them may lead to
......
......@@ -22,6 +22,7 @@
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_SET_H_
#include <initializer_list>
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partition_allocator.h"
#include "third_party/blink/renderer/platform/wtf/hash_table.h"
#include "third_party/blink/renderer/platform/wtf/wtf_size_t.h"
......@@ -151,6 +152,12 @@ struct IdentityExtractor {
static const T& Extract(const T& t) {
return t;
}
  // Assumes |out| points to a buffer of at least sizeof(T) bytes.
template <typename T>
static const T& ExtractSafe(const T& t, void* out) {
AtomicMemcpy<sizeof(T)>(out, &t);
return *reinterpret_cast<T*>(out);
}
};
template <typename Translator>
......
......@@ -649,15 +649,23 @@ struct HashTableAddResult final {
template <typename Value, typename Extractor, typename KeyTraits>
struct HashTableHelper {
using Key = typename KeyTraits::TraitType;
STATIC_ONLY(HashTableHelper);
  static bool IsEmptyBucket(const Key& key) {
    return IsHashTraitsEmptyValue<KeyTraits>(key);
  }
  static bool IsDeletedBucket(const Key& key) {
    return KeyTraits::IsDeletedValue(key);
  }
  static bool IsEmptyOrDeletedBucket(const Value& value) {
    const auto& key = Extractor::Extract(value);
    return IsEmptyBucket(key) || IsDeletedBucket(key);
  }
static bool IsEmptyOrDeletedBucketSafe(const Value& value) {
char buf[sizeof(Key)];
const Key& key = Extractor::ExtractSafe(value, &buf);
return IsEmptyBucket(key) || IsDeletedBucket(key);
}
};
......