Commit 57e438bc authored by Chan Li, committed by Commit Bot

Revert "heap: Check empty or deleted keys on local copy"

This reverts commit 31f93aa2.

Reason for revert: 

Findit identified this CL as the culprit for the wtf_unittests failures in the following builds:
- https://ci.chromium.org/p/chromium/builders/ci/Linux%20MSan%20Tests/21982
- https://ci.chromium.org/p/chromium/builders/ci/Linux%20ChromiumOS%20MSan%20Tests/17513

Sample analysis: https://findit-for-me.appspot.com/waterfall/failure?redirect=1&url=https://ci.chromium.org/p/chromium/builders/ci/Linux%20MSan%20Tests/21982


Original change's description:
> heap: Check empty or deleted keys on local copy
> 
> Tracing a HashTable involves checking whether keys are empty or
> deleted. This can result in data races with the mutator if the
> HashTable is modified while it is traced.
> To avoid such data races, we create a local copy of the key on the
> concurrent thread and perform the check on the copy.
> 
> Using the local copy on the mutator thread as well would cause
> significant regressions. Measuring locally on the
> facebook_infinite_scroll:2018 story from the v8.browsing_desktop
> benchmark showed an increase of 70% to 110% in the time it takes to
> process ephemerons (this measurement covers only the ephemeron
> HashTable, not all HashTables).
> To avoid these regressions, the local copy is used only by concurrent
> visitors.
> 
> For further details see
> https://drive.google.com/open?id=13DTRcF3xfVjJ2QaKzAOYFzqvTfGRNiX6WWL3CCACSoM
> 
> Bug: 986235
> Change-Id: Ie135c1076bd1834ae7f8b97ca6446890fc30a02c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2012961
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Reviewed-by: Kentaro Hara <haraken@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#735833}

TBR=haraken@chromium.org,mlippautz@chromium.org,omerkatz@chromium.org

Change-Id: I8f0e249f4db3326b38d774c9d718fdcd0a2566f6
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 986235
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2025324
Reviewed-by: Chan Li <chanli@chromium.org>
Commit-Queue: Chan Li <chanli@chromium.org>
Cr-Commit-Position: refs/heads/master@{#735962}
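
For context, the check that the reverted CL introduced can be sketched in a few lines. The standalone sketch below is illustrative only (AtomicWordCopy, Key, kEmptyKey, and kDeletedKey are made-up stand-ins, not Blink API): it copies a one-word key with a relaxed atomic load and runs the empty/deleted check on the local copy, so a concurrent tracer never inspects a live bucket that the mutator may be rewriting.

#include <atomic>
#include <cstddef>
#include <iostream>

// Word-wise atomic copy; atomicity is per word, not for the whole range.
// Assumes |bytes| is a multiple of sizeof(size_t) and |to| is word-aligned.
void AtomicWordCopy(void* to, const void* from, size_t bytes) {
  size_t* dst = static_cast<size_t*>(to);
  const std::atomic<size_t>* src =
      reinterpret_cast<const std::atomic<size_t>*>(from);
  for (size_t i = 0; i < bytes / sizeof(size_t); ++i)
    dst[i] = src[i].load(std::memory_order_relaxed);
}

struct Key { size_t raw; };  // hypothetical one-word key
constexpr size_t kEmptyKey = 0;                          // stand-in empty value
constexpr size_t kDeletedKey = static_cast<size_t>(-1);  // stand-in deleted value

// Concurrent-safe variant: the check runs on a local copy of the bucket,
// never on the live slot itself.
bool IsEmptyOrDeletedBucketSafe(const Key& bucket) {
  Key copy;
  AtomicWordCopy(&copy, &bucket, sizeof(Key));
  return copy.raw == kEmptyKey || copy.raw == kDeletedKey;
}

int main() {
  Key live{42};
  Key empty{kEmptyKey};
  std::cout << IsEmptyOrDeletedBucketSafe(live) << "\n";   // 0
  std::cout << IsEmptyOrDeletedBucketSafe(empty) << "\n";  // 1
}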
parent f08e2899
@@ -244,8 +244,6 @@ class PLATFORM_EXPORT ConcurrentMarkingVisitor
   // Concurrent variant of MarkingVisitorCommon::AccountMarkedBytes.
   void AccountMarkedBytesSafe(HeapObjectHeader*);

-  bool IsConcurrent() const override { return true; }
-
   bool ConcurrentTracingBailOut(TraceDescriptor desc) override {
     not_safe_to_concurrently_trace_worklist_.Push(desc);
     return true;
@@ -483,19 +483,7 @@ struct TraceHashTableBackingInCollectionTrait {
       // Use the payload size as recorded by the heap to determine how many
       // elements to trace.
       size_t length = header->PayloadSize() / sizeof(Value);
-      const bool is_concurrent = visitor->IsConcurrent();
       for (size_t i = 0; i < length; ++i) {
-        // If tracing concurrently, use a concurrent-safe version of
-        // IsEmptyOrDeletedBucket (check performed on a local copy instead
-        // of directly on the bucket).
-        if (is_concurrent &&
-            !HashTableHelper<Value, typename Table::ExtractorType,
-                             typename Table::KeyTraitsType>::
-                IsEmptyOrDeletedBucketSafe(array[i])) {
-          blink::TraceCollectionIfEnabled<WeakHandling, Value, Traits>::Trace(
-              visitor, &array[i]);
-          continue;
-        }
         if (!HashTableHelper<Value, typename Table::ExtractorType,
                              typename Table::KeyTraitsType>::
                 IsEmptyOrDeletedBucket(array[i])) {
@@ -506,6 +494,7 @@ struct TraceHashTableBackingInCollectionTrait {
     return false;
   }
 };
+
 template <typename Table>
 struct TraceInCollectionTrait<kNoWeakHandling,
                               blink::HeapHashTableBacking<Table>,
@@ -515,6 +504,7 @@ struct TraceInCollectionTrait<kNoWeakHandling,
                                   Table>::Trace(visitor, self);
   }
 };
+
 template <typename Table>
 struct TraceInCollectionTrait<kWeakHandling,
                               blink::HeapHashTableBacking<Table>,
@@ -560,21 +550,11 @@ struct TraceInCollectionTrait<
     blink::HeapObjectHeader* header =
         blink::HeapObjectHeader::FromPayload(self);
     size_t length = header->PayloadSize() / sizeof(Node*);
-    const bool is_concurrent = visitor->IsConcurrent();
     for (size_t i = 0; i < length; ++i) {
-      Node* node;
-      if (is_concurrent) {
-        // If tracing concurrently, IsEmptyOrDeletedBucket can cause data
-        // races. Loading array[i] atomically prevents possible data races.
-        // array[i] is of type Node* so it can be loaded atomically.
-        node = AsAtomicPtr(&array[i])->load(std::memory_order_relaxed);
-      } else {
-        node = array[i];
-      }
-      if (!HashTableHelper<
-              Node*, typename Table::ExtractorType,
-              typename Table::KeyTraitsType>::IsEmptyOrDeletedBucket(node)) {
-        visitor->Trace(node);
+      if (!HashTableHelper<Node*, typename Table::ExtractorType,
+                           typename Table::KeyTraitsType>::
+              IsEmptyOrDeletedBucket(array[i])) {
+        visitor->Trace(array[i]);
       }
     }
     return false;
@@ -292,8 +292,6 @@ class PLATFORM_EXPORT Visitor {
   // are aware of custom weakness and won't resize their backings.
   virtual void RegisterWeakCallback(WeakCallback callback, void* parameter) = 0;

-  virtual bool IsConcurrent() const { return false; }
-
   // TODO(crbug/986235): ConcurrentTracingBailOut is part of a temporary
   // bailout mechanism to avoid tracing collections on concurrent threads.
   // This method and any usage of it will be removed as soon as making all
@@ -249,7 +249,6 @@ jumbo_source_set("wtf_unittests_sources") {
   testonly = true

   sources = [
-    "allocator/atomic_memcpy_test.cc",
     "allocator/partitions_test.cc",
     "ascii_ctype_test.cc",
     "assertions_test.cc",
@@ -18,22 +18,3 @@ static_assert(WTF::IsStackAllocatedType<StackAllocatedType>::value,
               "Failed to detect STACK_ALLOCATED macro.");

 }  // namespace
-
-namespace WTF {
-
-void AtomicMemcpy(void* to, const void* from, size_t bytes) {
-  size_t* sizet_to = reinterpret_cast<size_t*>(to);
-  const size_t* sizet_from = reinterpret_cast<const size_t*>(from);
-  for (; bytes > sizeof(size_t);
-       bytes -= sizeof(size_t), ++sizet_to, ++sizet_from) {
-    *sizet_to = AsAtomicPtr(sizet_from)->load(std::memory_order_relaxed);
-  }
-  uint8_t* uint8t_to = reinterpret_cast<uint8_t*>(sizet_to);
-  const uint8_t* uint8t_from = reinterpret_cast<const uint8_t*>(sizet_from);
-  for (; bytes > 0; bytes -= sizeof(uint8_t), ++uint8t_to, ++uint8t_from) {
-    *uint8t_to = AsAtomicPtr(uint8t_from)->load(std::memory_order_relaxed);
-  }
-  DCHECK_EQ(0u, bytes);
-}
-
-}  // namespace WTF
@@ -7,7 +7,6 @@

 #include <atomic>

-#include "build/build_config.h"
 #include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
 #include "third_party/blink/renderer/platform/wtf/assertions.h"
 #include "third_party/blink/renderer/platform/wtf/type_traits.h"
@@ -161,59 +160,6 @@ ALWAYS_INLINE const std::atomic<T>* AsAtomicPtr(const T* t) {
   return reinterpret_cast<const std::atomic<T>*>(t);
 }

-// Load |bytes| bytes from |from| to |to| using atomic reads. Assumes |to| is
-// size_t-aligned and points to a buffer of size at least |bytes|. Note that
-// atomicity is guaranteed only per word, not for the entire |bytes| bytes as
-// a whole.
-WTF_EXPORT void AtomicMemcpy(void* to, const void* from, size_t bytes);
-
-template <size_t bytes>
-ALWAYS_INLINE void AtomicMemcpy(void* to, const void* from) {
-  AtomicMemcpy(to, from, bytes);
-}
-
-// AtomicMemcpy specializations:
-
-#if defined(ARCH_CPU_X86_64)
-template <>
-ALWAYS_INLINE void AtomicMemcpy<sizeof(uint32_t)>(void* to, const void* from) {
-  *reinterpret_cast<uint32_t*>(to) =
-      AsAtomicPtr(reinterpret_cast<const uint32_t*>(from))
-          ->load(std::memory_order_relaxed);
-}
-#endif  // ARCH_CPU_X86_64
-
-template <>
-ALWAYS_INLINE void AtomicMemcpy<sizeof(size_t)>(void* to, const void* from) {
-  *reinterpret_cast<size_t*>(to) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from))
-          ->load(std::memory_order_relaxed);
-}
-
-template <>
-ALWAYS_INLINE void AtomicMemcpy<2 * sizeof(size_t)>(void* to,
-                                                    const void* from) {
-  *reinterpret_cast<size_t*>(to) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from))
-          ->load(std::memory_order_relaxed);
-  *(reinterpret_cast<size_t*>(to) + 1) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 1)
-          ->load(std::memory_order_relaxed);
-}
-
-template <>
-ALWAYS_INLINE void AtomicMemcpy<3 * sizeof(size_t)>(void* to,
-                                                    const void* from) {
-  *reinterpret_cast<size_t*>(to) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from))
-          ->load(std::memory_order_relaxed);
-  *(reinterpret_cast<size_t*>(to) + 1) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 1)
-          ->load(std::memory_order_relaxed);
-  *(reinterpret_cast<size_t*>(to) + 2) =
-      AsAtomicPtr(reinterpret_cast<const size_t*>(from) + 2)
-          ->load(std::memory_order_relaxed);
-}
-
 }  // namespace WTF

 // This version of placement new omits a 0 check.
deleted file: third_party/blink/renderer/platform/wtf/allocator/atomic_memcpy_test.cc
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace WTF {
-
-class AtomicMemcpyTest : public ::testing::Test {};
-
-template <size_t buffer_size>
-void TestAtomicMemcpy() {
-  unsigned char src[buffer_size];
-  for (size_t i = 1; i < buffer_size; ++i)
-    src[i] = static_cast<char>(i);
-  // Allocating extra memory before and after the buffer to make sure the
-  // atomic memcpy doesn't exceed the buffer in any direction.
-  unsigned char tgt[buffer_size + (2 * sizeof(size_t))];
-  memset(tgt, 0, buffer_size + (2 * sizeof(size_t)));
-  AtomicMemcpy<buffer_size>(tgt + sizeof(size_t), src);
-  // Check nothing before the buffer was changed
-  EXPECT_EQ(0u, *reinterpret_cast<size_t*>(&tgt[0]));
-  // Check buffer was copied correctly
-  EXPECT_TRUE(!memcmp(src, tgt + sizeof(size_t), buffer_size));
-  // Check nothing after the buffer was changed
-  EXPECT_EQ(0u, *reinterpret_cast<size_t*>(&tgt[sizeof(size_t) + buffer_size]));
-}
-
-TEST_F(AtomicMemcpyTest, UINT8T) {
-  TestAtomicMemcpy<sizeof(uint8_t)>();
-}
-TEST_F(AtomicMemcpyTest, UINT16T) {
-  TestAtomicMemcpy<sizeof(uint16_t)>();
-}
-TEST_F(AtomicMemcpyTest, UINT32T) {
-  TestAtomicMemcpy<sizeof(uint32_t)>();
-}
-TEST_F(AtomicMemcpyTest, UINT64T) {
-  TestAtomicMemcpy<sizeof(uint64_t)>();
-}
-
-// Tests for sizes that don't match a specific primitive type:
-TEST_F(AtomicMemcpyTest, 17Bytes) {
-  TestAtomicMemcpy<17>();
-}
-TEST_F(AtomicMemcpyTest, 34Bytes) {
-  TestAtomicMemcpy<34>();
-}
-TEST_F(AtomicMemcpyTest, 68Bytes) {
-  TestAtomicMemcpy<68>();
-}
-TEST_F(AtomicMemcpyTest, 127Bytes) {
-  TestAtomicMemcpy<127>();
-}
-
-}  // namespace WTF
@@ -22,7 +22,6 @@
 #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_MAP_H_

 #include <initializer_list>

-#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
 #include "third_party/blink/renderer/platform/wtf/allocator/partition_allocator.h"
 #include "third_party/blink/renderer/platform/wtf/construct_traits.h"
 #include "third_party/blink/renderer/platform/wtf/hash_table.h"
@@ -44,12 +43,6 @@ struct KeyValuePairKeyExtractor {
   static const typename T::KeyType& Extract(const T& p) {
     return p.key;
   }
-  // Assumes out points to a buffer of size at least sizeof(T::KeyType).
-  template <typename T>
-  static const typename T::KeyType& ExtractSafe(const T& p, void* out) {
-    AtomicMemcpy<sizeof(typename T::KeyType)>(out, &p.key);
-    return *reinterpret_cast<typename T::KeyType*>(out);
-  }
 };

 // Note: empty or deleted key values are not allowed, using them may lead to
@@ -22,7 +22,6 @@
 #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_WTF_HASH_SET_H_

 #include <initializer_list>

-#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
 #include "third_party/blink/renderer/platform/wtf/allocator/partition_allocator.h"
 #include "third_party/blink/renderer/platform/wtf/hash_table.h"
 #include "third_party/blink/renderer/platform/wtf/wtf_size_t.h"
@@ -152,12 +151,6 @@ struct IdentityExtractor {
   static const T& Extract(const T& t) {
     return t;
   }
-  // Assumes out points to a buffer of size at least sizeof(T).
-  template <typename T>
-  static const T& ExtractSafe(const T& t, void* out) {
-    AtomicMemcpy<sizeof(T)>(out, &t);
-    return *reinterpret_cast<T*>(out);
-  }
 };

 template <typename Translator>
@@ -649,23 +649,15 @@ struct HashTableAddResult final {
 template <typename Value, typename Extractor, typename KeyTraits>
 struct HashTableHelper {
-  using Key = typename KeyTraits::TraitType;
-
   STATIC_ONLY(HashTableHelper);
-  static bool IsEmptyBucket(const Key& key) {
-    return IsHashTraitsEmptyValue<KeyTraits>(key);
+  static bool IsEmptyBucket(const Value& value) {
+    return IsHashTraitsEmptyValue<KeyTraits>(Extractor::Extract(value));
   }
-  static bool IsDeletedBucket(const Key& key) {
-    return KeyTraits::IsDeletedValue(key);
+  static bool IsDeletedBucket(const Value& value) {
+    return KeyTraits::IsDeletedValue(Extractor::Extract(value));
   }
   static bool IsEmptyOrDeletedBucket(const Value& value) {
-    const auto& key = Extractor::Extract(value);
-    return IsEmptyBucket(key) || IsDeletedBucket(key);
-  }
-  static bool IsEmptyOrDeletedBucketSafe(const Value& value) {
-    char buf[sizeof(Key)];
-    const Key& key = Extractor::ExtractSafe(value, &buf);
-    return IsEmptyBucket(key) || IsDeletedBucket(key);
+    return IsEmptyBucket(value) || IsDeletedBucket(value);
   }
 };
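
Two closing observations on the diff above. First, the deleted test initializes src starting at index 1, leaving src[0] uninitialized when memcmp later reads it; that is exactly the kind of defect MSan reports, and it plausibly explains the wtf_unittests failures on the MSan builders cited as the revert reason (an inference from the diff, not something this page states). Second, the race the original CL targeted is easy to reproduce in isolation. The standalone sketch below is illustrative, not Blink code: a mutator thread rewrites a one-word bucket while a tracer-like loop checks it through a local copy obtained with a relaxed atomic load; replacing that load with a plain read of key would be the data race.

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>

int main() {
  uint64_t key = 0;  // stands in for one hash-table bucket (a one-word key)
  std::atomic<bool> done{false};

  // Mutator thread: keeps flipping the bucket between "empty" (0) and a
  // live value, writing through an atomic view of the slot.
  std::thread mutator([&] {
    for (uint64_t i = 0; i < 100000; ++i)
      reinterpret_cast<std::atomic<uint64_t>*>(&key)->store(
          (i & 1) ? 42 : 0, std::memory_order_relaxed);
    done.store(true, std::memory_order_release);
  });

  // Concurrent "tracer": takes a local copy with an atomic load and runs
  // the empty-bucket check on the copy, never on the live slot.
  uint64_t empty_seen = 0;
  while (!done.load(std::memory_order_acquire)) {
    uint64_t copy =
        reinterpret_cast<const std::atomic<uint64_t>*>(&key)->load(
            std::memory_order_relaxed);
    if (copy == 0)
      ++empty_seen;
  }
  mutator.join();
  std::cout << "empty observed " << empty_seen << " times\n";
}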