Commit 69488b17 authored by Anton Bikineev's avatar Anton Bikineev Committed by Commit Bot

heap: Don't call write barrier on vector reallocations

This CL reorders the reallocation scenario as follows:
1) allocating a new buffer;
2) moving elements in the current buffer to the new one ***without
   tracing them or executing write barriers on them***
3) resetting HeapVector's pointer to the new buffer;
4) executing write barrier on the new buffer.

A follow-up to this CL will introduce the same optimization for
HashTable.

Bug: 1021889

Change-Id: I367165ba26045d3a4ca967c7f9f006ff88be4ed4
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1899883
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#714926}
parent a0ab2c88
...@@ -487,12 +487,21 @@ class ConstructTraits<blink::Member<T>, Traits, Allocator> { ...@@ -487,12 +487,21 @@ class ConstructTraits<blink::Member<T>, Traits, Allocator> {
STATIC_ONLY(ConstructTraits); STATIC_ONLY(ConstructTraits);
public: public:
template <typename... Args>
static blink::Member<T>* Construct(void* location, Args&&... args) {
return new (NotNull, location)
blink::Member<T>(std::forward<Args>(args)...);
}
static void NotifyNewElement(blink::Member<T>* element) {
element->WriteBarrier();
}
template <typename... Args> template <typename... Args>
static blink::Member<T>* ConstructAndNotifyElement(void* location, static blink::Member<T>* ConstructAndNotifyElement(void* location,
Args&&... args) { Args&&... args) {
blink::Member<T>* object = blink::Member<T>* object = Construct(location, std::forward<Args>(args)...);
new (NotNull, location) blink::Member<T>(std::forward<Args>(args)...); NotifyNewElement(object);
object->WriteBarrier();
return object; return object;
} }
......
...@@ -21,15 +21,24 @@ class ConstructTraits { ...@@ -21,15 +21,24 @@ class ConstructTraits {
public: public:
// Construct a single element that would otherwise be constructed using // Construct a single element that would otherwise be constructed using
// placement new. // placement new.
template <typename... Args>
static T* Construct(void* location, Args&&... args) {
return new (NotNull, location) T(std::forward<Args>(args)...);
}
// After constructing elements using memcopy or memmove (or similar)
// |NotifyNewElement| needs to be called to propagate that information.
static void NotifyNewElement(T* element) {
Allocator::template NotifyNewObject<T, Traits>(element);
}
template <typename... Args> template <typename... Args>
static T* ConstructAndNotifyElement(void* location, Args&&... args) { static T* ConstructAndNotifyElement(void* location, Args&&... args) {
T* object = new (NotNull, location) T(std::forward<Args>(args)...); T* object = Construct(location, std::forward<Args>(args)...);
Allocator::template NotifyNewObject<T, Traits>(object); NotifyNewElement(object);
return object; return object;
} }
// After constructing elements using memcopy or memmove (or similar)
// |NotifyNewElements| needs to be called to propagate that information.
static void NotifyNewElements(T* array, size_t len) { static void NotifyNewElements(T* array, size_t len) {
Allocator::template NotifyNewObjects<T, Traits>(array, len); Allocator::template NotifyNewObjects<T, Traits>(array, len);
} }
......
...@@ -1353,7 +1353,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: ...@@ -1353,7 +1353,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
DCHECK(!IsEmptyOrDeletedBucket(*entry)); DCHECK(!IsEmptyOrDeletedBucket(*entry));
// Translate constructs an element so we need to notify using the trait. Avoid // Translate constructs an element so we need to notify using the trait. Avoid
// doing that in the translator so that they can be easily customized. // doing that in the translator so that they can be easily customized.
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElements(entry, 1); ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);
++key_count_; ++key_count_;
...@@ -1421,7 +1421,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: ...@@ -1421,7 +1421,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
DCHECK(!IsEmptyOrDeletedBucket(*entry)); DCHECK(!IsEmptyOrDeletedBucket(*entry));
// Translate constructs an element so we need to notify using the trait. Avoid // Translate constructs an element so we need to notify using the trait. Avoid
// doing that in the translator so that they can be easily customized. // doing that in the translator so that they can be easily customized.
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElements(entry, 1); ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);
++key_count_; ++key_count_;
if (ShouldExpand()) if (ShouldExpand())
......
...@@ -151,27 +151,31 @@ struct VectorMover; ...@@ -151,27 +151,31 @@ struct VectorMover;
template <typename T, typename Allocator> template <typename T, typename Allocator>
struct VectorMover<false, T, Allocator> { struct VectorMover<false, T, Allocator> {
STATIC_ONLY(VectorMover); STATIC_ONLY(VectorMover);
static void Move(T* src, T* src_end, T* dst) { using Traits = ConstructTraits<T, VectorTraits<T>, Allocator>;
static void Move(T* src, T* src_end, T* dst, bool has_inline_buffer) {
while (src != src_end) { while (src != src_end) {
ConstructTraits<T, VectorTraits<T>, Allocator>::ConstructAndNotifyElement( T* newly_created = Traits::Construct(dst, std::move(*src));
dst, std::move(*src)); if (has_inline_buffer)
Traits::NotifyNewElement(newly_created);
src->~T(); src->~T();
++dst; ++dst;
++src; ++src;
} }
} }
static void MoveOverlapping(T* src, T* src_end, T* dst) { static void MoveOverlapping(T* src,
T* src_end,
T* dst,
bool has_inline_buffer) {
if (src > dst) { if (src > dst) {
Move(src, src_end, dst); Move(src, src_end, dst, has_inline_buffer);
} else { } else {
T* dst_end = dst + (src_end - src); T* dst_end = dst + (src_end - src);
while (src != src_end) { while (src != src_end) {
--src_end; --src_end;
--dst_end; --dst_end;
ConstructTraits<T, VectorTraits<T>, T* newly_created = Traits::Construct(dst_end, std::move(*src_end));
Allocator>::ConstructAndNotifyElement(dst_end, if (has_inline_buffer)
std::move( Traits::NotifyNewElement(newly_created);
*src_end));
src_end->~T(); src_end->~T();
} }
} }
...@@ -179,30 +183,37 @@ struct VectorMover<false, T, Allocator> { ...@@ -179,30 +183,37 @@ struct VectorMover<false, T, Allocator> {
static void Swap(T* src, T* src_end, T* dst) { static void Swap(T* src, T* src_end, T* dst) {
std::swap_ranges(src, src_end, dst); std::swap_ranges(src, src_end, dst);
const size_t len = src_end - src; const size_t len = src_end - src;
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(src, len); Traits::NotifyNewElements(src, len);
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(dst, len); Traits::NotifyNewElements(dst, len);
} }
}; };
template <typename T, typename Allocator> template <typename T, typename Allocator>
struct VectorMover<true, T, Allocator> { struct VectorMover<true, T, Allocator> {
STATIC_ONLY(VectorMover); STATIC_ONLY(VectorMover);
static void Move(const T* src, const T* src_end, T* dst) { using Traits = ConstructTraits<T, VectorTraits<T>, Allocator>;
static void Move(const T* src,
const T* src_end,
T* dst,
bool has_inline_buffer) {
if (LIKELY(dst && src)) { if (LIKELY(dst && src)) {
memcpy(dst, src, memcpy(dst, src,
reinterpret_cast<const char*>(src_end) - reinterpret_cast<const char*>(src_end) -
reinterpret_cast<const char*>(src)); reinterpret_cast<const char*>(src));
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements( if (has_inline_buffer)
dst, src_end - src); Traits::NotifyNewElements(dst, src_end - src);
} }
} }
static void MoveOverlapping(const T* src, const T* src_end, T* dst) { static void MoveOverlapping(const T* src,
const T* src_end,
T* dst,
bool has_inline_buffer) {
if (LIKELY(dst && src)) { if (LIKELY(dst && src)) {
memmove(dst, src, memmove(dst, src,
reinterpret_cast<const char*>(src_end) - reinterpret_cast<const char*>(src_end) -
reinterpret_cast<const char*>(src)); reinterpret_cast<const char*>(src));
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements( if (has_inline_buffer)
dst, src_end - src); Traits::NotifyNewElements(dst, src_end - src);
} }
} }
static void Swap(T* src, T* src_end, T* dst) { static void Swap(T* src, T* src_end, T* dst) {
...@@ -210,8 +221,8 @@ struct VectorMover<true, T, Allocator> { ...@@ -210,8 +221,8 @@ struct VectorMover<true, T, Allocator> {
reinterpret_cast<char*>(src_end), reinterpret_cast<char*>(src_end),
reinterpret_cast<char*>(dst)); reinterpret_cast<char*>(dst));
const size_t len = src_end - src; const size_t len = src_end - src;
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(src, len); Traits::NotifyNewElements(src, len);
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(dst, len); Traits::NotifyNewElements(dst, len);
} }
}; };
...@@ -332,14 +343,18 @@ struct VectorTypeOperations { ...@@ -332,14 +343,18 @@ struct VectorTypeOperations {
Allocator>::Initialize(begin, end); Allocator>::Initialize(begin, end);
} }
static void Move(T* src, T* src_end, T* dst) { static void Move(T* src, T* src_end, T* dst, bool has_inline_buffer = true) {
VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T, Allocator>::Move( VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T, Allocator>::Move(
src, src_end, dst); src, src_end, dst, has_inline_buffer);
} }
static void MoveOverlapping(T* src, T* src_end, T* dst) { static void MoveOverlapping(T* src,
T* src_end,
T* dst,
bool has_inline_buffer = true) {
VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T, VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T,
Allocator>::MoveOverlapping(src, src_end, dst); Allocator>::MoveOverlapping(src, src_end, dst,
has_inline_buffer);
} }
static void Swap(T* src, T* src_end, T* dst) { static void Swap(T* src, T* src_end, T* dst) {
...@@ -384,13 +399,20 @@ class VectorBufferBase { ...@@ -384,13 +399,20 @@ class VectorBufferBase {
DISALLOW_NEW(); DISALLOW_NEW();
public: public:
void AllocateBuffer(wtf_size_t new_capacity) { VectorBufferBase(VectorBufferBase&&) = default;
VectorBufferBase& operator=(VectorBufferBase&&) = default;
void AllocateBufferNoBarrier(wtf_size_t new_capacity) {
DCHECK(new_capacity); DCHECK(new_capacity);
DCHECK_LE(new_capacity, DCHECK_LE(new_capacity,
Allocator::template MaxElementCountInBackingStore<T>()); Allocator::template MaxElementCountInBackingStore<T>());
size_t size_to_allocate = AllocationSize(new_capacity); size_t size_to_allocate = AllocationSize(new_capacity);
buffer_ = Allocator::template AllocateVectorBacking<T>(size_to_allocate); buffer_ = Allocator::template AllocateVectorBacking<T>(size_to_allocate);
capacity_ = static_cast<wtf_size_t>(size_to_allocate / sizeof(T)); capacity_ = static_cast<wtf_size_t>(size_to_allocate / sizeof(T));
}
void AllocateBuffer(wtf_size_t new_capacity) {
AllocateBufferNoBarrier(new_capacity);
Allocator::BackingWriteBarrier(buffer_); Allocator::BackingWriteBarrier(buffer_);
} }
...@@ -423,6 +445,11 @@ class VectorBufferBase { ...@@ -423,6 +445,11 @@ class VectorBufferBase {
#endif #endif
} }
void MoveBufferInto(VectorBufferBase& other) {
other.buffer_ = buffer_;
other.capacity_ = capacity_;
}
// |end| is exclusive, a la STL. // |end| is exclusive, a la STL.
struct OffsetRange final { struct OffsetRange final {
OffsetRange() : begin(0), end(0) {} OffsetRange() : begin(0), end(0) {}
...@@ -436,6 +463,12 @@ class VectorBufferBase { ...@@ -436,6 +463,12 @@ class VectorBufferBase {
}; };
protected: protected:
static VectorBufferBase AllocateTemporaryBuffer(wtf_size_t capacity) {
VectorBufferBase buffer;
buffer.AllocateBufferNoBarrier(capacity);
return buffer;
}
VectorBufferBase() : buffer_(nullptr), capacity_(0) {} VectorBufferBase() : buffer_(nullptr), capacity_(0) {}
VectorBufferBase(T* buffer, wtf_size_t capacity) VectorBufferBase(T* buffer, wtf_size_t capacity)
...@@ -443,6 +476,7 @@ class VectorBufferBase { ...@@ -443,6 +476,7 @@ class VectorBufferBase {
VectorBufferBase(HashTableDeletedValueType value) VectorBufferBase(HashTableDeletedValueType value)
: buffer_(reinterpret_cast<T*>(-1)) {} : buffer_(reinterpret_cast<T*>(-1)) {}
bool IsHashTableDeletedValue() const { bool IsHashTableDeletedValue() const {
return buffer_ == reinterpret_cast<T*>(-1); return buffer_ == reinterpret_cast<T*>(-1);
} }
...@@ -1286,6 +1320,12 @@ class Vector ...@@ -1286,6 +1320,12 @@ class Vector
template <typename U> template <typename U>
void AppendSlowCase(U&&); void AppendSlowCase(U&&);
bool HasInlineBuffer() const {
return INLINE_CAPACITY && !this->HasOutOfLineBuffer();
}
void ReallocateBuffer(wtf_size_t);
// This is to prevent compilation of deprecated calls like 'vector.erase(0)'. // This is to prevent compilation of deprecated calls like 'vector.erase(0)'.
void erase(std::nullptr_t) = delete; void erase(std::nullptr_t) = delete;
...@@ -1615,8 +1655,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity( ...@@ -1615,8 +1655,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity(
wtf_size_t new_capacity) { wtf_size_t new_capacity) {
if (UNLIKELY(new_capacity <= capacity())) if (UNLIKELY(new_capacity <= capacity()))
return; return;
T* old_buffer = begin(); if (!data()) {
if (!old_buffer) {
Base::AllocateBuffer(new_capacity); Base::AllocateBuffer(new_capacity);
return; return;
} }
...@@ -1633,13 +1672,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity( ...@@ -1633,13 +1672,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity(
// Reallocating a backing buffer may resurrect a dead object. // Reallocating a backing buffer may resurrect a dead object.
CHECK(Allocator::IsAllocationAllowed()); CHECK(Allocator::IsAllocationAllowed());
T* old_end = end(); ReallocateBuffer(new_capacity);
Base::AllocateBuffer(new_capacity);
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
TypeOperations::Move(old_buffer, old_end, begin());
ClearUnusedSlots(old_buffer, old_end);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
Base::DeallocateBuffer(old_buffer);
} }
template <typename T, wtf_size_t inlineCapacity, typename Allocator> template <typename T, wtf_size_t inlineCapacity, typename Allocator>
...@@ -1676,15 +1709,9 @@ void Vector<T, inlineCapacity, Allocator>::ShrinkCapacity( ...@@ -1676,15 +1709,9 @@ void Vector<T, inlineCapacity, Allocator>::ShrinkCapacity(
if (!Allocator::IsAllocationAllowed()) if (!Allocator::IsAllocationAllowed())
return; return;
T* old_end = end(); ReallocateBuffer(new_capacity);
Base::AllocateBuffer(new_capacity); return;
if (begin() != old_buffer) {
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
TypeOperations::Move(old_buffer, old_end, begin());
ClearUnusedSlots(old_buffer, old_end);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
} }
} else {
Base::ResetBufferPointer(); Base::ResetBufferPointer();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER #ifdef ANNOTATE_CONTIGUOUS_CONTAINER
if (old_buffer != begin()) { if (old_buffer != begin()) {
...@@ -1692,8 +1719,6 @@ void Vector<T, inlineCapacity, Allocator>::ShrinkCapacity( ...@@ -1692,8 +1719,6 @@ void Vector<T, inlineCapacity, Allocator>::ShrinkCapacity(
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_); ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
} }
#endif #endif
}
Base::DeallocateBuffer(old_buffer); Base::DeallocateBuffer(old_buffer);
} }
...@@ -1978,6 +2003,41 @@ Vector<T, inlineCapacity, Allocator>::Trace(VisitorDispatcher visitor) { ...@@ -1978,6 +2003,41 @@ Vector<T, inlineCapacity, Allocator>::Trace(VisitorDispatcher visitor) {
} }
} }
template <typename T, wtf_size_t inlineCapacity, typename Allocator>
void Vector<T, inlineCapacity, Allocator>::ReallocateBuffer(
wtf_size_t new_capacity) {
if (new_capacity <= INLINE_CAPACITY) {
if (HasInlineBuffer()) {
Base::ResetBufferPointer();
return;
}
// Shrinking to inline buffer from out-of-line one.
T *old_begin = begin(), *old_end = end();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
const wtf_size_t old_capacity = capacity();
#endif
Base::ResetBufferPointer();
TypeOperations::Move(old_begin, old_end, begin());
ClearUnusedSlots(old_begin, old_end);
ANNOTATE_DELETE_BUFFER(old_begin, old_capacity, size_);
Base::DeallocateBuffer(old_begin);
return;
}
// Shrinking/resizing to out-of-line buffer.
VectorBufferBase<T, Allocator> buffer =
Base::AllocateTemporaryBuffer(new_capacity);
ANNOTATE_NEW_BUFFER(buffer.Buffer(), buffer.capacity(), size_);
// If there was a new out-of-line buffer allocated, there is no need in
// calling write barriers for entries in that backing store as it is still
// white.
TypeOperations::Move(begin(), end(), buffer.Buffer(), HasInlineBuffer());
ClearUnusedSlots(begin(), end());
ANNOTATE_DELETE_BUFFER(begin(), capacity(), size_);
Base::DeallocateBuffer(begin());
buffer.MoveBufferInto(*this);
Allocator::BackingWriteBarrier(begin());
}
} // namespace WTF } // namespace WTF
namespace base { namespace base {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment