Commit 69488b17 authored by Anton Bikineev, committed by Commit Bot

heap: Don't call write barrier on vector reallocations

This CL reorders the reallocation scenario as follows:
1) allocating a new buffer;
2) moving elements from the current buffer to the new one ***without
   tracing them or executing write barriers on them***;
3) resetting HeapVector's pointer to the new buffer;
4) executing the write barrier on the new buffer.

A follow-up to this CL will introduce the same optimization for
HashTable.
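
The diff below implements this order in Vector<T>::ReallocateBuffer. As a
rough, self-contained sketch of the new flow (SketchVector and the no-op
BackingWriteBarrier placeholder are illustrative only, not Blink's real
types):

#include <cstddef>
#include <new>
#include <utility>

template <typename T>
struct SketchVector {
  T* buffer = nullptr;
  size_t size = 0;
  size_t capacity = 0;
};

// Stand-in for Allocator::BackingWriteBarrier in the diff below.
template <typename T>
void BackingWriteBarrier(T* /*backing*/) {}

template <typename T>
void ReallocateBackingSketch(SketchVector<T>& vec, size_t new_capacity) {
  // 1) Allocate the new backing store; the marker has not seen it yet,
  //    so its entries need no individual write barriers.
  T* new_buffer = static_cast<T*>(::operator new(new_capacity * sizeof(T)));

  // 2) Move elements without tracing them or executing write barriers.
  for (size_t i = 0; i < vec.size; ++i) {
    new (new_buffer + i) T(std::move(vec.buffer[i]));
    vec.buffer[i].~T();
  }

  // 3) Repoint the vector at the new buffer.
  T* old_buffer = vec.buffer;
  vec.buffer = new_buffer;
  vec.capacity = new_capacity;

  // 4) A single write barrier on the whole new backing replaces the
  //    per-element barriers that previously ran during the move.
  BackingWriteBarrier(vec.buffer);

  ::operator delete(old_buffer);
}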

Bug: 1021889

Change-Id: I367165ba26045d3a4ca967c7f9f006ff88be4ed4
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1899883
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#714926}
parent a0ab2c88
......@@ -487,12 +487,21 @@ class ConstructTraits<blink::Member<T>, Traits, Allocator> {
STATIC_ONLY(ConstructTraits);
public:
template <typename... Args>
static blink::Member<T>* Construct(void* location, Args&&... args) {
return new (NotNull, location)
blink::Member<T>(std::forward<Args>(args)...);
}
static void NotifyNewElement(blink::Member<T>* element) {
element->WriteBarrier();
}
template <typename... Args>
static blink::Member<T>* ConstructAndNotifyElement(void* location,
Args&&... args) {
blink::Member<T>* object =
new (NotNull, location) blink::Member<T>(std::forward<Args>(args)...);
object->WriteBarrier();
blink::Member<T>* object = Construct(location, std::forward<Args>(args)...);
NotifyNewElement(object);
return object;
}
......
......@@ -21,15 +21,24 @@ class ConstructTraits {
public:
// Construct a single element that would otherwise be constructed using
// placement new.
template <typename... Args>
static T* Construct(void* location, Args&&... args) {
return new (NotNull, location) T(std::forward<Args>(args)...);
}
// After constructing elements using memcopy or memmove (or similar)
// |NotifyNewElement| needs to be called to propagate that information.
static void NotifyNewElement(T* element) {
Allocator::template NotifyNewObject<T, Traits>(element);
}
template <typename... Args>
static T* ConstructAndNotifyElement(void* location, Args&&... args) {
T* object = new (NotNull, location) T(std::forward<Args>(args)...);
Allocator::template NotifyNewObject<T, Traits>(object);
T* object = Construct(location, std::forward<Args>(args)...);
NotifyNewElement(object);
return object;
}
// After constructing elements using memcopy or memmove (or similar)
// |NotifyNewElements| needs to be called to propagate that information.
static void NotifyNewElements(T* array, size_t len) {
Allocator::template NotifyNewObjects<T, Traits>(array, len);
}
......
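
The comments above describe the intended usage: after bulk-moving elements
with memcpy/memmove, NotifyNewElements propagates the new element locations
to the GC. A minimal illustration with a hypothetical trait (MyTraits is a
stand-in that mirrors the ConstructTraits interface, not the real type):

#include <cstring>
#include <new>
#include <utility>

// Hypothetical stand-in mirroring the ConstructTraits interface above.
template <typename T>
struct MyTraits {
  template <typename... Args>
  static T* Construct(void* location, Args&&... args) {
    return new (location) T(std::forward<Args>(args)...);
  }
  static void NotifyNewElement(T* /*element*/) {
    // Real code would run the allocator's write barrier / GC notification.
  }
  static void NotifyNewElements(T* array, size_t len) {
    // Real code would forward to the allocator's batch notification; a loop
    // is used here only to keep the sketch self-contained.
    for (size_t i = 0; i < len; ++i)
      NotifyNewElement(array + i);
  }
};

// Bulk move of trivially copyable elements: copy the bytes first, then make
// one trait call so the GC learns about every relocated element.
template <typename T>
void MemcpyMoveSketch(const T* src, size_t len, T* dst) {
  std::memcpy(dst, src, len * sizeof(T));
  MyTraits<T>::NotifyNewElements(dst, len);
}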
......@@ -1353,7 +1353,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
DCHECK(!IsEmptyOrDeletedBucket(*entry));
// Translate constructs an element so we need to notify using the trait. Avoid
// doing that in the translator so that they can be easily customized.
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElements(entry, 1);
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);
++key_count_;
......@@ -1421,7 +1421,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
DCHECK(!IsEmptyOrDeletedBucket(*entry));
// Translate constructs an element so we need to notify using the trait. Avoid
// doing that in the translator so that they can be easily customized.
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElements(entry, 1);
ConstructTraits<ValueType, Traits, Allocator>::NotifyNewElement(entry);
++key_count_;
if (ShouldExpand())
......
......@@ -151,27 +151,31 @@ struct VectorMover;
template <typename T, typename Allocator>
struct VectorMover<false, T, Allocator> {
STATIC_ONLY(VectorMover);
static void Move(T* src, T* src_end, T* dst) {
using Traits = ConstructTraits<T, VectorTraits<T>, Allocator>;
static void Move(T* src, T* src_end, T* dst, bool has_inline_buffer) {
while (src != src_end) {
ConstructTraits<T, VectorTraits<T>, Allocator>::ConstructAndNotifyElement(
dst, std::move(*src));
T* newly_created = Traits::Construct(dst, std::move(*src));
if (has_inline_buffer)
Traits::NotifyNewElement(newly_created);
src->~T();
++dst;
++src;
}
}
static void MoveOverlapping(T* src, T* src_end, T* dst) {
static void MoveOverlapping(T* src,
T* src_end,
T* dst,
bool has_inline_buffer) {
if (src > dst) {
Move(src, src_end, dst);
Move(src, src_end, dst, has_inline_buffer);
} else {
T* dst_end = dst + (src_end - src);
while (src != src_end) {
--src_end;
--dst_end;
ConstructTraits<T, VectorTraits<T>,
Allocator>::ConstructAndNotifyElement(dst_end,
std::move(
*src_end));
T* newly_created = Traits::Construct(dst_end, std::move(*src_end));
if (has_inline_buffer)
Traits::NotifyNewElement(newly_created);
src_end->~T();
}
}
......@@ -179,30 +183,37 @@ struct VectorMover<false, T, Allocator> {
static void Swap(T* src, T* src_end, T* dst) {
std::swap_ranges(src, src_end, dst);
const size_t len = src_end - src;
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(src, len);
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(dst, len);
Traits::NotifyNewElements(src, len);
Traits::NotifyNewElements(dst, len);
}
};
template <typename T, typename Allocator>
struct VectorMover<true, T, Allocator> {
STATIC_ONLY(VectorMover);
static void Move(const T* src, const T* src_end, T* dst) {
using Traits = ConstructTraits<T, VectorTraits<T>, Allocator>;
static void Move(const T* src,
const T* src_end,
T* dst,
bool has_inline_buffer) {
if (LIKELY(dst && src)) {
memcpy(dst, src,
reinterpret_cast<const char*>(src_end) -
reinterpret_cast<const char*>(src));
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(
dst, src_end - src);
if (has_inline_buffer)
Traits::NotifyNewElements(dst, src_end - src);
}
}
static void MoveOverlapping(const T* src, const T* src_end, T* dst) {
static void MoveOverlapping(const T* src,
const T* src_end,
T* dst,
bool has_inline_buffer) {
if (LIKELY(dst && src)) {
memmove(dst, src,
reinterpret_cast<const char*>(src_end) -
reinterpret_cast<const char*>(src));
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(
dst, src_end - src);
if (has_inline_buffer)
Traits::NotifyNewElements(dst, src_end - src);
}
}
static void Swap(T* src, T* src_end, T* dst) {
......@@ -210,8 +221,8 @@ struct VectorMover<true, T, Allocator> {
reinterpret_cast<char*>(src_end),
reinterpret_cast<char*>(dst));
const size_t len = src_end - src;
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(src, len);
ConstructTraits<T, VectorTraits<T>, Allocator>::NotifyNewElements(dst, len);
Traits::NotifyNewElements(src, len);
Traits::NotifyNewElements(dst, len);
}
};
......@@ -332,14 +343,18 @@ struct VectorTypeOperations {
Allocator>::Initialize(begin, end);
}
static void Move(T* src, T* src_end, T* dst) {
static void Move(T* src, T* src_end, T* dst, bool has_inline_buffer = true) {
VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T, Allocator>::Move(
src, src_end, dst);
src, src_end, dst, has_inline_buffer);
}
static void MoveOverlapping(T* src, T* src_end, T* dst) {
static void MoveOverlapping(T* src,
T* src_end,
T* dst,
bool has_inline_buffer = true) {
VectorMover<VectorTraits<T>::kCanMoveWithMemcpy, T,
Allocator>::MoveOverlapping(src, src_end, dst);
Allocator>::MoveOverlapping(src, src_end, dst,
has_inline_buffer);
}
static void Swap(T* src, T* src_end, T* dst) {
......@@ -384,13 +399,20 @@ class VectorBufferBase {
DISALLOW_NEW();
public:
void AllocateBuffer(wtf_size_t new_capacity) {
VectorBufferBase(VectorBufferBase&&) = default;
VectorBufferBase& operator=(VectorBufferBase&&) = default;
void AllocateBufferNoBarrier(wtf_size_t new_capacity) {
DCHECK(new_capacity);
DCHECK_LE(new_capacity,
Allocator::template MaxElementCountInBackingStore<T>());
size_t size_to_allocate = AllocationSize(new_capacity);
buffer_ = Allocator::template AllocateVectorBacking<T>(size_to_allocate);
capacity_ = static_cast<wtf_size_t>(size_to_allocate / sizeof(T));
}
void AllocateBuffer(wtf_size_t new_capacity) {
AllocateBufferNoBarrier(new_capacity);
Allocator::BackingWriteBarrier(buffer_);
}
......@@ -423,6 +445,11 @@ class VectorBufferBase {
#endif
}
void MoveBufferInto(VectorBufferBase& other) {
other.buffer_ = buffer_;
other.capacity_ = capacity_;
}
// |end| is exclusive, a la STL.
struct OffsetRange final {
OffsetRange() : begin(0), end(0) {}
......@@ -436,6 +463,12 @@ class VectorBufferBase {
};
protected:
static VectorBufferBase AllocateTemporaryBuffer(wtf_size_t capacity) {
VectorBufferBase buffer;
buffer.AllocateBufferNoBarrier(capacity);
return buffer;
}
VectorBufferBase() : buffer_(nullptr), capacity_(0) {}
VectorBufferBase(T* buffer, wtf_size_t capacity)
......@@ -443,6 +476,7 @@ class VectorBufferBase {
VectorBufferBase(HashTableDeletedValueType value)
: buffer_(reinterpret_cast<T*>(-1)) {}
bool IsHashTableDeletedValue() const {
return buffer_ == reinterpret_cast<T*>(-1);
}
......@@ -1286,6 +1320,12 @@ class Vector
template <typename U>
void AppendSlowCase(U&&);
bool HasInlineBuffer() const {
return INLINE_CAPACITY && !this->HasOutOfLineBuffer();
}
void ReallocateBuffer(wtf_size_t);
// This is to prevent compilation of deprecated calls like 'vector.erase(0)'.
void erase(std::nullptr_t) = delete;
......@@ -1615,8 +1655,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity(
wtf_size_t new_capacity) {
if (UNLIKELY(new_capacity <= capacity()))
return;
T* old_buffer = begin();
if (!old_buffer) {
if (!data()) {
Base::AllocateBuffer(new_capacity);
return;
}
......@@ -1633,13 +1672,7 @@ void Vector<T, inlineCapacity, Allocator>::ReserveCapacity(
// Reallocating a backing buffer may resurrect a dead object.
CHECK(Allocator::IsAllocationAllowed());
T* old_end = end();
Base::AllocateBuffer(new_capacity);
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
TypeOperations::Move(old_buffer, old_end, begin());
ClearUnusedSlots(old_buffer, old_end);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
Base::DeallocateBuffer(old_buffer);
ReallocateBuffer(new_capacity);
}
template <typename T, wtf_size_t inlineCapacity, typename Allocator>
......@@ -1676,24 +1709,16 @@ void Vector<T, inlineCapacity, Allocator>::ShrinkCapacity(
if (!Allocator::IsAllocationAllowed())
return;
T* old_end = end();
Base::AllocateBuffer(new_capacity);
if (begin() != old_buffer) {
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
TypeOperations::Move(old_buffer, old_end, begin());
ClearUnusedSlots(old_buffer, old_end);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
}
} else {
Base::ResetBufferPointer();
ReallocateBuffer(new_capacity);
return;
}
Base::ResetBufferPointer();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
if (old_buffer != begin()) {
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
}
#endif
if (old_buffer != begin()) {
ANNOTATE_NEW_BUFFER(begin(), capacity(), size_);
ANNOTATE_DELETE_BUFFER(old_buffer, old_capacity, size_);
}
#endif
Base::DeallocateBuffer(old_buffer);
}
......@@ -1978,6 +2003,41 @@ Vector<T, inlineCapacity, Allocator>::Trace(VisitorDispatcher visitor) {
}
}
template <typename T, wtf_size_t inlineCapacity, typename Allocator>
void Vector<T, inlineCapacity, Allocator>::ReallocateBuffer(
wtf_size_t new_capacity) {
if (new_capacity <= INLINE_CAPACITY) {
if (HasInlineBuffer()) {
Base::ResetBufferPointer();
return;
}
// Shrinking from an out-of-line buffer to the inline one.
T *old_begin = begin(), *old_end = end();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
const wtf_size_t old_capacity = capacity();
#endif
Base::ResetBufferPointer();
TypeOperations::Move(old_begin, old_end, begin());
ClearUnusedSlots(old_begin, old_end);
ANNOTATE_DELETE_BUFFER(old_begin, old_capacity, size_);
Base::DeallocateBuffer(old_begin);
return;
}
// Shrinking/resizing to out-of-line buffer.
VectorBufferBase<T, Allocator> buffer =
Base::AllocateTemporaryBuffer(new_capacity);
ANNOTATE_NEW_BUFFER(buffer.Buffer(), buffer.capacity(), size_);
// If a new out-of-line buffer was allocated, there is no need to execute
// write barriers for entries in that backing store, as it is still white.
TypeOperations::Move(begin(), end(), buffer.Buffer(), HasInlineBuffer());
ClearUnusedSlots(begin(), end());
ANNOTATE_DELETE_BUFFER(begin(), capacity(), size_);
Base::DeallocateBuffer(begin());
buffer.MoveBufferInto(*this);
Allocator::BackingWriteBarrier(begin());
}
} // namespace WTF
namespace base {
......
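
For context on the "still white" comment in ReallocateBuffer above: with
incremental marking, a write barrier is only needed for stores into objects
that the marker may already have finished scanning; pointers copied into a
still-white object will be discovered when that object itself is traced. A
freshly allocated backing store is still white, so moving elements into it
needs no per-element barriers, and the single barrier on the backing pointer
at the end ensures the new store gets marked. A generic Dijkstra-style
sketch of that idea (not Oilpan's actual implementation):

enum class Color { kWhite, kGrey, kBlack };

struct ObjectHeader {
  Color color = Color::kWhite;  // Newly allocated objects start out white.
};

// Generic marking write barrier sketch: when a pointer to an unmarked object
// is stored, shade that object so the marker will (re)visit it. Because the
// moved elements now live inside the new backing, shading the backing once
// is enough for the marker to find all of them.
void GenericWriteBarrier(ObjectHeader* new_value, bool marking_is_active) {
  if (marking_is_active && new_value->color == Color::kWhite)
    new_value->color = Color::kGrey;  // Queue for (re)scanning.
}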