Commit 47af09d1 authored by Michael Lippautz's avatar Michael Lippautz Committed by Chromium LUCI CQ

heap: Split off collection backings for the Oilpan library

Backings require re-implementing of traits. Split the files off instead
of relying on the macro to split the implementation of each trait.

Bug: 1056170
Change-Id: I43a005119986ec51c05a34f76dca25614a6f1196
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2642340
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#846189}
parent a6e94cb4
......@@ -99,6 +99,8 @@ blink_platform_sources("heap") {
sources += [
"v8_wrapper/blink_gc.h",
"v8_wrapper/blink_gc_memory_dump_provider.h",
"v8_wrapper/collection_support/heap_hash_table_backing.h",
"v8_wrapper/collection_support/heap_vector_backing.h",
"v8_wrapper/disallow_new_wrapper.h",
"v8_wrapper/garbage_collected.h",
"v8_wrapper/gc_task_runner.h",
......@@ -126,6 +128,8 @@ blink_platform_sources("heap") {
"impl/blink_gc.h",
"impl/blink_gc_memory_dump_provider.cc",
"impl/blink_gc_memory_dump_provider.h",
"impl/collection_support/heap_hash_table_backing.h",
"impl/collection_support/heap_vector_backing.h",
"impl/finalizer_traits.h",
"impl/garbage_collected.h",
"impl/gc_info.cc",
......
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
#include "third_party/blink/renderer/platform/heap/impl/threading_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/trace_traits.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
#include "third_party/blink/renderer/platform/wtf/buildflags.h"
namespace blink {
// Garbage-collected backing store for Blink heap hash tables. Inheriting
// WTF::ConditionalDestructor makes the destructor (and therefore Finalize())
// take effect only when the table's ValueType is not trivially destructible.
template <typename Table>
class HeapHashTableBacking final
    : public GarbageCollected<HeapHashTableBacking<Table>>,
      public WTF::ConditionalDestructor<
          HeapHashTableBacking<Table>,
          std::is_trivially_destructible<typename Table::ValueType>::value> {
 public:
  // Allocates |size| bytes of backing memory on the dedicated hash table
  // arena; see the out-of-line definition below.
  template <typename Backing>
  static void* AllocateObject(size_t);

  // Conditionally invoked via destructor.
  void Finalize();
};
// A hash table backing is main-thread-only iff both its key and its value
// types are main-thread-only; otherwise it may be accessed from any thread.
template <typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table>> {
  STATIC_ONLY(ThreadingTrait);
  using Key = typename Table::KeyType;
  using Value = typename Table::ValueType;
  static const ThreadAffinity kAffinity =
      (ThreadingTrait<Key>::kAffinity == kMainThreadOnly) &&
              (ThreadingTrait<Value>::kAffinity == kMainThreadOnly)
          ? kMainThreadOnly
          : kAnyThread;
};
// static
// Allocates |size| bytes of uninitialized backing memory on the hash table
// arena of the thread selected by the backing's ThreadingTrait.
template <typename Table>
template <typename Backing>
void* HeapHashTableBacking<Table>::AllocateObject(size_t size) {
  // Resolve the ThreadState for the thread the backing is bound to.
  ThreadState* state =
      ThreadStateFor<ThreadingTrait<Backing>::kAffinity>::GetState();
  DCHECK(state->IsAllocationAllowed());
  // Hash table backings live on their own arena index.
  return state->Heap().AllocateOnArenaIndex(
      state, size, BlinkGC::kHashTableArenaIndex, GCInfoTrait<Backing>::Index(),
      WTF_HEAP_PROFILER_TYPE_NAME(Backing));
}
// Destroys every live (non-empty, non-deleted) bucket. Only instantiated,
// via ConditionalDestructor, when ValueType requires destruction.
template <typename Table>
void HeapHashTableBacking<Table>::Finalize() {
  using Value = typename Table::ValueType;
  static_assert(
      !std::is_trivially_destructible<Value>::value,
      "Finalization of trivially destructible classes should not happen.");
  HeapObjectHeader* header = HeapObjectHeader::FromPayload(this);
  // Use the payload size as recorded by the heap to determine how many
  // elements to finalize.
  size_t length = header->PayloadSize() / sizeof(Value);
  Value* table = reinterpret_cast<Value*>(this);
  for (unsigned i = 0; i < length; ++i) {
    // Empty/deleted buckets hold no constructed value; skip them.
    if (!Table::IsEmptyOrDeletedBucket(table[i]))
      table[i].~Value();
  }
}
// Allocation entry point used by MakeGarbageCollected<>: allocates raw
// backing memory, placement-constructs the backing object, and then marks
// the header as fully constructed (atomically, for concurrent markers).
template <typename Table>
struct MakeGarbageCollectedTrait<HeapHashTableBacking<Table>> {
  static HeapHashTableBacking<Table>* Call(size_t num_elements) {
    CHECK_GT(num_elements, 0u);
    void* memory = HeapHashTableBacking<Table>::template AllocateObject<
        HeapHashTableBacking<Table>>(num_elements *
                                     sizeof(typename Table::ValueType));
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
    // Placement new as regular operator new() is deleted.
    HeapHashTableBacking<Table>* object =
        ::new (memory) HeapHashTableBacking<Table>();
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return object;
  }
};
// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template <typename Table>
struct TraceTrait<HeapHashTableBacking<Table>> {
  STATIC_ONLY(TraceTrait);
  using Backing = HeapHashTableBacking<Table>;
  using ValueType = typename Table::ValueTraits::TraitType;
  using Traits = typename Table::ValueTraits;

 public:
  // Conservative (stack-found) pointers are always traced strongly.
  static TraceDescriptor GetTraceDescriptor(const void* self) {
    return {self, Trace<WTF::kNoWeakHandling>};
  }

  // A weak-tracing callback is only installed for KeyValuePair tables with
  // ephemeron semantics; every other table gets a null callback.
  static TraceDescriptor GetWeakTraceDescriptor(const void* self) {
    return GetWeakTraceDescriptorImpl<ValueType>::GetWeakTraceDescriptor(self);
  }

  template <WTF::WeakHandlingFlag WeakHandling = WTF::kNoWeakHandling>
  static void Trace(Visitor* visitor, const void* self) {
    // Values that cannot be traced concurrently are deferred when this is
    // invoked from a concurrent marking visitor.
    if (!Traits::kCanTraceConcurrently && self) {
      if (visitor->DeferredTraceIfConcurrent({self, &Trace<WeakHandling>},
                                             GetBackingStoreSize(self)))
        return;
    }
    static_assert(WTF::IsTraceableInCollectionTrait<Traits>::value ||
                      WTF::IsWeak<ValueType>::value,
                  "T should not be traced");
    WTF::TraceInCollectionTrait<WeakHandling, Backing, void>::Trace(visitor,
                                                                    self);
  }

 private:
  // Size of the backing store as recorded by the heap: large objects keep
  // their size on the page, regular objects in the header.
  static size_t GetBackingStoreSize(const void* backing_store) {
    const HeapObjectHeader* header =
        HeapObjectHeader::FromPayload(backing_store);
    return header->IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
               ? static_cast<LargeObjectPage*>(PageFromObject(header))
                     ->ObjectSize()
               : header->size<HeapObjectHeader::AccessMode::kAtomic>();
  }

  // Default: no weak processing for non-KeyValuePair value types.
  template <typename ValueType>
  struct GetWeakTraceDescriptorImpl {
    static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
      return {backing, nullptr};
    }
  };

  template <typename K, typename V>
  struct GetWeakTraceDescriptorImpl<WTF::KeyValuePair<K, V>> {
    static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
      return GetWeakTraceDescriptorKVPImpl<K, V>::GetWeakTraceDescriptor(
          backing);
    }

    // Ephemeron semantics: exactly one of key/value is weak while the other
    // side is traceable.
    template <typename KeyType,
              typename ValueType,
              bool ephemeron_semantics = (WTF::IsWeak<KeyType>::value &&
                                          !WTF::IsWeak<ValueType>::value &&
                                          WTF::IsTraceable<ValueType>::value) ||
                                         (WTF::IsWeak<ValueType>::value &&
                                          !WTF::IsWeak<KeyType>::value &&
                                          WTF::IsTraceable<KeyType>::value)>
    struct GetWeakTraceDescriptorKVPImpl {
      static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
        return {backing, nullptr};
      }
    };

    // Ephemeron pairs are traced with weak handling.
    template <typename KeyType, typename ValueType>
    struct GetWeakTraceDescriptorKVPImpl<KeyType, ValueType, true> {
      static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
        return {backing, Trace<WTF::kWeakHandling>};
      }
    };
  };
};
} // namespace blink
namespace WTF {
namespace internal {
// ConcurrentBucket is a wrapper for HashTable buckets for concurrent marking.
// It is used to provide a snapshot view of the bucket key and guarantee
// that the same key is used for checking empty/deleted buckets and tracing.
// ConcurrentBucket wraps a HashTable bucket for concurrent marking. It takes
// a snapshot of the bucket's key so that the exact same key bytes are used
// both for the empty/deleted-bucket check and for tracing.
template <typename T>
class ConcurrentBucket {
  using KeyExtractionCallback = void (*)(const T&, void*);

 public:
  using BucketType = T;

  ConcurrentBucket(const T& bucket, KeyExtractionCallback extract_key) {
    extract_key(bucket, &snapshot_);
  }

  // Hash tables not built on KeyValuePair (the *HashSet family) store the
  // value in the key slot, so key, value, and bucket all alias the snapshot.
  const T* key() const { return reinterpret_cast<const T*>(&snapshot_); }
  const T* value() const { return key(); }
  const T* bucket() const { return key(); }

 private:
  // The buffer must be aligned both for atomic access and so that it can be
  // reinterpreted as an object of type T.
  static constexpr size_t boundary = std::max(alignof(T), sizeof(size_t));
  alignas(boundary) char snapshot_[sizeof(T)];
};
// Specialization for KeyValuePair buckets: only the key is snapshotted (the
// empty/deleted check inspects only the key); the value is referenced in
// place.
template <typename Key, typename Value>
class ConcurrentBucket<KeyValuePair<Key, Value>> {
  using KeyExtractionCallback = void (*)(const KeyValuePair<Key, Value>&,
                                         void*);

 public:
  using BucketType = ConcurrentBucket;

  ConcurrentBucket(const KeyValuePair<Key, Value>& pair,
                   KeyExtractionCallback extract_key)
      : value_(&pair.value) {
    extract_key(pair, &buf_);
  }

  const Key* key() const { return reinterpret_cast<const Key*>(&buf_); }
  const Value* value() const { return value_; }
  // The "bucket" is the wrapper itself, pairing the snapshotted key with the
  // in-place value.
  const ConcurrentBucket* bucket() const { return this; }

 private:
  // Alignment is needed for atomic accesses to |buf_| and to assure |buf_|
  // can be accessed the same as objects of type Key
  static constexpr size_t boundary = std::max(alignof(Key), sizeof(size_t));
  alignas(boundary) char buf_[sizeof(Key)];
  const Value* value_;
};
} // namespace internal
// This trace method is for tracing a HashTableBacking either through regular
// tracing (via the relevant TraceTraits) or when finding a HashTableBacking
// through conservative stack scanning (which will treat all references in the
// backing strongly).
template <WTF::WeakHandlingFlag WeakHandling, typename Table>
struct TraceHashTableBackingInCollectionTrait {
  using Value = typename Table::ValueType;
  using Traits = typename Table::ValueTraits;
  using Extractor = typename Table::ExtractorType;

  static void Trace(blink::Visitor* visitor, const void* self) {
    static_assert(IsTraceableInCollectionTrait<Traits>::value ||
                      WTF::IsWeak<Value>::value,
                  "Table should not be traced");
    const Value* array = reinterpret_cast<const Value*>(self);
    blink::HeapObjectHeader* header =
        blink::HeapObjectHeader::FromPayload(self);
    // Use the payload size as recorded by the heap to determine how many
    // elements to trace.
    size_t length = header->PayloadSize() / sizeof(Value);
    const bool is_concurrent = visitor->IsConcurrent();
    for (size_t i = 0; i < length; ++i) {
      // If tracing concurrently, use a concurrent-safe version of
      // IsEmptyOrDeletedBucket (check performed on a local copy instead
      // of directly on the bucket).
      if (is_concurrent) {
        internal::ConcurrentBucket<Value> concurrent_bucket(
            array[i], Extractor::ExtractSafe);
        if (!HashTableHelper<Value, Extractor, typename Table::KeyTraitsType>::
                IsEmptyOrDeletedBucketForKey(*concurrent_bucket.key())) {
          blink::TraceCollectionIfEnabled<
              WeakHandling,
              typename internal::ConcurrentBucket<Value>::BucketType,
              Traits>::Trace(visitor, concurrent_bucket.bucket());
        }
      } else {
        // Non-concurrent path: check and trace the bucket in place.
        if (!HashTableHelper<Value, Extractor, typename Table::KeyTraitsType>::
                IsEmptyOrDeletedBucket(array[i])) {
          blink::TraceCollectionIfEnabled<WeakHandling, Value, Traits>::Trace(
              visitor, &array[i]);
        }
      }
    }
  }
};
// Strong (non-weak) tracing of a hash table backing; forwards to the shared
// implementation.
template <typename Table>
struct TraceInCollectionTrait<kNoWeakHandling,
                              blink::HeapHashTableBacking<Table>,
                              void> {
  static void Trace(blink::Visitor* visitor, const void* self) {
    TraceHashTableBackingInCollectionTrait<kNoWeakHandling, Table>::Trace(
        visitor, self);
  }
};
// Weak tracing of a hash table backing; forwards to the shared
// implementation.
template <typename Table>
struct TraceInCollectionTrait<kWeakHandling,
                              blink::HeapHashTableBacking<Table>,
                              void> {
  static void Trace(blink::Visitor* visitor, const void* self) {
    TraceHashTableBackingInCollectionTrait<kWeakHandling, Table>::Trace(visitor,
                                                                        self);
  }
};
// Trace implementation for KeyValuePair buckets. Statically dispatches
// between plain tracing (Strong/Strong or Weak/Weak pairs) and ephemeron
// handling (exactly one of key/value is weak while the other is traceable).
template <WTF::WeakHandlingFlag WeakHandling,
          typename Key,
          typename Value,
          typename Traits>
struct TraceKeyValuePairInCollectionTrait {
  using EphemeronHelper =
      blink::EphemeronKeyValuePair<Key,
                                   Value,
                                   typename Traits::KeyTraits,
                                   typename Traits::ValueTraits>;

  static void Trace(blink::Visitor* visitor,
                    const Key* key,
                    const Value* value) {
    TraceImpl::Trace(visitor, key, value);
  }

 private:
  struct TraceImplEphemerons {
    // Strongification of ephemerons, i.e., Weak/Strong and Strong/Weak.
    static void Trace(blink::Visitor* visitor,
                      const Key* key,
                      const Value* value) {
      // Strongification of ephemerons, i.e., Weak/Strong and Strong/Weak.
      // The helper ensures that helper.key always refers to the weak part and
      // helper.value always refers to the dependent part.
      // We distinguish ephemeron from Weak/Weak and Strong/Strong to allow
      // users to override visitation behavior. An example is creating a heap
      // snapshot, where it is useful to annotate values as being kept alive
      // from keys rather than the table.
      EphemeronHelper helper(key, value);
      if (WeakHandling == kNoWeakHandling) {
        // Strongify the weak part.
        blink::TraceCollectionIfEnabled<
            kNoWeakHandling, typename EphemeronHelper::KeyType,
            typename EphemeronHelper::KeyTraits>::Trace(visitor, helper.key);
      }
      // The following passes on kNoWeakHandling for tracing value as the value
      // callback is only invoked to keep value alive iff key is alive,
      // following ephemeron semantics.
      visitor->TraceEphemeron(*helper.key, helper.value);
    }
  };

  struct TraceImplDefault {
    static void Trace(blink::Visitor* visitor,
                      const Key* key,
                      const Value* value) {
      // Strongification of non-ephemeron KVP, i.e., Strong/Strong or Weak/Weak.
      // Order does not matter here.
      blink::TraceCollectionIfEnabled<
          kNoWeakHandling, Key, typename Traits::KeyTraits>::Trace(visitor,
                                                                   key);
      blink::TraceCollectionIfEnabled<
          kNoWeakHandling, Value, typename Traits::ValueTraits>::Trace(visitor,
                                                                       value);
    }
  };

  // Ephemeron handling is only engaged when the pair is an ephemeron and its
  // dependent part is actually traceable.
  using TraceImpl = typename std::conditional<
      EphemeronHelper::is_ephemeron &&
          WTF::IsTraceable<typename EphemeronHelper::ValueType>::value,
      TraceImplEphemerons,
      TraceImplDefault>::type;
};
// Trait for strong treatment of KeyValuePair. This is used to handle regular
// KVP but also for strongification of otherwise weakly handled KVPs.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<kNoWeakHandling,
                              KeyValuePair<Key, Value>,
                              Traits> {
  static void Trace(blink::Visitor* visitor,
                    const KeyValuePair<Key, Value>& self) {
    TraceKeyValuePairInCollectionTrait<kNoWeakHandling, Key, Value,
                                       Traits>::Trace(visitor, &self.key,
                                                      &self.value);
  }
};
// Weak treatment of KeyValuePair buckets: liveness is the conjunction of the
// key's and the value's liveness; tracing forwards to the shared KVP trait.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<kWeakHandling, KeyValuePair<Key, Value>, Traits> {
  static bool IsAlive(const blink::LivenessBroker& info,
                      const KeyValuePair<Key, Value>& self) {
    // Needed for Weak/Weak, Strong/Weak (reverse ephemeron), and Weak/Strong
    // (ephemeron). Order of invocation does not matter as tracing weak key or
    // value does not have any side effects.
    return blink::TraceCollectionIfEnabled<
               WeakHandlingTrait<Key>::value, Key,
               typename Traits::KeyTraits>::IsAlive(info, self.key) &&
           blink::TraceCollectionIfEnabled<
               WeakHandlingTrait<Value>::value, Value,
               typename Traits::ValueTraits>::IsAlive(info, self.value);
  }

  static void Trace(blink::Visitor* visitor,
                    const KeyValuePair<Key, Value>& self) {
    TraceKeyValuePairInCollectionTrait<kWeakHandling, Key, Value,
                                       Traits>::Trace(visitor, &self.key,
                                                      &self.value);
  }
};
// Strong tracing of a concurrently snapshotted KeyValuePair bucket.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<
    kNoWeakHandling,
    internal::ConcurrentBucket<KeyValuePair<Key, Value>>,
    Traits> {
  static void Trace(
      blink::Visitor* visitor,
      const internal::ConcurrentBucket<KeyValuePair<Key, Value>>& self) {
    TraceKeyValuePairInCollectionTrait<kNoWeakHandling, Key, Value,
                                       Traits>::Trace(visitor, self.key(),
                                                      self.value());
  }
};
// Weak tracing of a concurrently snapshotted KeyValuePair bucket.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<
    kWeakHandling,
    internal::ConcurrentBucket<KeyValuePair<Key, Value>>,
    Traits> {
  static void Trace(
      blink::Visitor* visitor,
      const internal::ConcurrentBucket<KeyValuePair<Key, Value>>& self) {
    TraceKeyValuePairInCollectionTrait<kWeakHandling, Key, Value,
                                       Traits>::Trace(visitor, self.key(),
                                                      self.value());
  }
};
} // namespace WTF
#if BUILDFLAG(USE_V8_OILPAN)
#include "third_party/blink/renderer/platform/heap/v8_wrapper/collection_support/heap_hash_table_backing.h"
#else // !USE_V8_OILPAN
#include "third_party/blink/renderer/platform/heap/impl/collection_support/heap_hash_table_backing.h"
#endif // !USE_V8_OILPAN
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#include "base/check_op.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/impl/finalizer_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
#include "third_party/blink/renderer/platform/heap/impl/threading_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/trace_traits.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "third_party/blink/renderer/platform/wtf/buildflags.h"
namespace blink {
// Garbage-collected backing store for HeapVector/HeapDeque. The destructor
// (and therefore Finalize()) is compiled in only when Traits report that the
// element type needs destruction.
template <typename T, typename Traits = WTF::VectorTraits<T>>
class HeapVectorBacking final
    : public GarbageCollected<HeapVectorBacking<T, Traits>>,
      public WTF::ConditionalDestructor<HeapVectorBacking<T, Traits>,
                                        !Traits::kNeedsDestruction> {
 public:
  // Allocates |size| bytes of backing memory on the vector arena; see the
  // out-of-line definition below.
  template <typename Backing>
  static void* AllocateObject(size_t);

  // Conditionally invoked via destructor.
  void Finalize();
};
// static
// Allocates |size| bytes of uninitialized backing memory on the vector arena.
// NOTE(review): thread affinity is resolved via the element type T here,
// while HeapHashTableBacking::AllocateObject resolves it via the Backing
// type — confirm this asymmetry is intended (the results coincide when the
// backing's ThreadingTrait simply forwards to T).
template <typename T, typename Traits>
template <typename Backing>
void* HeapVectorBacking<T, Traits>::AllocateObject(size_t size) {
  ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
  DCHECK(state->IsAllocationAllowed());
  return state->Heap().AllocateOnArenaIndex(
      state, size, BlinkGC::kVectorArenaIndex, GCInfoTrait<Backing>::Index(),
      WTF_HEAP_PROFILER_TYPE_NAME(Backing));
}
// Destroys the elements of the backing. The backing only knows its capacity
// (not the vector's size), so every slot is visited; unused slots are zeroed
// out, and are either skipped via the vtable check (polymorphic T) or safe
// to destroy anyway (memset-clearable T).
template <typename T, typename Traits>
void HeapVectorBacking<T, Traits>::Finalize() {
  static_assert(Traits::kNeedsDestruction,
                "Only vector buffers with items requiring destruction should "
                "be finalized");
  static_assert(
      Traits::kCanClearUnusedSlotsWithMemset || std::is_polymorphic<T>::value,
      "HeapVectorBacking doesn't support objects that cannot be cleared as "
      "unused with memset or don't have a vtable");
  static_assert(
      !std::is_trivially_destructible<T>::value,
      "Finalization of trivially destructible classes should not happen.");
  HeapObjectHeader* header = HeapObjectHeader::FromPayload(this);
  // Use the payload size as recorded by the heap to determine how many
  // elements to finalize.
  size_t length = header->PayloadSize() / sizeof(T);
  Address payload = header->Payload();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
  ANNOTATE_CHANGE_SIZE(payload, length * sizeof(T), 0, length * sizeof(T));
#endif
  // As commented above, HeapVectorBacking calls finalizers for unused slots
  // (which are already zeroed out).
  if (std::is_polymorphic<T>::value) {
    for (unsigned i = 0; i < length; ++i) {
      Address element = payload + i * sizeof(T);
      // A zeroed (unused) slot has no vtable pointer; skip it.
      if (blink::VTableInitialized(element))
        reinterpret_cast<T*>(element)->~T();
    }
  } else {
    T* buffer = reinterpret_cast<T*>(payload);
    for (unsigned i = 0; i < length; ++i)
      buffer[i].~T();
  }
}
// Allocation entry point used by MakeGarbageCollected<>: allocates raw
// backing memory on the vector arena, placement-constructs the backing
// object, and publishes it as fully constructed for concurrent markers.
template <typename T>
struct MakeGarbageCollectedTrait<HeapVectorBacking<T>> {
  static HeapVectorBacking<T>* Call(size_t num_elements) {
    CHECK_GT(num_elements, 0u);
    void* memory =
        HeapVectorBacking<T>::template AllocateObject<HeapVectorBacking<T>>(
            num_elements * sizeof(T));
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
    // Placement new as regular operator new() is deleted.
    HeapVectorBacking<T>* object = ::new (memory) HeapVectorBacking<T>();
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return object;
  }
};
// A vector backing inherits the thread affinity of its element type.
template <typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits>> {
  STATIC_ONLY(ThreadingTrait);
  // Fixed: the member is spelled kAffinity on ThreadingTrait (see every
  // other use in this file); the previous spelling
  // ThreadingTrait<T>::Affinity named a nonexistent member and would fail
  // to compile if this specialization were ever instantiated.
  static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
};
// Trace trait for vector backings. Weakness is not supported for vectors,
// so there is only a strong tracing path.
template <typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits>> {
  STATIC_ONLY(TraceTrait);
  using Backing = HeapVectorBacking<T, Traits>;

 public:
  static TraceDescriptor GetTraceDescriptor(const void* self) {
    return {self, TraceTrait<Backing>::Trace};
  }

  static void Trace(Visitor* visitor, const void* self) {
    // Elements that cannot be traced concurrently are deferred when this is
    // invoked from a concurrent marking visitor.
    if (!Traits::kCanTraceConcurrently && self) {
      if (visitor->DeferredTraceIfConcurrent({self, &Trace},
                                             GetBackingStoreSize(self)))
        return;
    }
    static_assert(!WTF::IsWeak<T>::value,
                  "Weakness is not supported in HeapVector and HeapDeque");
    if (WTF::IsTraceableInCollectionTrait<Traits>::value) {
      WTF::TraceInCollectionTrait<WTF::kNoWeakHandling,
                                  HeapVectorBacking<T, Traits>,
                                  void>::Trace(visitor, self);
    }
  }

 private:
  // Size of the backing store as recorded by the heap: large objects keep
  // their size on the page, regular objects in the header.
  static size_t GetBackingStoreSize(const void* backing_store) {
    const HeapObjectHeader* header =
        HeapObjectHeader::FromPayload(backing_store);
    return header->IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
               ? static_cast<LargeObjectPage*>(PageFromObject(header))
                     ->ObjectSize()
               : header->size<HeapObjectHeader::AccessMode::kAtomic>();
  }
};
} // namespace blink
namespace WTF {
// This trace method is used only for on-stack HeapVectors found in
// conservative scanning. On-heap HeapVectors are traced by Vector::trace.
template <typename T, typename Traits>
struct TraceInCollectionTrait<kNoWeakHandling,
                              blink::HeapVectorBacking<T, Traits>,
                              void> {
  static void Trace(blink::Visitor* visitor, const void* self) {
    // HeapVectorBacking does not know the exact size of the vector
    // and just knows the capacity of the vector. Due to the constraint,
    // HeapVectorBacking can support only the following objects:
    //
    // - An object that has a vtable. In this case, HeapVectorBacking
    //   traces only slots that are not zeroed out. This is because if
    //   the object has a vtable, the zeroed slot means that it is
    //   an unused slot (Remember that the unused slots are guaranteed
    //   to be zeroed out by VectorUnusedSlotClearer).
    //
    // - An object that can be initialized with memset. In this case,
    //   HeapVectorBacking traces all slots including unused slots.
    //   This is fine because the fact that the object can be initialized
    //   with memset indicates that it is safe to treat the zerod slot
    //   as a valid object.
    static_assert(!IsTraceableInCollectionTrait<Traits>::value ||
                      Traits::kCanClearUnusedSlotsWithMemset ||
                      std::is_polymorphic<T>::value,
                  "HeapVectorBacking doesn't support objects that cannot be "
                  "cleared as unused with memset.");
    // This trace method is instantiated for vectors where
    // IsTraceableInCollectionTrait<Traits>::value is false, but the trace
    // method should not be called. Thus we cannot static-assert
    // IsTraceableInCollectionTrait<Traits>::value but should runtime-assert it.
    DCHECK(IsTraceableInCollectionTrait<Traits>::value);
    const T* array = reinterpret_cast<const T*>(self);
    blink::HeapObjectHeader* header =
        blink::HeapObjectHeader::FromPayload(self);
    // Use the payload size as recorded by the heap to determine how many
    // elements to trace.
    size_t length = header->PayloadSize() / sizeof(T);
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
    // As commented above, HeapVectorBacking can trace unused slots
    // (which are already zeroed out).
    // NOTE(review): the annotation uses element counts here while Finalize()
    // uses byte counts — confirm the units are intended.
    ANNOTATE_CHANGE_SIZE(array, length, 0, length);
#endif
    if (std::is_polymorphic<T>::value) {
      const char* pointer = reinterpret_cast<const char*>(array);
      for (unsigned i = 0; i < length; ++i) {
        const char* element = pointer + i * sizeof(T);
        // Only trace slots whose vtable pointer is set (constructed slots).
        if (blink::VTableInitialized(element)) {
          blink::TraceIfNeeded<
              T, IsTraceableInCollectionTrait<Traits>::value>::Trace(visitor,
                                                                     array[i]);
        }
      }
    } else {
      for (size_t i = 0; i < length; ++i) {
        blink::TraceIfNeeded<
            T, IsTraceableInCollectionTrait<Traits>::value>::Trace(visitor,
                                                                   array[i]);
      }
    }
  }
};
} // namespace WTF
#if BUILDFLAG(USE_V8_OILPAN)
#include "third_party/blink/renderer/platform/heap/v8_wrapper/collection_support/heap_vector_backing.h"
#else // !USE_V8_OILPAN
#include "third_party/blink/renderer/platform/heap/impl/collection_support/heap_vector_backing.h"
#endif // !USE_V8_OILPAN
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#include "third_party/blink/renderer/platform/heap/impl/heap_page.h"
#include "third_party/blink/renderer/platform/heap/impl/threading_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/trace_traits.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
namespace blink {
// Garbage-collected backing store for Blink heap hash tables. Inheriting
// WTF::ConditionalDestructor makes the destructor (and therefore Finalize())
// take effect only when the table's ValueType is not trivially destructible.
template <typename Table>
class HeapHashTableBacking final
    : public GarbageCollected<HeapHashTableBacking<Table>>,
      public WTF::ConditionalDestructor<
          HeapHashTableBacking<Table>,
          std::is_trivially_destructible<typename Table::ValueType>::value> {
 public:
  // Allocates |size| bytes of backing memory on the dedicated hash table
  // arena; see the out-of-line definition below.
  template <typename Backing>
  static void* AllocateObject(size_t);

  // Conditionally invoked via destructor.
  void Finalize();
};
// A hash table backing is main-thread-only iff both its key and its value
// types are main-thread-only; otherwise it may be accessed from any thread.
template <typename Table>
struct ThreadingTrait<HeapHashTableBacking<Table>> {
  STATIC_ONLY(ThreadingTrait);
  using Key = typename Table::KeyType;
  using Value = typename Table::ValueType;
  static const ThreadAffinity kAffinity =
      (ThreadingTrait<Key>::kAffinity == kMainThreadOnly) &&
              (ThreadingTrait<Value>::kAffinity == kMainThreadOnly)
          ? kMainThreadOnly
          : kAnyThread;
};
// static
// Allocates |size| bytes of uninitialized backing memory on the hash table
// arena of the thread selected by the backing's ThreadingTrait.
template <typename Table>
template <typename Backing>
void* HeapHashTableBacking<Table>::AllocateObject(size_t size) {
  // Resolve the ThreadState for the thread the backing is bound to.
  ThreadState* state =
      ThreadStateFor<ThreadingTrait<Backing>::kAffinity>::GetState();
  DCHECK(state->IsAllocationAllowed());
  // Hash table backings live on their own arena index.
  return state->Heap().AllocateOnArenaIndex(
      state, size, BlinkGC::kHashTableArenaIndex, GCInfoTrait<Backing>::Index(),
      WTF_HEAP_PROFILER_TYPE_NAME(Backing));
}
// Destroys every live (non-empty, non-deleted) bucket. Only instantiated,
// via ConditionalDestructor, when ValueType requires destruction.
template <typename Table>
void HeapHashTableBacking<Table>::Finalize() {
  using Value = typename Table::ValueType;
  static_assert(
      !std::is_trivially_destructible<Value>::value,
      "Finalization of trivially destructible classes should not happen.");
  HeapObjectHeader* header = HeapObjectHeader::FromPayload(this);
  // Use the payload size as recorded by the heap to determine how many
  // elements to finalize.
  size_t length = header->PayloadSize() / sizeof(Value);
  Value* table = reinterpret_cast<Value*>(this);
  for (unsigned i = 0; i < length; ++i) {
    // Empty/deleted buckets hold no constructed value; skip them.
    if (!Table::IsEmptyOrDeletedBucket(table[i]))
      table[i].~Value();
  }
}
// Allocation entry point used by MakeGarbageCollected<>: allocates raw
// backing memory, placement-constructs the backing object, and then marks
// the header as fully constructed (atomically, for concurrent markers).
template <typename Table>
struct MakeGarbageCollectedTrait<HeapHashTableBacking<Table>> {
  static HeapHashTableBacking<Table>* Call(size_t num_elements) {
    CHECK_GT(num_elements, 0u);
    void* memory = HeapHashTableBacking<Table>::template AllocateObject<
        HeapHashTableBacking<Table>>(num_elements *
                                     sizeof(typename Table::ValueType));
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(memory);
    // Placement new as regular operator new() is deleted.
    HeapHashTableBacking<Table>* object =
        ::new (memory) HeapHashTableBacking<Table>();
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return object;
  }
};
// The trace trait for the heap hashtable backing is used when we find a
// direct pointer to the backing from the conservative stack scanner. This
// normally indicates that there is an ongoing iteration over the table, and so
// we disable weak processing of table entries. When the backing is found
// through the owning hash table we mark differently, in order to do weak
// processing.
template <typename Table>
struct TraceTrait<HeapHashTableBacking<Table>> {
  STATIC_ONLY(TraceTrait);
  using Backing = HeapHashTableBacking<Table>;
  using ValueType = typename Table::ValueTraits::TraitType;
  using Traits = typename Table::ValueTraits;

 public:
  // Conservative (stack-found) pointers are always traced strongly.
  static TraceDescriptor GetTraceDescriptor(const void* self) {
    return {self, Trace<WTF::kNoWeakHandling>};
  }

  // A weak-tracing callback is only installed for KeyValuePair tables with
  // ephemeron semantics; every other table gets a null callback.
  static TraceDescriptor GetWeakTraceDescriptor(const void* self) {
    return GetWeakTraceDescriptorImpl<ValueType>::GetWeakTraceDescriptor(self);
  }

  template <WTF::WeakHandlingFlag WeakHandling = WTF::kNoWeakHandling>
  static void Trace(Visitor* visitor, const void* self) {
    // Values that cannot be traced concurrently are deferred when this is
    // invoked from a concurrent marking visitor.
    if (!Traits::kCanTraceConcurrently && self) {
      if (visitor->DeferredTraceIfConcurrent({self, &Trace<WeakHandling>},
                                             GetBackingStoreSize(self)))
        return;
    }
    static_assert(WTF::IsTraceableInCollectionTrait<Traits>::value ||
                      WTF::IsWeak<ValueType>::value,
                  "T should not be traced");
    WTF::TraceInCollectionTrait<WeakHandling, Backing, void>::Trace(visitor,
                                                                    self);
  }

 private:
  // Size of the backing store as recorded by the heap: large objects keep
  // their size on the page, regular objects in the header.
  static size_t GetBackingStoreSize(const void* backing_store) {
    const HeapObjectHeader* header =
        HeapObjectHeader::FromPayload(backing_store);
    return header->IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
               ? static_cast<LargeObjectPage*>(PageFromObject(header))
                     ->ObjectSize()
               : header->size<HeapObjectHeader::AccessMode::kAtomic>();
  }

  // Default: no weak processing for non-KeyValuePair value types.
  template <typename ValueType>
  struct GetWeakTraceDescriptorImpl {
    static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
      return {backing, nullptr};
    }
  };

  template <typename K, typename V>
  struct GetWeakTraceDescriptorImpl<WTF::KeyValuePair<K, V>> {
    static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
      return GetWeakTraceDescriptorKVPImpl<K, V>::GetWeakTraceDescriptor(
          backing);
    }

    // Ephemeron semantics: exactly one of key/value is weak while the other
    // side is traceable.
    template <typename KeyType,
              typename ValueType,
              bool ephemeron_semantics = (WTF::IsWeak<KeyType>::value &&
                                          !WTF::IsWeak<ValueType>::value &&
                                          WTF::IsTraceable<ValueType>::value) ||
                                         (WTF::IsWeak<ValueType>::value &&
                                          !WTF::IsWeak<KeyType>::value &&
                                          WTF::IsTraceable<KeyType>::value)>
    struct GetWeakTraceDescriptorKVPImpl {
      static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
        return {backing, nullptr};
      }
    };

    // Ephemeron pairs are traced with weak handling.
    template <typename KeyType, typename ValueType>
    struct GetWeakTraceDescriptorKVPImpl<KeyType, ValueType, true> {
      static TraceDescriptor GetWeakTraceDescriptor(const void* backing) {
        return {backing, Trace<WTF::kWeakHandling>};
      }
    };
  };
};
} // namespace blink
namespace WTF {
namespace internal {
// ConcurrentBucket is a wrapper for HashTable buckets for concurrent marking.
// It is used to provide a snapshot view of the bucket key and guarantee
// that the same key is used for checking empty/deleted buckets and tracing.
// ConcurrentBucket wraps a HashTable bucket for concurrent marking. It takes
// a snapshot of the bucket's key so that the exact same key bytes are used
// both for the empty/deleted-bucket check and for tracing.
template <typename T>
class ConcurrentBucket {
  using KeyExtractionCallback = void (*)(const T&, void*);

 public:
  using BucketType = T;

  ConcurrentBucket(const T& bucket, KeyExtractionCallback extract_key) {
    extract_key(bucket, &snapshot_);
  }

  // Hash tables not built on KeyValuePair (the *HashSet family) store the
  // value in the key slot, so key, value, and bucket all alias the snapshot.
  const T* key() const { return reinterpret_cast<const T*>(&snapshot_); }
  const T* value() const { return key(); }
  const T* bucket() const { return key(); }

 private:
  // The buffer must be aligned both for atomic access and so that it can be
  // reinterpreted as an object of type T.
  static constexpr size_t boundary = std::max(alignof(T), sizeof(size_t));
  alignas(boundary) char snapshot_[sizeof(T)];
};
// Specialization for KeyValuePair buckets: only the key is snapshotted (the
// empty/deleted check inspects only the key); the value is referenced in
// place.
template <typename Key, typename Value>
class ConcurrentBucket<KeyValuePair<Key, Value>> {
  using KeyExtractionCallback = void (*)(const KeyValuePair<Key, Value>&,
                                         void*);

 public:
  using BucketType = ConcurrentBucket;

  ConcurrentBucket(const KeyValuePair<Key, Value>& pair,
                   KeyExtractionCallback extract_key)
      : value_(&pair.value) {
    extract_key(pair, &buf_);
  }

  const Key* key() const { return reinterpret_cast<const Key*>(&buf_); }
  const Value* value() const { return value_; }
  // The "bucket" is the wrapper itself, pairing the snapshotted key with the
  // in-place value.
  const ConcurrentBucket* bucket() const { return this; }

 private:
  // Alignment is needed for atomic accesses to |buf_| and to assure |buf_|
  // can be accessed the same as objects of type Key
  static constexpr size_t boundary = std::max(alignof(Key), sizeof(size_t));
  alignas(boundary) char buf_[sizeof(Key)];
  const Value* value_;
};
} // namespace internal
// This trace method is for tracing a HashTableBacking either through regular
// tracing (via the relevant TraceTraits) or when finding a HashTableBacking
// through conservative stack scanning (which will treat all references in the
// backing strongly).
template <WTF::WeakHandlingFlag WeakHandling, typename Table>
struct TraceHashTableBackingInCollectionTrait {
  using Value = typename Table::ValueType;
  using Traits = typename Table::ValueTraits;
  using Extractor = typename Table::ExtractorType;

  // Traces every bucket of the backing |self| that is neither empty nor
  // deleted, applying |WeakHandling| to each element. The element count is
  // derived from the heap-recorded payload size, not from the table.
  static void Trace(blink::Visitor* visitor, const void* self) {
    static_assert(IsTraceableInCollectionTrait<Traits>::value ||
                      WTF::IsWeak<Value>::value,
                  "Table should not be traced");
    const Value* array = reinterpret_cast<const Value*>(self);
    blink::HeapObjectHeader* header =
        blink::HeapObjectHeader::FromPayload(self);
    // Use the payload size as recorded by the heap to determine how many
    // elements to trace.
    size_t length = header->PayloadSize() / sizeof(Value);
    const bool is_concurrent = visitor->IsConcurrent();
    for (size_t i = 0; i < length; ++i) {
      // If tracing concurrently, use a concurrent-safe version of
      // IsEmptyOrDeletedBucket (check performed on a local copy instead
      // of directly on the bucket, so the same key bytes are used for the
      // emptiness check and for tracing).
      if (is_concurrent) {
        internal::ConcurrentBucket<Value> concurrent_bucket(
            array[i], Extractor::ExtractSafe);
        if (!HashTableHelper<Value, Extractor, typename Table::KeyTraitsType>::
                IsEmptyOrDeletedBucketForKey(*concurrent_bucket.key())) {
          blink::TraceCollectionIfEnabled<
              WeakHandling,
              typename internal::ConcurrentBucket<Value>::BucketType,
              Traits>::Trace(visitor, concurrent_bucket.bucket());
        }
      } else {
        if (!HashTableHelper<Value, Extractor, typename Table::KeyTraitsType>::
                IsEmptyOrDeletedBucket(array[i])) {
          blink::TraceCollectionIfEnabled<WeakHandling, Value, Traits>::Trace(
              visitor, &array[i]);
        }
      }
    }
  }
};
template <typename Table>
struct TraceInCollectionTrait<kNoWeakHandling,
blink::HeapHashTableBacking<Table>,
void> {
static void Trace(blink::Visitor* visitor, const void* self) {
TraceHashTableBackingInCollectionTrait<kNoWeakHandling, Table>::Trace(
visitor, self);
}
};
template <typename Table>
struct TraceInCollectionTrait<kWeakHandling,
blink::HeapHashTableBacking<Table>,
void> {
static void Trace(blink::Visitor* visitor, const void* self) {
TraceHashTableBackingInCollectionTrait<kWeakHandling, Table>::Trace(visitor,
self);
}
};
// Helper for tracing a KeyValuePair (i.e. a HashMap entry) with the given
// |WeakHandling|. Dispatches at compile time between plain tracing
// (Strong/Strong or Weak/Weak pairs) and ephemeron tracing (exactly one weak
// side whose dependent side is traceable). Note: the previous comment here
// was a copy of the HashTableBacking comment and did not describe this trait.
template <WTF::WeakHandlingFlag WeakHandling,
          typename Key,
          typename Value,
          typename Traits>
struct TraceKeyValuePairInCollectionTrait {
  using EphemeronHelper =
      blink::EphemeronKeyValuePair<Key,
                                   Value,
                                   typename Traits::KeyTraits,
                                   typename Traits::ValueTraits>;

  static void Trace(blink::Visitor* visitor,
                    const Key* key,
                    const Value* value) {
    TraceImpl::Trace(visitor, key, value);
  }

 private:
  // Implementation used for ephemeron pairs, i.e., Weak/Strong and
  // Strong/Weak.
  struct TraceImplEphemerons {
    static void Trace(blink::Visitor* visitor,
                      const Key* key,
                      const Value* value) {
      // Strongification of ephemerons, i.e., Weak/Strong and Strong/Weak.
      // The helper ensures that helper.key always refers to the weak part and
      // helper.value always refers to the dependent part.
      // We distinguish ephemeron from Weak/Weak and Strong/Strong to allow
      // users to override visitation behavior. An example is creating a heap
      // snapshot, where it is useful to annotate values as being kept alive
      // from keys rather than the table.
      EphemeronHelper helper(key, value);
      if (WeakHandling == kNoWeakHandling) {
        // Strongify the weak part.
        blink::TraceCollectionIfEnabled<
            kNoWeakHandling, typename EphemeronHelper::KeyType,
            typename EphemeronHelper::KeyTraits>::Trace(visitor, helper.key);
      }
      // The following passes on kNoWeakHandling for tracing value as the value
      // callback is only invoked to keep value alive iff key is alive,
      // following ephemeron semantics.
      visitor->TraceEphemeron(*helper.key, helper.value);
    }
  };

  // Implementation used for non-ephemeron pairs.
  struct TraceImplDefault {
    static void Trace(blink::Visitor* visitor,
                      const Key* key,
                      const Value* value) {
      // Strongification of non-ephemeron KVP, i.e., Strong/Strong or Weak/Weak.
      // Order does not matter here.
      blink::TraceCollectionIfEnabled<
          kNoWeakHandling, Key, typename Traits::KeyTraits>::Trace(visitor,
                                                                   key);
      blink::TraceCollectionIfEnabled<
          kNoWeakHandling, Value, typename Traits::ValueTraits>::Trace(visitor,
                                                                       value);
    }
  };

  // Ephemeron handling is only selected when the pair actually is an
  // ephemeron and its dependent part is traceable.
  using TraceImpl = typename std::conditional<
      EphemeronHelper::is_ephemeron &&
          WTF::IsTraceable<typename EphemeronHelper::ValueType>::value,
      TraceImplEphemerons,
      TraceImplDefault>::type;
};
// Trait for strong treatment of KeyValuePair. This is used to handle regular
// KVP but also for strongification of otherwise weakly handled KVPs.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<kNoWeakHandling,
                              KeyValuePair<Key, Value>,
                              Traits> {
  static void Trace(blink::Visitor* visitor,
                    const KeyValuePair<Key, Value>& self) {
    using Impl =
        TraceKeyValuePairInCollectionTrait<kNoWeakHandling, Key, Value, Traits>;
    Impl::Trace(visitor, &self.key, &self.value);
  }
};
// Trait for weak treatment of KeyValuePair.
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<kWeakHandling, KeyValuePair<Key, Value>, Traits> {
  static bool IsAlive(const blink::LivenessBroker& info,
                      const KeyValuePair<Key, Value>& self) {
    // Needed for Weak/Weak, Strong/Weak (reverse ephemeron), and Weak/Strong
    // (ephemeron). The order of the two checks does not matter as tracing a
    // weak key or value has no side effects.
    using KeyHandling = blink::TraceCollectionIfEnabled<
        WeakHandlingTrait<Key>::value, Key, typename Traits::KeyTraits>;
    using ValueHandling = blink::TraceCollectionIfEnabled<
        WeakHandlingTrait<Value>::value, Value, typename Traits::ValueTraits>;
    return KeyHandling::IsAlive(info, self.key) &&
           ValueHandling::IsAlive(info, self.value);
  }

  static void Trace(blink::Visitor* visitor,
                    const KeyValuePair<Key, Value>& self) {
    using Impl =
        TraceKeyValuePairInCollectionTrait<kWeakHandling, Key, Value, Traits>;
    Impl::Trace(visitor, &self.key, &self.value);
  }
};
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<
kNoWeakHandling,
internal::ConcurrentBucket<KeyValuePair<Key, Value>>,
Traits> {
static void Trace(
blink::Visitor* visitor,
const internal::ConcurrentBucket<KeyValuePair<Key, Value>>& self) {
TraceKeyValuePairInCollectionTrait<kNoWeakHandling, Key, Value,
Traits>::Trace(visitor, self.key(),
self.value());
}
};
template <typename Key, typename Value, typename Traits>
struct TraceInCollectionTrait<
kWeakHandling,
internal::ConcurrentBucket<KeyValuePair<Key, Value>>,
Traits> {
static void Trace(
blink::Visitor* visitor,
const internal::ConcurrentBucket<KeyValuePair<Key, Value>>& self) {
TraceKeyValuePairInCollectionTrait<kWeakHandling, Key, Value,
Traits>::Trace(visitor, self.key(),
self.value());
}
};
} // namespace WTF
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#include "base/check_op.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/impl/finalizer_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/gc_info.h"
#include "third_party/blink/renderer/platform/heap/impl/threading_traits.h"
#include "third_party/blink/renderer/platform/heap/impl/trace_traits.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
namespace blink {
// Garbage collected backing store for HeapVector and HeapDeque. Via
// ConditionalDestructor, the destructor — and thus Finalize() — only exists
// when Traits::kNeedsDestruction holds.
template <typename T, typename Traits = WTF::VectorTraits<T>>
class HeapVectorBacking final
    : public GarbageCollected<HeapVectorBacking<T, Traits>>,
      public WTF::ConditionalDestructor<HeapVectorBacking<T, Traits>,
                                        !Traits::kNeedsDestruction> {
 public:
  // Allocates |size| bytes for a backing of type |Backing| on the vector
  // arena of the current thread's heap.
  template <typename Backing>
  static void* AllocateObject(size_t);

  // Conditionally invoked via destructor.
  void Finalize();
};
// static
template <typename T, typename Traits>
template <typename Backing>
void* HeapVectorBacking<T, Traits>::AllocateObject(size_t size) {
  // Allocate on the heap of the thread that T is affine to, on the dedicated
  // vector arena.
  ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
  DCHECK(state->IsAllocationAllowed());
  return state->Heap().AllocateOnArenaIndex(
      state, size, BlinkGC::kVectorArenaIndex, GCInfoTrait<Backing>::Index(),
      WTF_HEAP_PROFILER_TYPE_NAME(Backing));
}
// Destroys the elements stored in the backing. Only instantiated (via
// ConditionalDestructor) when Traits::kNeedsDestruction holds.
template <typename T, typename Traits>
void HeapVectorBacking<T, Traits>::Finalize() {
  static_assert(Traits::kNeedsDestruction,
                "Only vector buffers with items requiring destruction should "
                "be finalized");
  static_assert(
      Traits::kCanClearUnusedSlotsWithMemset || std::is_polymorphic<T>::value,
      "HeapVectorBacking doesn't support objects that cannot be cleared as "
      "unused with memset or don't have a vtable");
  static_assert(
      !std::is_trivially_destructible<T>::value,
      "Finalization of trivially destructible classes should not happen.");
  HeapObjectHeader* header = HeapObjectHeader::FromPayload(this);
  // Use the payload size as recorded by the heap to determine how many
  // elements to finalize.
  size_t length = header->PayloadSize() / sizeof(T);
  Address payload = header->Payload();
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
  ANNOTATE_CHANGE_SIZE(payload, length * sizeof(T), 0, length * sizeof(T));
#endif
  // As commented above, HeapVectorBacking calls finalizers for unused slots
  // (which are already zeroed out).
  if (std::is_polymorphic<T>::value) {
    // For polymorphic T, only destroy slots whose vtable pointer has been
    // written; a zeroed slot is an unused slot.
    for (unsigned i = 0; i < length; ++i) {
      Address element = payload + i * sizeof(T);
      if (blink::VTableInitialized(element))
        reinterpret_cast<T*>(element)->~T();
    }
  } else {
    // Non-polymorphic T is memset-clearable, so destroying every slot —
    // including zeroed unused ones — is safe.
    T* buffer = reinterpret_cast<T*>(payload);
    for (unsigned i = 0; i < length; ++i)
      buffer[i].~T();
  }
}
// Creates a HeapVectorBacking for |num_elements| elements of T on the vector
// arena. Regular operator new() is deleted for garbage collected types, so
// the object is constructed via placement new.
template <typename T>
struct MakeGarbageCollectedTrait<HeapVectorBacking<T>> {
  static HeapVectorBacking<T>* Call(size_t num_elements) {
    CHECK_GT(num_elements, 0u);
    using Backing = HeapVectorBacking<T>;
    void* storage =
        Backing::template AllocateObject<Backing>(num_elements * sizeof(T));
    HeapObjectHeader* header = HeapObjectHeader::FromPayload(storage);
    Backing* backing = ::new (storage) Backing();
    header->MarkFullyConstructed<HeapObjectHeader::AccessMode::kAtomic>();
    return backing;
  }
};
// The thread affinity of a vector backing follows the affinity of its element
// type T.
template <typename T, typename Traits>
struct ThreadingTrait<HeapVectorBacking<T, Traits>> {
  STATIC_ONLY(ThreadingTrait);
  // Fixed: this previously read ThreadingTrait<T>::Affinity, while the rest
  // of this file (see AllocateObject above) consistently uses
  // ThreadingTrait<T>::kAffinity. The misspelling is latent because a static
  // member's in-class initializer is only instantiated when the member is
  // actually used.
  static const ThreadAffinity kAffinity = ThreadingTrait<T>::kAffinity;
};
template <typename T, typename Traits>
struct TraceTrait<HeapVectorBacking<T, Traits>> {
  STATIC_ONLY(TraceTrait);
  using Backing = HeapVectorBacking<T, Traits>;

 public:
  static TraceDescriptor GetTraceDescriptor(const void* self) {
    return {self, TraceTrait<Backing>::Trace};
  }

  // Traces the backing's elements strongly; weak elements are not supported
  // in vector backings (see static_assert below).
  static void Trace(Visitor* visitor, const void* self) {
    // When elements may not be traced concurrently, hand the backing to
    // DeferredTraceIfConcurrent — presumably deferring to a non-concurrent
    // marking phase (TODO confirm against Visitor) — and bail out if it was
    // deferred.
    if (!Traits::kCanTraceConcurrently && self) {
      if (visitor->DeferredTraceIfConcurrent({self, &Trace},
                                             GetBackingStoreSize(self)))
        return;
    }
    static_assert(!WTF::IsWeak<T>::value,
                  "Weakness is not supported in HeapVector and HeapDeque");
    if (WTF::IsTraceableInCollectionTrait<Traits>::value) {
      WTF::TraceInCollectionTrait<WTF::kNoWeakHandling,
                                  HeapVectorBacking<T, Traits>,
                                  void>::Trace(visitor, self);
    }
  }

 private:
  // Returns the allocation size of the backing as recorded by the heap,
  // accounting for large-object pages.
  static size_t GetBackingStoreSize(const void* backing_store) {
    const HeapObjectHeader* header =
        HeapObjectHeader::FromPayload(backing_store);
    return header->IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
               ? static_cast<LargeObjectPage*>(PageFromObject(header))
                     ->ObjectSize()
               : header->size<HeapObjectHeader::AccessMode::kAtomic>();
  }
};
} // namespace blink
namespace WTF {
// This trace method is used only for on-stack HeapVectors found in
// conservative scanning. On-heap HeapVectors are traced by Vector::trace.
template <typename T, typename Traits>
struct TraceInCollectionTrait<kNoWeakHandling,
                              blink::HeapVectorBacking<T, Traits>,
                              void> {
  static void Trace(blink::Visitor* visitor, const void* self) {
    // HeapVectorBacking does not know the exact size of the vector
    // and just knows the capacity of the vector. Due to the constraint,
    // HeapVectorBacking can support only the following objects:
    //
    // - An object that has a vtable. In this case, HeapVectorBacking
    //   traces only slots that are not zeroed out. This is because if
    //   the object has a vtable, the zeroed slot means that it is
    //   an unused slot (Remember that the unused slots are guaranteed
    //   to be zeroed out by VectorUnusedSlotClearer).
    //
    // - An object that can be initialized with memset. In this case,
    //   HeapVectorBacking traces all slots including unused slots.
    //   This is fine because the fact that the object can be initialized
    //   with memset indicates that it is safe to treat the zerod slot
    //   as a valid object.
    static_assert(!IsTraceableInCollectionTrait<Traits>::value ||
                      Traits::kCanClearUnusedSlotsWithMemset ||
                      std::is_polymorphic<T>::value,
                  "HeapVectorBacking doesn't support objects that cannot be "
                  "cleared as unused with memset.");
    // This trace method is instantiated for vectors where
    // IsTraceableInCollectionTrait<Traits>::value is false, but the trace
    // method should not be called. Thus we cannot static-assert
    // IsTraceableInCollectionTrait<Traits>::value but should runtime-assert it.
    DCHECK(IsTraceableInCollectionTrait<Traits>::value);
    const T* array = reinterpret_cast<const T*>(self);
    blink::HeapObjectHeader* header =
        blink::HeapObjectHeader::FromPayload(self);
    // Use the payload size as recorded by the heap to determine how many
    // elements to trace.
    size_t length = header->PayloadSize() / sizeof(T);
#ifdef ANNOTATE_CONTIGUOUS_CONTAINER
    // As commented above, HeapVectorBacking can trace unused slots
    // (which are already zeroed out).
    // NOTE(review): the sizes passed here are element counts, whereas
    // HeapVectorBacking::Finalize() passes byte counts to the same macro —
    // confirm which unit ANNOTATE_CHANGE_SIZE expects.
    ANNOTATE_CHANGE_SIZE(array, length, 0, length);
#endif
    if (std::is_polymorphic<T>::value) {
      const char* pointer = reinterpret_cast<const char*>(array);
      for (unsigned i = 0; i < length; ++i) {
        // Only trace slots whose vtable pointer has been written, i.e. slots
        // holding a constructed object; zeroed slots are unused.
        const char* element = pointer + i * sizeof(T);
        if (blink::VTableInitialized(element)) {
          blink::TraceIfNeeded<
              T, IsTraceableInCollectionTrait<Traits>::value>::Trace(visitor,
                                                                     array[i]);
        }
      }
    } else {
      for (size_t i = 0; i < length; ++i) {
        blink::TraceIfNeeded<
            T, IsTraceableInCollectionTrait<Traits>::value>::Trace(visitor,
                                                                   array[i]);
      }
    }
  }
};
} // namespace WTF
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
......@@ -7,6 +7,7 @@
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "v8/include/cppgc/liveness-broker.h"
namespace blink {
......@@ -21,6 +22,8 @@ class PLATFORM_EXPORT BlinkGC final {
enum StackState { kNoHeapPointersOnStack, kHeapPointersOnStack };
};
using WeakCallback = void (*)(const cppgc::LivenessBroker&, const void*);
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_BLINK_GC_H_
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
namespace blink {
// Backing store for hash-table-based heap collections in the library
// (v8_wrapper) flavor of Oilpan. Via ConditionalDestructor, the destructor —
// and thus Finalize() — only exists when the table's ValueType is not
// trivially destructible.
template <typename Table>
class HeapHashTableBacking final
    : public GarbageCollected<HeapHashTableBacking<Table>>,
      public WTF::ConditionalDestructor<
          HeapHashTableBacking<Table>,
          std::is_trivially_destructible<typename Table::ValueType>::value> {
 public:
  // Conditionally invoked via destructor.
  void Finalize();
};
// Invoked only for tables whose ValueType requires destruction (see
// ConditionalDestructor above). Element destruction is not implemented yet
// in the library flavor.
template <typename Table>
void HeapHashTableBacking<Table>::Finalize() {
  using Value = typename Table::ValueType;
  static_assert(
      !std::is_trivially_destructible<Value>::value,
      "Finalization of trivially destructible classes should not happen.");
  // TODO(1056170): Implement.
}
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_HASH_TABLE_BACKING_H_
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/wtf/conditional_destructor.h"
#include "third_party/blink/renderer/platform/wtf/vector_traits.h"
namespace blink {
// Backing store for HeapVector/HeapDeque in the library (v8_wrapper) flavor
// of Oilpan. Via ConditionalDestructor, the destructor — and thus Finalize()
// — only exists when Traits::kNeedsDestruction holds.
template <typename T, typename Traits = WTF::VectorTraits<T>>
class HeapVectorBacking final
    : public GarbageCollected<HeapVectorBacking<T, Traits>>,
      public WTF::ConditionalDestructor<HeapVectorBacking<T, Traits>,
                                        !Traits::kNeedsDestruction> {
 public:
  // Conditionally invoked via destructor.
  void Finalize();
};
// Invoked only for backings whose elements require destruction (see
// ConditionalDestructor above). Element destruction is not implemented yet
// in the library flavor.
template <typename T, typename Traits>
void HeapVectorBacking<T, Traits>::Finalize() {
  static_assert(Traits::kNeedsDestruction,
                "Only vector buffers with items requiring destruction should "
                "be finalized");
  static_assert(
      Traits::kCanClearUnusedSlotsWithMemset || std::is_polymorphic<T>::value,
      "HeapVectorBacking doesn't support objects that cannot be cleared as "
      "unused with memset or don't have a vtable");
  static_assert(
      !std::is_trivially_destructible<T>::value,
      "Finalization of trivially destructible classes should not happen.");
  // TODO(1056170): Implement.
}
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_COLLECTION_SUPPORT_HEAP_VECTOR_BACKING_H_
......@@ -5,6 +5,10 @@
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_HEAP_ALLOCATOR_IMPL_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_V8_WRAPPER_HEAP_ALLOCATOR_IMPL_H_
#include "third_party/blink/renderer/platform/heap/collection_support/heap_hash_table_backing.h"
#include "third_party/blink/renderer/platform/heap/collection_support/heap_vector_backing.h"
#include "third_party/blink/renderer/platform/heap/v8_wrapper/heap.h"
#include "third_party/blink/renderer/platform/heap/v8_wrapper/visitor.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
......@@ -14,6 +18,8 @@ class PLATFORM_EXPORT HeapAllocator {
STATIC_ONLY(HeapAllocator);
public:
using LivenessBroker = blink::LivenessBroker;
static constexpr bool kIsGarbageCollected = true;
// See wtf/size_t.h for details.
......@@ -109,6 +115,47 @@ class PLATFORM_EXPORT HeapAllocator {
static void NotifyNewObjects(T*, size_t) {
// TODO(1056170): Implement.
}
  // Traces a single collection element. Not implemented yet in the library
  // flavor; see the TODO below.
  template <typename T, typename Traits>
  static void Trace(Visitor* visitor, const T& t) {
    // TODO(1056170): Forward to TraceInCollectionTrait.
  }
template <typename T>
static void TraceVectorBacking(Visitor* visitor,
const T* backing,
const T* const* backing_slot) {
visitor->RegisterMovableReference(const_cast<const HeapVectorBacking<T>**>(
reinterpret_cast<const HeapVectorBacking<T>* const*>(backing_slot)));
visitor->Trace(reinterpret_cast<const HeapVectorBacking<T>*>(backing));
}
template <typename T, typename HashTable>
static void TraceHashTableBackingStrongly(Visitor* visitor,
const T* backing,
const T* const* backing_slot) {
visitor->RegisterMovableReference(
const_cast<const HeapHashTableBacking<HashTable>**>(
reinterpret_cast<const HeapHashTableBacking<HashTable>* const*>(
backing_slot)));
visitor->Trace(
reinterpret_cast<const HeapHashTableBacking<HashTable>*>(backing));
}
template <typename T, typename HashTable>
static void TraceHashTableBackingWeakly(Visitor* visitor,
const T* backing,
const T* const* backing_slot,
WeakCallback callback,
const void* parameter) {
visitor->RegisterMovableReference(
const_cast<const HeapHashTableBacking<HashTable>**>(
reinterpret_cast<const HeapHashTableBacking<HashTable>* const*>(
backing_slot)));
visitor->TraceWeakContainer(
reinterpret_cast<const HeapHashTableBacking<HashTable>*>(backing),
callback, parameter);
}
};
} // namespace blink
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment