Commit 3817ba7c authored by Anton Bikineev, committed by Commit Bot

heap: Implement generational barrier

The generational barrier is needed to track inter-generational
(old-to-young) pointers in the remembered set, which is implemented as
a card table. The barrier is guarded by the BLINK_HEAP_YOUNG_GENERATION
build flag and is off by default.
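
As a rough illustration (not part of this CL; every name, size and the
card granularity below are assumptions), an old-to-young pointer store
dirties the card covering the slot, and a minor GC then re-scans only
the dirty cards instead of the whole old generation:

  // Standalone sketch; kPageSize, kCardSize and all identifiers are made up.
  #include <bitset>
  #include <cstddef>

  constexpr std::size_t kPageSize = 1 << 17;  // assumed page size
  constexpr std::size_t kCardSize = 512;      // assumed card granularity
  constexpr std::size_t kCardsPerPage = kPageSize / kCardSize;

  struct CardTable {
    std::bitset<kCardsPerPage> dirty;

    // Remember that the card covering |slot_offset| may hold an
    // old-to-young pointer.
    void MarkCard(std::size_t slot_offset) {
      dirty.set(slot_offset / kCardSize);
    }
  };

  // Generational barrier: invoked after a pointer to a young object is
  // written into |slot_offset| of an old (already marked) object's page.
  void GenerationalBarrier(CardTable& table, std::size_t slot_offset) {
    table.MarkCard(slot_offset);
  }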

Design doc: https://bit.ly/2ZXGU20

Bug: 1029379
Change-Id: I9931c409d119d35e85185970cff0a62f1171fc6a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2000726
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#738952}
parent 32088ffb
@@ -389,7 +389,15 @@ class PLATFORM_EXPORT ThreadHeap {
PageBloomFilter* page_bloom_filter() { return page_bloom_filter_.get(); }
bool IsInLastAllocatedRegion(Address address) const;
void SetLastAllocatedRegion(Address start, size_t length);
private:
struct LastAllocatedRegion {
Address start = nullptr;
size_t length = 0;
};
static int ArenaIndexForObjectSize(size_t);
void SetupWorklists();
@@ -460,6 +468,8 @@ class PLATFORM_EXPORT ThreadHeap {
std::unique_ptr<HeapCompact> compaction_;
LastAllocatedRegion last_allocated_region_;
BaseArena* arenas_[BlinkGC::kNumberOfArenas];
static ThreadHeap* main_thread_heap_;
@@ -638,6 +648,17 @@ Address ThreadHeap::Allocate(size_t size) {
GCInfoTrait<T>::Index(), type_name);
}
inline bool ThreadHeap::IsInLastAllocatedRegion(Address address) const {
return last_allocated_region_.start <= address &&
address <=
last_allocated_region_.start + last_allocated_region_.length;
}
inline void ThreadHeap::SetLastAllocatedRegion(Address start, size_t length) {
last_allocated_region_.start = start;
last_allocated_region_.length = length;
}
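// Illustrative note (not part of this change): SetLastAllocatedRegion records
// the most recently set up allocation area, which by construction only holds
// newly allocated (young) objects. The generational barrier can therefore use
// IsInLastAllocatedRegion as a cheap filter before any page lookup, roughly:
//
//   if (heap.IsInLastAllocatedRegion(slot))
//     return;  // |slot| is in freshly allocated memory; nothing to remember.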
template <typename T>
void Visitor::HandleWeakCell(const WeakCallbackInfo&, void* object) {
WeakMember<T>* weak_member = reinterpret_cast<WeakMember<T>*>(object);
@@ -203,44 +203,63 @@ class PLATFORM_EXPORT HeapAllocator {
template <typename T, typename Traits>
static void NotifyNewObject(T* object) {
#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
ThreadState* const thread_state = ThreadState::Current();
if (!thread_state->IsIncrementalMarking()) {
MarkingVisitor::GenerationalBarrier(reinterpret_cast<Address>(object),
thread_state);
return;
}
#else
if (!ThreadState::IsAnyIncrementalMarking())
return;
// The object may have been in-place constructed as part of a large object.
// It is not safe to retrieve the page from the object here.
ThreadState* const thread_state = ThreadState::Current();
if (!thread_state->IsIncrementalMarking()) {
return;
}
#endif // BLINK_HEAP_YOUNG_GENERATION
// Eagerly trace the object ensuring that the object and all its children
// are discovered by the marker.
ThreadState::NoAllocationScope no_allocation_scope(thread_state);
DCHECK(thread_state->CurrentVisitor());
// No weak handling for write barriers. Modifying weakly reachable objects
// strongifies them for the current cycle.
DCHECK(!Traits::kCanHaveDeletedValue || !Traits::IsDeletedValue(*object));
TraceCollectionIfEnabled<WTF::kNoWeakHandling, T, Traits>::Trace(
thread_state->CurrentVisitor(), object);
}
template <typename T, typename Traits>
static void NotifyNewObjects(T* array, size_t len) {
#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
ThreadState* const thread_state = ThreadState::Current();
if (!thread_state->IsIncrementalMarking()) {
MarkingVisitor::GenerationalBarrier(reinterpret_cast<Address>(array),
thread_state);
return;
}
#else
if (!ThreadState::IsAnyIncrementalMarking())
return;
// The object may have been in-place constructed as part of a large object.
// It is not safe to retrieve the page from the object here.
ThreadState* const thread_state = ThreadState::Current();
if (!thread_state->IsIncrementalMarking()) {
return;
}
#endif // BLINK_HEAP_YOUNG_GENERATION
// See |NotifyNewObject| for details.
ThreadState::NoAllocationScope no_allocation_scope(thread_state);
DCHECK(thread_state->CurrentVisitor());
// No weak handling for write barriers. Modifying weakly reachable objects
// strongifies them for the current cycle.
while (len-- > 0) {
DCHECK(!Traits::kCanHaveDeletedValue || !Traits::IsDeletedValue(*array));
TraceCollectionIfEnabled<WTF::kNoWeakHandling, T, Traits>::Trace(
thread_state->CurrentVisitor(), array);
array++;
}
}
@@ -414,9 +414,7 @@ NormalPageArena::NormalPageArena(ThreadState* state, int index)
current_allocation_point_(nullptr),
remaining_allocation_size_(0),
last_remaining_allocation_size_(0),
promptly_freed_size_(0) {}
void NormalPageArena::AddToFreeList(Address address, size_t size) {
#if DCHECK_IS_ON()
@@ -893,6 +891,10 @@ void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
// Set up a new linear allocation area.
current_allocation_point_ = point;
last_remaining_allocation_size_ = remaining_allocation_size_ = size;
// Update last allocated region in ThreadHeap. This must also be done if the
// allocation point is set to 0 (before doing GC), so that the last allocated
// region is automatically reset after GC.
GetThreadState()->Heap().SetLastAllocatedRegion(point, size);
if (point) {
// Only update allocated size and object start bitmap if the area is
// actually set up with a non-null address.
@@ -1011,6 +1013,10 @@ Address LargeObjectArena::DoAllocateLargeObjectPage(size_t allocation_size,
swept_pages_.PushLocked(large_object);
// Update last allocated region in ThreadHeap.
GetThreadState()->Heap().SetLastAllocatedRegion(large_object->Payload(),
large_object->PayloadSize());
// Add all segments of kBlinkPageSize to the bloom filter so that the large
// object can be kept alive by derived pointers on the stack. An alternative might be to
// prohibit derived pointers to large objects, but that is dangerous since the
@@ -1067,7 +1073,9 @@ Address LargeObjectArena::LazySweepPages(size_t allocation_size,
return result;
}
FreeList::FreeList() : biggest_free_list_index_(0) {
Clear();
}
void FreeList::Add(Address address, size_t size) {
DCHECK_LT(size, BlinkPagePayloadSize());
@@ -10,14 +10,6 @@
namespace blink {
namespace {
ALWAYS_INLINE bool IsHashTableDeleteValue(const void* value) {
return value == reinterpret_cast<void*>(-1);
}
} // namespace
MarkingVisitorCommon::MarkingVisitorCommon(ThreadState* state,
MarkingMode marking_mode,
int task_id)
@@ -136,19 +128,9 @@ void MarkingVisitorCommon::VisitBackingStoreOnly(void* object,
}
// static
bool MarkingVisitor::MarkValue(void* value,
BasePage* base_page,
ThreadState* thread_state) {
HeapObjectHeader* header;
if (LIKELY(!base_page->IsLargeObjectPage())) {
header = reinterpret_cast<HeapObjectHeader*>(
@@ -176,6 +158,45 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
return true;
}
// static
bool MarkingVisitor::WriteBarrierSlow(void* value) {
if (!value || IsHashTableDeleteValue(value))
return false;
// It is guaranteed that managed references point to either GarbageCollected
// or GarbageCollectedMixin. Mixins are restricted to regular object sizes.
// It is thus possible to get to the page header by aligning properly.
BasePage* base_page = PageFromObject(value);
ThreadState* const thread_state = base_page->thread_state();
if (!thread_state->IsIncrementalMarking())
return false;
return MarkValue(value, base_page, thread_state);
}
void MarkingVisitor::GenerationalBarrierSlow(Address slot,
ThreadState* thread_state) {
BasePage* slot_page = thread_state->Heap().LookupPageForAddress(slot);
  DCHECK(slot_page);
  // Only remember slots that reside in already marked (old) objects; pointers
  // stored in still unmarked (young) objects are found when the young
  // generation itself is traced.
if (UNLIKELY(slot_page->IsLargeObjectPage())) {
auto* large_page = static_cast<LargeObjectPage*>(slot_page);
if (LIKELY(!large_page->ObjectHeader()->IsMarked()))
return;
large_page->SetRemembered(true);
return;
}
auto* normal_page = static_cast<NormalPage*>(slot_page);
const HeapObjectHeader* source_header = reinterpret_cast<HeapObjectHeader*>(
normal_page->object_start_bit_map()->FindHeader(slot));
DCHECK_LT(0u, source_header->GcInfoIndex());
if (UNLIKELY(source_header->IsMarked())) {
normal_page->MarkCard(slot);
}
}
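// Illustrative note (not part of this change): NormalPage::MarkCard is
// expected to map |slot| to a card index inside the page, conceptually
//
//   card_index = (slot - page_start) >> kCardShift;  // kCardShift assumed
//
// so that a minor GC only re-scans dirty cards of old (marked) objects rather
// than whole pages when looking for old-to-young pointers.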
void MarkingVisitor::TraceMarkedBackingStoreSlow(void* value) {
if (!value)
return;
@@ -12,6 +12,14 @@
namespace blink {
namespace {
ALWAYS_INLINE bool IsHashTableDeleteValue(const void* value) {
return value == reinterpret_cast<void*>(-1);
}
} // namespace
class BasePage;
// Base visitor used to mark Oilpan objects on any thread.
@@ -164,10 +172,12 @@ class PLATFORM_EXPORT MarkingVisitor
// Write barrier that adds the value |slot| refers to to the set of marked
// objects. The barrier bails out if marking is off or the object is not yet
// marked. Returns true if the value has been marked on this call.
template <typename T>
static bool WriteBarrier(T** slot);
static bool GenerationalBarrier(Address slot, ThreadState* state);
// Eagerly traces an already marked backing store ensuring that all its
// children are discovered by the marker. The barrier bails out if marking
// is off, and for individual reachable objects if they are already marked. The
@@ -195,8 +205,10 @@ class PLATFORM_EXPORT MarkingVisitor
void FlushMarkingWorklists();
private:
// Exact version of the marking and generational write barriers.
static bool WriteBarrierSlow(void*);
static void GenerationalBarrierSlow(Address, ThreadState*);
static bool MarkValue(void*, BasePage*, ThreadState*);
static void TraceMarkedBackingStoreSlow(void*);
};
@@ -210,12 +222,38 @@ ALWAYS_INLINE bool MarkingVisitor::IsInConstruction(HeapObjectHeader* header) {
// static
template <typename T>
ALWAYS_INLINE bool MarkingVisitor::WriteBarrier(T** slot) {
#if BUILDFLAG(BLINK_HEAP_YOUNG_GENERATION)
void* value = *slot;
if (!value || IsHashTableDeleteValue(value))
return false;
// Dijkstra barrier if concurrent marking is in progress.
BasePage* value_page = PageFromObject(value);
ThreadState* thread_state = value_page->thread_state();
if (UNLIKELY(thread_state->IsIncrementalMarking()))
return MarkValue(value, value_page, thread_state);
GenerationalBarrier(reinterpret_cast<Address>(slot), thread_state);
return false;
#else
if (!ThreadState::IsAnyIncrementalMarking())
return false;
// Avoid any further checks and dispatch to a call at this point. Aggressive
// inlining otherwise pollutes the regular execution paths.
return WriteBarrierSlow(*slot);
#endif
}
// static
ALWAYS_INLINE bool MarkingVisitor::GenerationalBarrier(Address slot,
ThreadState* state) {
  // First, check whether the slot lies in the most recently set up allocation
  // region; such memory only contains newly allocated objects and needs no
  // remembered set entry.
  if (LIKELY(state->Heap().IsInLastAllocatedRegion(slot)))
    return false;
  // Second, bail out for slots on the stack; the stack is scanned as roots.
  if (UNLIKELY(state->IsOnStack(slot)))
    return false;
GenerationalBarrierSlow(slot, state);
return false;
}
// static
@@ -371,6 +371,12 @@ class PLATFORM_EXPORT ThreadState final {
return &FromObject(object)->Heap() == &Heap();
}
  // Returns true if |address| lies on the current thread's stack, i.e. between
  // the recorded stack start and the current stack position (the stack grows
  // towards lower addresses).
  ALWAYS_INLINE bool IsOnStack(Address address) const {
return reinterpret_cast<Address>(start_of_stack_) >= address &&
address >= (reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(
WTF::GetCurrentStackPosition())));
}
int GcAge() const { return gc_age_; }
MarkingVisitor* CurrentVisitor() const {