Commit 43607a09 authored by Haruka Matsumura, committed by Commit Bot

Oilpan: Modify collection of traced_slot_ in HeapCompaction

This CL modifies which slots are collected into traced_slot_ during heap compaction.
In the atomic-pause GC world, slots that hold a nullptr reference or that only point to an inline buffer are not collected into traced_slot_, because such heap collections return early from their Trace methods.
In incremental GC, however, we want to collect such slots because they might come to reference an out-of-line buffer after tracing.
This CL therefore changes the Trace methods to register all slots.
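
Illustrative sketch only (not part of this CL): a minimal, self-contained model of the
registration pattern, using simplified stand-ins for Blink's visitor and compaction
bookkeeping (CompactionRegistry and VisitBackingStore are hypothetical names):

// Sketch of "register the slot even when the backing is currently null".
#include <cstdio>
#include <unordered_set>

struct CompactionRegistry {
  // Slots that heap compaction must fix up when backing stores move.
  std::unordered_set<void**> traced_slots;

  // Always register the slot, even when the backing pointer is currently null
  // (e.g. a HeapVector that still uses its inline buffer). If an out-of-line
  // buffer is allocated later during incremental marking, the slot is already
  // known to the compactor.
  void VisitBackingStore(void* backing, void** slot) {
    traced_slots.insert(slot);
    if (!backing)
      return;  // Nothing to mark yet; only the slot registration matters.
    // ... mark the backing store object here ...
  }
};

int main() {
  CompactionRegistry registry;
  void* backing = nullptr;  // Vector currently has only an inline buffer.
  registry.VisitBackingStore(backing, &backing);
  std::printf("registered slots: %zu\n", registry.traced_slots.size());
  return 0;
}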

Bug: 864425
Change-Id: I08d94ae86eb33973addcd2f535a003d29c9eeb3b
Reviewed-on: https://chromium-review.googlesource.com/1156112
Commit-Queue: Haruka Matsumura <harukamt@google.com>
Reviewed-by: Keishi Hattori <keishi@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#582102}
parent 8bcd799b
@@ -25,6 +25,8 @@ class VerifyingScriptWrappableVisitor : public ScriptWrappableVisitor {
   void VisitBackingStoreStrongly(void* object,
                                  void** object_slot,
                                  TraceDescriptor desc) override {
+    if (!object)
+      return;
     desc.callback(this, desc.base_object_payload);
   }
......
@@ -156,6 +156,8 @@ void V8EmbedderGraphBuilder::VisitWithWrappers(
 void V8EmbedderGraphBuilder::VisitBackingStoreStrongly(void* object,
                                                        void** object_slot,
                                                        TraceDescriptor desc) {
+  if (!object)
+    return;
   desc.callback(this, desc.base_object_payload);
 }
......
@@ -276,6 +276,8 @@ void ScriptWrappableMarkingVisitor::VisitBackingStoreStrongly(
     void* object,
     void** object_slot,
     TraceDescriptor desc) {
+  if (!object)
+    return;
   desc.callback(this, desc.base_object_payload);
 }
......
@@ -334,7 +334,6 @@ void ThreadHeap::VisitStackRoots(MarkingVisitor* visitor) {
 }

 BasePage* ThreadHeap::LookupPageForAddress(Address address) {
-  DCHECK(thread_state_->InAtomicMarkingPause());
   if (PageMemoryRegion* region = region_tree_->Lookup(address)) {
     return region->PageFromAddress(address);
   }
......
@@ -28,8 +28,8 @@ bool HeapCompact::force_compaction_gc_ = false;
 // heap compaction-enhanced GC.
 class HeapCompact::MovableObjectFixups final {
  public:
-  static std::unique_ptr<MovableObjectFixups> Create() {
-    return base::WrapUnique(new MovableObjectFixups);
+  static std::unique_ptr<MovableObjectFixups> Create(ThreadHeap* heap) {
+    return base::WrapUnique(new MovableObjectFixups(heap));
   }

   ~MovableObjectFixups() = default;
@@ -62,13 +62,21 @@ class HeapCompact::MovableObjectFixups final {
   void Add(MovableReference* slot) {
     DCHECK(*slot);
     MovableReference reference = *slot;
-    BasePage* ref_page = PageFromObject(reference);
+    BasePage* ref_page =
+        heap_->LookupPageForAddress(reinterpret_cast<Address>(reference));
+    // ref_page is null if *slot is pointing to an off-heap region. This may
+    // happen if *slot is pointing to an inline buffer of HeapVector with inline
+    // capacity.
+    if (!ref_page)
+      return;
     // Nothing to compact on a large object's page.
     if (ref_page->IsLargeObjectPage())
       return;
+    if (!HeapCompact::IsCompactableArena(ref_page->Arena()->ArenaIndex()))
+      return;

 #if DCHECK_IS_ON()
-    DCHECK(HeapCompact::IsCompactableArena(ref_page->Arena()->ArenaIndex()));
     auto it = fixups_.find(reference);
     DCHECK(it == fixups_.end() || it->value == slot);
 #endif
@@ -201,7 +209,13 @@ class HeapCompact::MovableObjectFixups final {
     // compacted.)
     if (!*slot)
       return;
-    BasePage* slot_page = PageFromObject(*slot);
+    BasePage* slot_page =
+        heap_->LookupPageForAddress(reinterpret_cast<Address>(*slot));
+    // slot_page is null if *slot is pointing to an off-heap region. This may
+    // happen if *slot is pointing to an inline buffer of HeapVector with
+    // inline capacity.
+    if (!slot_page)
+      return;
     DCHECK(
         slot_page->IsLargeObjectPage() ||
         (HeapCompact::IsCompactableArena(slot_page->Arena()->ArenaIndex()) &&
@@ -238,7 +252,8 @@ class HeapCompact::MovableObjectFixups final {
 #endif

  private:
-  MovableObjectFixups() = default;
+  MovableObjectFixups(ThreadHeap* heap) : heap_(heap) {}
+  ThreadHeap* heap_;

   // Tracking movable and updatable references. For now, we keep a
   // map which for each movable object, recording the slot that
@@ -290,7 +305,7 @@ HeapCompact::~HeapCompact() = default;

 HeapCompact::MovableObjectFixups& HeapCompact::Fixups() {
   if (!fixups_)
-    fixups_ = MovableObjectFixups::Create();
+    fixups_ = MovableObjectFixups::Create(heap_);
   return *fixups_;
 }
......
@@ -142,8 +142,10 @@ class PLATFORM_EXPORT MarkingVisitor final : public Visitor {
   // processing. In this case, the contents are processed separately using
   // the corresponding traits but the backing store requires marking.
   void VisitBackingStoreOnly(void* object, void** object_slot) final {
-    MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
     RegisterBackingStoreReference(object_slot);
+    if (!object)
+      return;
+    MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
   }

   void RegisterBackingStoreCallback(void* backing_store,
......
@@ -149,8 +149,6 @@ class PLATFORM_EXPORT Visitor {
     static_assert(IsGarbageCollectedType<T>::value,
                   "T needs to be a garbage collected object");
-    if (!backing_store)
-      return;
     VisitBackingStoreOnly(reinterpret_cast<void*>(backing_store),
                           reinterpret_cast<void**>(backing_store_slot));
   }
......
@@ -678,6 +678,8 @@ Deque<T, inlineCapacity, Allocator>::Trace(VisitorDispatcher visitor) {
     Allocator::TraceVectorBacking(visitor, buffer_.Buffer(),
                                   buffer_.BufferSlot());
   } else {
+    Allocator::TraceVectorBacking(visitor, static_cast<T*>(nullptr),
+                                  buffer_.BufferSlot());
     const T* buffer_begin = buffer_.Buffer();
     const T* end = buffer_begin + end_;
     if (IsTraceableInCollectionTrait<VectorTraits<T>>::value) {
......
@@ -2065,9 +2065,6 @@ struct WeakProcessingHashTableHelper<kWeakHandling,
     if (!table->table_)
       return;
-    // Only trace the backing store. Its fields will be processed below.
-    Allocator::template TraceHashTableBackingOnly<ValueType, HashTableType>(
-        visitor, table->table_, &(table->table_));
     // Now perform weak processing (this is a no-op if the backing was
     // accessible through an iterator and was already marked strongly).
     for (ValueType* element = table->table_ + table->table_size_ - 1;
@@ -2122,15 +2119,18 @@ template <typename VisitorDispatcher, typename A>
 std::enable_if_t<A::kIsGarbageCollected>
 HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     Trace(VisitorDispatcher visitor) {
-  if (!table_)
-    return;
   if (Traits::kWeakHandlingFlag == kNoWeakHandling) {
     // Strong HashTable.
     DCHECK(IsTraceableInCollectionTrait<Traits>::value);
     Allocator::template TraceHashTableBackingStrongly<ValueType, HashTable>(
         visitor, table_, &table_);
   } else {
+    // Only trace the backing store. The elements will be processed in the weak
+    // processing callback.
+    Allocator::template TraceHashTableBackingOnly<ValueType, HashTable>(
+        visitor, table_, &table_);
+    if (!table_)
+      return;
     // Weak HashTable. The HashTable may be held alive strongly from somewhere
     // else, e.g., an iterator.
......
@@ -1962,11 +1962,16 @@ std::enable_if_t<A::kIsGarbageCollected>
 Vector<T, inlineCapacity, Allocator>::Trace(VisitorDispatcher visitor) {
   static_assert(Allocator::kIsGarbageCollected,
                 "Garbage collector must be enabled.");
-  if (!Buffer())
-    return;
   if (this->HasOutOfLineBuffer()) {
     Allocator::TraceVectorBacking(visitor, Buffer(), Base::BufferSlot());
   } else {
+    // We should not visit inline buffers, but we still need to register the
+    // slot for heap compaction. So, we pass nullptr to this method.
+    Allocator::TraceVectorBacking(visitor, static_cast<T*>(nullptr),
+                                  Base::BufferSlot());
+    if (!Buffer())
+      return;
     // Inline buffer requires tracing immediately.
     const T* buffer_begin = Buffer();
     const T* buffer_end = Buffer() + size();
......