Commit d0e0bb6a authored by Keishi Hattori, committed by Commit Bot

Oilpan: Emit write barrier on collection backing assignment

Collection backings use raw pointers instead of Member, so we need manual write barriers.
This CL emits a write barrier on collection backing assignment and disables prompt freeing for any marked backing, as it may already be registered on the marking CallbackStack.
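
For background, a minimal sketch (not part of the diff below) of the pattern the collection code now has to follow; the SetBacking helper and its names are illustrative only, while BackingWriteBarrier is the hook this CL adds:

  // A Member<T> assignment would run the write barrier inside its operator=;
  // a raw backing pointer does not, so the barrier is invoked by hand right
  // after the assignment.
  template <typename T, typename Allocator>
  void SetBacking(T*& backing_slot, T* new_backing) {
    backing_slot = new_backing;
    // Report the new backing store to the incremental marker so that an
    // already-marked owner does not end up holding an untraced backing.
    Allocator::BackingWriteBarrier(backing_slot);
  }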

Bug: 757440
Change-Id: I56f676808bfa94f468594541f979161b8feb773e
Reviewed-on: https://chromium-review.googlesource.com/931141
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Keishi Hattori <keishi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#539841}
parent ade876eb
@@ -451,6 +451,7 @@ class PLATFORM_EXPORT ThreadHeap {
     return BlinkGC::kVector1ArenaIndex <= arena_index &&
            arena_index <= BlinkGC::kVector4ArenaIndex;
   }
+  static bool IsNormalArenaIndex(int);
   void AllocationPointAdjusted(int arena_index);
   void PromptlyFreed(size_t gc_info_index);
   void ClearArenaAges();
@@ -505,7 +506,6 @@ class PLATFORM_EXPORT ThreadHeap {
   void ResetHeapCounters();
   static int ArenaIndexForObjectSize(size_t);
-  static bool IsNormalArenaIndex(int);
   void CommitCallbackStacks();
   void DecommitCallbackStacks();
...
@@ -21,9 +21,13 @@ void HeapAllocator::BackingFree(void* address) {
   if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
     return;
-  state->CheckObjectNotInCallbackStacks(address);
   HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
+  // Don't promptly free marked backings as they may be registered on the
+  // marking callback stack. On non-incremental-marking GCs the effect is that
+  // prompt freeing is disabled for surviving backings during lazy sweeping.
+  if (header->IsMarked())
+    return;
+  state->CheckObjectNotInCallbackStacks(address);
   NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
   state->Heap().PromptlyFreed(header->GcInfoIndex());
   arena->PromptlyFreeObject(header);
...
@@ -127,6 +127,10 @@ class PLATFORM_EXPORT HeapAllocator {
   static void FreeHashTableBacking(void* address, bool is_weak_table);
   static bool ExpandHashTableBacking(void*, size_t);
+  static void BackingWriteBarrier(void* address) {
+    ThreadState::Current()->Heap().WriteBarrier(address);
+  }
+
   template <typename Return, typename Metadata>
   static Return Malloc(size_t size, const char* type_name) {
     return reinterpret_cast<Return>(ThreadHeap::Allocate<Metadata>(
...
@@ -64,6 +64,10 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
       : IncrementalMarkingScope(thread_state), objects_(objects) {
     EXPECT_TRUE(marking_stack_->IsEmpty());
     for (T* object : objects_) {
+      // Ensure that the object is in the normal arena so we can ignore backing
+      // objects on the marking stack.
+      CHECK(ThreadHeap::IsNormalArenaIndex(
+          PageFromObject(object)->Arena()->ArenaIndex()));
       headers_.push_back(HeapObjectHeader::FromPayload(object));
       EXPECT_FALSE(headers_.back()->IsMarked());
     }
@@ -80,6 +84,10 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
     while (!marking_stack_->IsEmpty()) {
       CallbackStack::Item* item = marking_stack_->Pop();
       T* obj = reinterpret_cast<T*>(item->Object());
+      // Ignore the backing object.
+      if (!ThreadHeap::IsNormalArenaIndex(
+              PageFromObject(obj)->Arena()->ArenaIndex()))
+        continue;
       auto pos = std::find(objects_.begin(), objects_.end(), obj);
       // The following check makes sure that there are no unexpected objects on
       // the marking stack. If it fails then the write barrier fired for an
...
@@ -1699,6 +1699,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
     }
   }
   table_ = temporary_table;
+  Allocator::BackingWriteBarrier(table_);
   if (Traits::kEmptyValueIsZero) {
     memset(original_table, 0, new_table_size * sizeof(ValueType));
@@ -1739,6 +1740,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
 #endif
   table_ = new_table;
+  Allocator::BackingWriteBarrier(table_);
   table_size_ = new_table_size;
   Value* new_entry = nullptr;
@@ -1909,6 +1911,8 @@ void HashTable<Key,
                Allocator>::swap(HashTable& other) {
   DCHECK(!AccessForbidden());
   std::swap(table_, other.table_);
+  Allocator::BackingWriteBarrier(table_);
+  Allocator::BackingWriteBarrier(other.table_);
   std::swap(table_size_, other.table_size_);
   std::swap(key_count_, other.key_count_);
   // std::swap does not work for bit fields.
...
@@ -400,6 +400,7 @@ class VectorBufferBase {
     else
       buffer_ = Allocator::template AllocateVectorBacking<T>(size_to_allocate);
     capacity_ = size_to_allocate / sizeof(T);
+    Allocator::BackingWriteBarrier(buffer_);
   }

   void AllocateExpandedBuffer(size_t new_capacity) {
@@ -412,6 +413,7 @@ class VectorBufferBase {
     buffer_ = Allocator::template AllocateExpandedVectorBacking<T>(
         size_to_allocate);
     capacity_ = size_to_allocate / sizeof(T);
+    Allocator::BackingWriteBarrier(buffer_);
   }

   size_t AllocationSize(size_t capacity) const {
@@ -538,6 +540,8 @@ class VectorBuffer<T, 0, Allocator>
     static_assert(VectorTraits<T>::kCanSwapUsingCopyOrMove,
                   "Cannot swap HeapVectors of TraceWrapperMembers.");
     std::swap(buffer_, other.buffer_);
+    Allocator::BackingWriteBarrier(buffer_);
+    Allocator::BackingWriteBarrier(other.buffer_);
     std::swap(capacity_, other.capacity_);
     std::swap(size_, other.size_);
     if (buffer_) {
@@ -691,6 +695,8 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
     // The easiest case: both buffers are non-inline. We just need to swap the
     // pointers.
     std::swap(buffer_, other.buffer_);
+    Allocator::BackingWriteBarrier(buffer_);
+    Allocator::BackingWriteBarrier(other.buffer_);
     std::swap(capacity_, other.capacity_);
     std::swap(size_, other.size_);
     if (buffer_) {
@@ -761,6 +767,7 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
       other.buffer_ = other.InlineBuffer();
       std::swap(size_, other.size_);
       ANNOTATE_NEW_BUFFER(other.buffer_, inlineCapacity, other.size_);
+      Allocator::BackingWriteBarrier(buffer_);
     } else if (!this_source_begin &&
                other_source_begin) {  // Their buffer is inline, ours is not.
       DCHECK_NE(Buffer(), InlineBuffer());
@@ -770,6 +777,7 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
       buffer_ = InlineBuffer();
       std::swap(size_, other.size_);
       ANNOTATE_NEW_BUFFER(buffer_, inlineCapacity, size_);
+      Allocator::BackingWriteBarrier(other.buffer_);
     } else {  // Both buffers are inline.
       DCHECK(this_source_begin);
       DCHECK(other_source_begin);
...
@@ -100,6 +100,8 @@ class WTF_EXPORT PartitionAllocator {
     Free(ptr);  // Not the system free, the one from this class.
   }

+  static void BackingWriteBarrier(void*) {}
+
   static bool IsAllocationAllowed() { return true; }
   static bool IsObjectResurrectionForbidden() { return false; }
...