Commit d0e0bb6a authored by Keishi Hattori, committed by Commit Bot

Oilpan: Emit write barrier on collection backing assignment

Collection backings use raw pointers instead of Member, so they need manual write barriers.
This CL emits a write barrier on collection backing assignment and disables prompt freeing for marked backings, as they may already be registered on the marking CallbackStack.

Bug: 757440
Change-Id: I56f676808bfa94f468594541f979161b8feb773e
Reviewed-on: https://chromium-review.googlesource.com/931141
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Keishi Hattori <keishi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#539841}
parent ade876eb
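For context on the reasoning in the commit message above, the following is a minimal, self-contained sketch, not Blink code, of a Dijkstra-style incremental-marking write barrier; all names here (ToyHeap, ToyMember, ToyVectorBuffer) are hypothetical. A Member-like handle emits the barrier automatically on every assignment, while a raw backing pointer needs an explicit call, which is the role Allocator::BackingWriteBarrier plays in the hunks below.

#include <vector>

struct ToyObject {
  bool marked = false;
};

struct ToyHeap {
  bool incremental_marking_in_progress = false;
  std::vector<ToyObject*> marking_worklist;

  // Dijkstra-style write barrier: grey a newly stored, unmarked object so
  // incremental marking cannot miss it.
  void WriteBarrier(ToyObject* object) {
    if (!incremental_marking_in_progress || !object || object->marked)
      return;
    object->marked = true;
    marking_worklist.push_back(object);
  }
};

// A Member<T>-like handle emits the barrier on every assignment...
struct ToyMember {
  ToyHeap* heap = nullptr;
  ToyObject* ptr = nullptr;
  ToyMember& operator=(ToyObject* new_ptr) {
    ptr = new_ptr;
    heap->WriteBarrier(new_ptr);  // automatic
    return *this;
  }
};

// ...whereas a collection backing is a raw pointer, so the container code has
// to invoke the barrier explicitly whenever the backing is (re)assigned.
struct ToyVectorBuffer {
  ToyHeap* heap = nullptr;
  ToyObject* buffer_ = nullptr;
  void ReplaceBacking(ToyObject* new_backing) {
    buffer_ = new_backing;
    heap->WriteBarrier(buffer_);  // manual barrier on backing assignment
  }
};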
......
@@ -451,6 +451,7 @@ class PLATFORM_EXPORT ThreadHeap {
return BlinkGC::kVector1ArenaIndex <= arena_index &&
arena_index <= BlinkGC::kVector4ArenaIndex;
}
static bool IsNormalArenaIndex(int);
void AllocationPointAdjusted(int arena_index);
void PromptlyFreed(size_t gc_info_index);
void ClearArenaAges();
......
@@ -505,7 +506,6 @@ class PLATFORM_EXPORT ThreadHeap {
void ResetHeapCounters();
static int ArenaIndexForObjectSize(size_t);
static bool IsNormalArenaIndex(int);
void CommitCallbackStacks();
void DecommitCallbackStacks();
......
......
@@ -21,9 +21,13 @@ void HeapAllocator::BackingFree(void* address) {
if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
return;
state->CheckObjectNotInCallbackStacks(address);
HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
// Don't promptly free marked backings as they may be registered on the
// marking callback stack. The effect on non-incremental marking GCs is that
// prompt freeing is disabled for surviving backings during lazy sweeping.
if (header->IsMarked())
return;
state->CheckObjectNotInCallbackStacks(address);
NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
state->Heap().PromptlyFreed(header->GcInfoIndex());
arena->PromptlyFreeObject(header);
......
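A toy sketch of the guard added above, using hypothetical names: a marked backing may already be registered on the marking callback stack, so handing its memory back to the free list immediately could leave a stale entry behind; skipping prompt free for marked backings defers reclamation to lazy sweeping.

#include <vector>

struct ToyHeader {
  bool marked = false;
};

struct ToyArena {
  // Entries pushed by the write barrier; every entry here is a marked object.
  std::vector<ToyHeader*> marking_worklist;
  std::vector<ToyHeader*> free_list;

  void PromptlyFreeBacking(ToyHeader* header) {
    // A marked backing may already sit on marking_worklist, so freeing it now
    // could leave a dangling worklist entry. Skip; lazy sweeping reclaims it.
    if (header->marked)
      return;
    // Unmarked objects were never pushed onto marking_worklist, so prompt
    // free is safe.
    free_list.push_back(header);
  }
};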
......
@@ -127,6 +127,10 @@ class PLATFORM_EXPORT HeapAllocator {
static void FreeHashTableBacking(void* address, bool is_weak_table);
static bool ExpandHashTableBacking(void*, size_t);
static void BackingWriteBarrier(void* address) {
ThreadState::Current()->Heap().WriteBarrier(address);
}
template <typename Return, typename Metadata>
static Return Malloc(size_t size, const char* type_name) {
return reinterpret_cast<Return>(ThreadHeap::Allocate<Metadata>(
......
......
@@ -64,6 +64,10 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
: IncrementalMarkingScope(thread_state), objects_(objects) {
EXPECT_TRUE(marking_stack_->IsEmpty());
for (T* object : objects_) {
// Ensure that the object is in the normal arena so we can ignore backing
// objects on the marking stack.
CHECK(ThreadHeap::IsNormalArenaIndex(
PageFromObject(object)->Arena()->ArenaIndex()));
headers_.push_back(HeapObjectHeader::FromPayload(object));
EXPECT_FALSE(headers_.back()->IsMarked());
}
......
@@ -80,6 +84,10 @@ class ExpectWriteBarrierFires : public IncrementalMarkingScope {
while (!marking_stack_->IsEmpty()) {
CallbackStack::Item* item = marking_stack_->Pop();
T* obj = reinterpret_cast<T*>(item->Object());
// Ignore the backing object.
if (!ThreadHeap::IsNormalArenaIndex(
PageFromObject(obj)->Arena()->ArenaIndex()))
continue;
auto pos = std::find(objects_.begin(), objects_.end(), obj);
// The following check makes sure that there are no unexpected objects on
// the marking stack. If it fails then the write barrier fired for an
......
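A toy illustration of the filtering used by the test above, with hypothetical names: regular objects and collection backings live in different arenas, so an arena-index predicate lets the test skip backing-store entries that the new barrier pushes onto the marking stack.

#include <vector>

enum ToyArenaIndex {
  kNormalArena = 0,
  kVectorBackingArena = 1,
  kHashTableBackingArena = 2,
};

struct ToyStackEntry {
  void* object;
  ToyArenaIndex arena;
};

inline bool IsNormalArena(ToyArenaIndex index) {
  return index == kNormalArena;
}

// Collect only the entries that correspond to regular objects, ignoring
// backing stores, mirroring the IsNormalArenaIndex checks in the test above.
inline std::vector<void*> NormalObjectsOnStack(
    const std::vector<ToyStackEntry>& marking_stack) {
  std::vector<void*> result;
  for (const ToyStackEntry& entry : marking_stack) {
    if (IsNormalArena(entry.arena))
      result.push_back(entry.object);
  }
  return result;
}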
......
@@ -1699,6 +1699,7 @@ HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>::
}
}
table_ = temporary_table;
Allocator::BackingWriteBarrier(table_);
if (Traits::kEmptyValueIsZero) {
memset(original_table, 0, new_table_size * sizeof(ValueType));
......
@@ -1739,6 +1740,7 @@
#endif
table_ = new_table;
Allocator::BackingWriteBarrier(table_);
table_size_ = new_table_size;
Value* new_entry = nullptr;
......
@@ -1909,6 +1911,8 @@ void HashTable<Key,
Allocator>::swap(HashTable& other) {
DCHECK(!AccessForbidden());
std::swap(table_, other.table_);
Allocator::BackingWriteBarrier(table_);
Allocator::BackingWriteBarrier(other.table_);
std::swap(table_size_, other.table_size_);
std::swap(key_count_, other.key_count_);
// std::swap does not work for bit fields.
......
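Minimal sketch with toy types, not the WTF containers: a swap is effectively a backing assignment on both sides, so each side emits a barrier for the pointer it now holds, mirroring the paired BackingWriteBarrier calls added to HashTable::swap above and to the VectorBuffer swap paths below.

#include <utility>

struct ToyHeap {
  // Greys `object` if incremental marking is in progress (elided here).
  void WriteBarrier(void* object) { (void)object; }
};

template <typename Backing>
void SwapBackings(Backing*& lhs, Backing*& rhs, ToyHeap& heap) {
  std::swap(lhs, rhs);
  heap.WriteBarrier(lhs);  // barrier for the left container's new backing
  heap.WriteBarrier(rhs);  // barrier for the right container's new backing
}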
......
@@ -400,6 +400,7 @@ class VectorBufferBase {
else
buffer_ = Allocator::template AllocateVectorBacking<T>(size_to_allocate);
capacity_ = size_to_allocate / sizeof(T);
Allocator::BackingWriteBarrier(buffer_);
}
void AllocateExpandedBuffer(size_t new_capacity) {
......
@@ -412,6 +413,7 @@ class VectorBufferBase {
buffer_ = Allocator::template AllocateExpandedVectorBacking<T>(
size_to_allocate);
capacity_ = size_to_allocate / sizeof(T);
Allocator::BackingWriteBarrier(buffer_);
}
size_t AllocationSize(size_t capacity) const {
......
@@ -538,6 +540,8 @@ class VectorBuffer<T, 0, Allocator>
static_assert(VectorTraits<T>::kCanSwapUsingCopyOrMove,
"Cannot swap HeapVectors of TraceWrapperMembers.");
std::swap(buffer_, other.buffer_);
Allocator::BackingWriteBarrier(buffer_);
Allocator::BackingWriteBarrier(other.buffer_);
std::swap(capacity_, other.capacity_);
std::swap(size_, other.size_);
if (buffer_) {
......
@@ -691,6 +695,8 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
// The easiest case: both buffers are non-inline. We just need to swap the
// pointers.
std::swap(buffer_, other.buffer_);
Allocator::BackingWriteBarrier(buffer_);
Allocator::BackingWriteBarrier(other.buffer_);
std::swap(capacity_, other.capacity_);
std::swap(size_, other.size_);
if (buffer_) {
......
@@ -761,6 +767,7 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
other.buffer_ = other.InlineBuffer();
std::swap(size_, other.size_);
ANNOTATE_NEW_BUFFER(other.buffer_, inlineCapacity, other.size_);
Allocator::BackingWriteBarrier(buffer_);
} else if (!this_source_begin &&
other_source_begin) { // Their buffer is inline, ours is not.
DCHECK_NE(Buffer(), InlineBuffer());
......
@@ -770,6 +777,7 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
buffer_ = InlineBuffer();
std::swap(size_, other.size_);
ANNOTATE_NEW_BUFFER(buffer_, inlineCapacity, size_);
Allocator::BackingWriteBarrier(other.buffer_);
} else { // Both buffers are inline.
DCHECK(this_source_begin);
DCHECK(other_source_begin);
......
......
@@ -100,6 +100,8 @@ class WTF_EXPORT PartitionAllocator {
Free(ptr); // Not the system free, the one from this class.
}
static void BackingWriteBarrier(void*) {}
static bool IsAllocationAllowed() { return true; }
static bool IsObjectResurrectionForbidden() { return false; }
......
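A sketch of the design choice, with toy types and hypothetical names: the containers are parameterized over an allocator and invoke the same static hook on every backing assignment; the garbage-collected allocator forwards to the heap's write barrier, while the off-heap partition allocator keeps the hook as an empty inline function.

struct ToyHeap {
  // Greys `object` if incremental marking is in progress (elided here).
  void WriteBarrier(void* object) { (void)object; }
};

inline ToyHeap& CurrentToyHeap() {
  static ToyHeap heap;
  return heap;
}

struct ToyHeapAllocator {
  static void BackingWriteBarrier(void* address) {
    CurrentToyHeap().WriteBarrier(address);  // analogous to the HeapAllocator hook
  }
};

struct ToyPartitionAllocator {
  static void BackingWriteBarrier(void*) {}  // off-heap memory is never traced
};

template <typename T, typename Allocator>
struct ToyBuffer {
  T* buffer_ = nullptr;
  void SetBacking(T* new_backing) {
    buffer_ = new_backing;
    Allocator::BackingWriteBarrier(buffer_);  // no-op for ToyPartitionAllocator
  }
};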