Commit fdd33088 authored by Michael Lippautz, committed by Commit Bot

[oilpan] Faster write barrier guard

Add a global counter of threads that are currently running incremental
marking.

This guard can be checked as the first bail-out condition in any write
barrier, since it requires only a single load and avoids going through
TLS or page lookups.

Bug: chromium:757440
Change-Id: Ida63e560bb52ccd66da739139c0510229b05a550
Reviewed-on: https://chromium-review.googlesource.com/995272
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#548033}
parent 65e7fbb7
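
In today's terms, the guard this change introduces is a single process-global atomic counter consulted before any thread-local or per-page state. The following is a minimal, self-contained sketch of the pattern, not the Chromium code: the names are hypothetical, and `std::atomic` stands in for `base/atomicops.h`.

```cpp
#include <atomic>
#include <cassert>

// Process-global count of threads currently running incremental marking;
// plays the role of ThreadState::incremental_marking_counter_.
std::atomic<long> g_marking_threads{0};

// The cheap guard: one load, no TLS access, no page lookup.
inline bool IsAnyIncrementalMarking() {
  return g_marking_threads.load(std::memory_order_relaxed) > 0;
}

// Hypothetical stand-in for the per-thread ThreadState.
struct ThreadStateSketch {
  bool incremental_marking = false;

  void EnableIncrementalMarkingBarrier() {
    assert(!incremental_marking);
    // Full-barrier RMW, like base::subtle::Barrier_AtomicIncrement.
    g_marking_threads.fetch_add(1, std::memory_order_seq_cst);
    incremental_marking = true;
  }

  void DisableIncrementalMarkingBarrier() {
    assert(incremental_marking);
    g_marking_threads.fetch_sub(1, std::memory_order_seq_cst);
    incremental_marking = false;
  }
};

// Shape of a write barrier after this patch: the global counter is the
// first bail-out, so the common "no GC running anywhere" case costs a
// single load.
void WriteBarrier(ThreadStateSketch* self, const void* value) {
  if (!value || !IsAnyIncrementalMarking())
    return;  // Fast path: single global load.
  if (!self->incremental_marking)
    return;  // Exact per-thread check.
  // ... slow path: look up the page for `value` and mark the object ...
}

int main() {
  ThreadStateSketch state;
  assert(!IsAnyIncrementalMarking());
  state.EnableIncrementalMarkingBarrier();
  assert(IsAnyIncrementalMarking());
  state.DisableIncrementalMarkingBarrier();
  assert(!IsAnyIncrementalMarking());
}
```

This mirrors the EnableDisableBarrier test in the diff below: enable flips both the exact per-thread flag and the global hint, and disable restores both.
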
@@ -753,18 +753,6 @@ bool ThreadHeap::AdvanceLazySweep(double deadline_seconds) {
   return true;
 }
 
-void ThreadHeap::EnableIncrementalMarkingBarrier() {
-  thread_state_->SetIncrementalMarking(true);
-  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
-    arenas_[i]->EnableIncrementalMarkingBarrier();
-}
-
-void ThreadHeap::DisableIncrementalMarkingBarrier() {
-  thread_state_->SetIncrementalMarking(false);
-  for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i)
-    arenas_[i]->DisableIncrementalMarkingBarrier();
-}
-
 void ThreadHeap::WriteBarrier(const void* value) {
   if (!value || !thread_state_->IsIncrementalMarking())
     return;
@@ -774,7 +762,6 @@ void ThreadHeap::WriteBarrier(const void* value) {
 
 void ThreadHeap::WriteBarrierInternal(BasePage* page, const void* value) {
   DCHECK(thread_state_->IsIncrementalMarking());
-  DCHECK(page->IsIncrementalMarking());
   DCHECK(value);
   HeapObjectHeader* const header =
       page->IsLargeObjectPage()
......
@@ -472,11 +472,6 @@ class PLATFORM_EXPORT ThreadHeap {
   enum SnapshotType { kHeapSnapshot, kFreelistSnapshot };
   void TakeSnapshot(SnapshotType);
 
-  // Enables or disables the incremental marking barrier that intercepts
-  // writes to Member<T> objects.
-  void EnableIncrementalMarkingBarrier();
-  void DisableIncrementalMarkingBarrier();
-
   // Write barrier used after adding an object to the graph.
   void WriteBarrier(const void* value);
......
@@ -130,7 +130,7 @@ class PLATFORM_EXPORT HeapAllocator {
   static void BackingWriteBarrier(void* address) {
 #if BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
-    if (!address)
+    if (!address || !ThreadState::IsAnyIncrementalMarking())
       return;
     ThreadState* state = PageFromObject(address)->Arena()->GetThreadState();
     state->Heap().WriteBarrier(address);
@@ -214,6 +214,8 @@ class PLATFORM_EXPORT HeapAllocator {
   template <typename T, typename Traits>
   static void NotifyNewObject(T* object) {
 #if BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
+    if (!ThreadState::IsAnyIncrementalMarking())
+      return;
     // The object may have been in-place constructed as part of a large object.
     // It is not safe to retrieve the page from the object here.
     ThreadState* const thread_state = ThreadState::Current();
@@ -239,6 +241,8 @@ class PLATFORM_EXPORT HeapAllocator {
   template <typename T, typename Traits>
   static void NotifyNewObjects(T* array, size_t len) {
 #if BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
+    if (!ThreadState::IsAnyIncrementalMarking())
+      return;
     // The object may have been in-place constructed as part of a large object.
     // It is not safe to retrieve the page from the object here.
     ThreadState* const thread_state = ThreadState::Current();
......
@@ -416,18 +416,6 @@ bool BaseArena::WillObjectBeLazilySwept(BasePage* page,
   return true;
 }
 
-void BaseArena::EnableIncrementalMarkingBarrier() {
-  DCHECK(SweepingCompleted());
-  for (BasePage* page = first_page_; page; page = page->Next())
-    page->SetIncrementalMarking(true);
-}
-
-void BaseArena::DisableIncrementalMarkingBarrier() {
-  DCHECK(SweepingCompleted());
-  for (BasePage* page = first_page_; page; page = page->Next())
-    page->SetIncrementalMarking(false);
-}
-
 NormalPageArena::NormalPageArena(ThreadState* state, int index)
     : BaseArena(state, index),
       current_allocation_point_(nullptr),
@@ -1276,8 +1264,7 @@ BasePage::BasePage(PageMemory* storage, BaseArena* arena)
       storage_(storage),
       arena_(arena),
       next_(nullptr),
-      swept_(true),
-      incremental_marking_(arena->GetThreadState()->IsIncrementalMarking()) {
+      swept_(true) {
 #if DCHECK_IS_ON()
   DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
 #endif
......
@@ -411,10 +411,6 @@ class BasePage {
     swept_ = false;
   }
 
-  void SetIncrementalMarking(bool value) { incremental_marking_ = value; }
-  bool IsIncrementalMarking() const { return incremental_marking_; }
-
   // Returns true if magic number is valid.
   bool IsValid() const;
@@ -432,8 +428,7 @@ class BasePage {
   // Track the sweeping state of a page. Set to false at the start of a sweep,
   // true upon completion of lazy sweeping.
   bool swept_;
-  // Track whether incremental marking is currently running.
-  bool incremental_marking_;
 
   friend class BaseArena;
 };
@@ -779,9 +774,6 @@ class PLATFORM_EXPORT BaseArena {
   bool WillObjectBeLazilySwept(BasePage*, void*) const;
 
-  void EnableIncrementalMarkingBarrier();
-  void DisableIncrementalMarkingBarrier();
-
   virtual void Verify(){};
   virtual void VerifyMarking(){};
......
@@ -51,7 +51,7 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
     ThreadState::AtomicPauseScope atomic_pause_scope_(thread_state_);
     EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
     EXPECT_TRUE(not_fully_constructed_worklist_->IsGlobalEmpty());
-    heap_.EnableIncrementalMarkingBarrier();
+    thread_state->EnableIncrementalMarkingBarrier();
     thread_state->current_gc_data_.visitor =
         MarkingVisitor::Create(thread_state, MarkingVisitor::kGlobalMarking);
   }
@@ -59,7 +59,7 @@ class IncrementalMarkingScope : public IncrementalMarkingScopeBase {
   ~IncrementalMarkingScope() {
     EXPECT_TRUE(marking_worklist_->IsGlobalEmpty());
     EXPECT_TRUE(not_fully_constructed_worklist_->IsGlobalEmpty());
-    heap_.DisableIncrementalMarkingBarrier();
+    thread_state_->DisableIncrementalMarkingBarrier();
     // Need to clear out unused worklists that might have been polluted during
     // test.
     heap_.GetPostMarkingWorklist()->Clear();
@@ -193,14 +193,11 @@ class Object : public GarbageCollected<Object> {
 TEST(IncrementalMarkingTest, EnableDisableBarrier) {
   Object* object = Object::Create();
   BasePage* page = PageFromObject(object);
-  ThreadHeap& heap = ThreadState::Current()->Heap();
-  EXPECT_FALSE(page->IsIncrementalMarking());
   EXPECT_FALSE(ThreadState::Current()->IsIncrementalMarking());
-  heap.EnableIncrementalMarkingBarrier();
-  EXPECT_TRUE(page->IsIncrementalMarking());
+  ThreadState::Current()->EnableIncrementalMarkingBarrier();
   EXPECT_TRUE(ThreadState::Current()->IsIncrementalMarking());
-  heap.DisableIncrementalMarkingBarrier();
-  EXPECT_FALSE(page->IsIncrementalMarking());
+  EXPECT_TRUE(ThreadState::IsAnyIncrementalMarking());
+  ThreadState::Current()->DisableIncrementalMarkingBarrier();
   EXPECT_FALSE(ThreadState::Current()->IsIncrementalMarking());
 }
......
@@ -272,13 +272,14 @@ class Member : public MemberBase<T, TracenessMemberConfiguration::kTraced> {
  protected:
   ALWAYS_INLINE void WriteBarrier(const T* value) const {
 #if BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
-    if (LIKELY(value && !this->IsHashTableDeletedValue())) {
-      // The following method for retrieving a page works as allocation of
-      // mixins on large object pages is prohibited.
-      BasePage* const page = PageFromObject(value);
-      if (page->IsIncrementalMarking()) {
-        DCHECK(ThreadState::Current()->IsIncrementalMarking());
-        ThreadState::Current()->Heap().WriteBarrierInternal(page, value);
+    if (LIKELY(value && !this->IsHashTableDeletedValue()) &&
+        ThreadState::IsAnyIncrementalMarking()) {
+      ThreadState* const thread_state = ThreadState::Current();
+      if (thread_state->IsIncrementalMarking()) {
+        // The following method for retrieving a page works as allocation of
+        // mixins on large object pages is prohibited.
+        BasePage* const page = PageFromObject(value);
+        thread_state->Heap().WriteBarrierInternal(page, value);
       }
     }
 #endif  // BUILDFLAG(BLINK_HEAP_INCREMENTAL_MARKING)
......
@@ -37,6 +37,7 @@
 #include <limits>
 #include <memory>
 
+#include "base/atomicops.h"
 #include "base/location.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "build/build_config.h"
@@ -1233,6 +1234,21 @@ void ThreadState::InvokePreFinalizers() {
   }
 }
 
+// static
+base::subtle::AtomicWord ThreadState::incremental_marking_counter_ = 0;
+
+void ThreadState::EnableIncrementalMarkingBarrier() {
+  CHECK(!IsIncrementalMarking());
+  base::subtle::Barrier_AtomicIncrement(&incremental_marking_counter_, 1);
+  SetIncrementalMarking(true);
+}
+
+void ThreadState::DisableIncrementalMarkingBarrier() {
+  CHECK(IsIncrementalMarking());
+  base::subtle::Barrier_AtomicIncrement(&incremental_marking_counter_, -1);
+  SetIncrementalMarking(false);
+}
+
 void ThreadState::IncrementalMarkingStart() {
   VLOG(2) << "[state:" << this << "] "
           << "IncrementalMarking: Start";
@@ -1243,7 +1259,7 @@ void ThreadState::IncrementalMarkingStart() {
   MarkPhasePrologue(BlinkGC::kNoHeapPointersOnStack,
                     BlinkGC::kIncrementalMarking, BlinkGC::kIdleGC);
   MarkPhaseVisitRoots();
-  Heap().EnableIncrementalMarkingBarrier();
+  EnableIncrementalMarkingBarrier();
   ScheduleIncrementalMarkingStep();
   DCHECK(IsMarkingInProgress());
 }
@@ -1268,6 +1284,7 @@ void ThreadState::IncrementalMarkingFinalize() {
   VLOG(2) << "[state:" << this << "] "
           << "IncrementalMarking: Finalize";
   SetGCState(kNoGCScheduled);
+  DisableIncrementalMarkingBarrier();
   AtomicPauseScope atomic_pause_scope(this);
   DCHECK(IsMarkingInProgress());
   RecursiveMutexLocker persistent_lock(
@@ -1276,7 +1293,6 @@ void ThreadState::IncrementalMarkingFinalize() {
   bool complete =
       MarkPhaseAdvanceMarking(std::numeric_limits<double>::infinity());
   CHECK(complete);
-  Heap().DisableIncrementalMarkingBarrier();
   MarkPhaseEpilogue(current_gc_data_.marking_type);
   PreSweep(current_gc_data_.marking_type, BlinkGC::kLazySweeping);
   DCHECK(IsSweepingInProgress());
......
@@ -33,6 +33,7 @@
 #include <memory>
 
+#include "base/atomicops.h"
 #include "base/macros.h"
 #include "platform/PlatformExport.h"
 #include "platform/bindings/ScriptForbiddenScope.h"
@@ -215,6 +216,14 @@ class PLATFORM_EXPORT ThreadState {
     ThreadState* state_;
   };
 
+  // Returns true if any thread is currently running incremental marking on
+  // its heap, and false otherwise. For an exact per-thread check use
+  // ThreadState::IsIncrementalMarking().
+  static bool IsAnyIncrementalMarking() {
+    // Stores use a full barrier, which allows the simplest relaxed load here.
+    return base::subtle::NoBarrier_Load(&incremental_marking_counter_) > 0;
+  }
+
   static void AttachMainThread();
 
   // Associate ThreadState object with the current thread. After this
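
The comment in the added getter pins down the memory-ordering contract: the enable/disable paths use a full-barrier increment (`Barrier_AtomicIncrement`), so the hot read side can get away with a plain relaxed load. A thread always observes its own earlier increment by program order, and, assuming (as in Oilpan's per-thread heap model) that a barrier only guards the executing thread's own heap, a stale read of another thread's update is harmless: a stale zero skips a barrier that thread did not need, and a stale positive value merely falls through to the exact `IsIncrementalMarking()` check. A sketch of the same contract in `std::atomic` terms, as an assumed equivalence rather than the Chromium implementation:

```cpp
#include <atomic>

// Stands in for ThreadState::incremental_marking_counter_.
std::atomic<long> g_counter{0};

// Write side (Enable/DisableIncrementalMarkingBarrier): a sequentially
// consistent read-modify-write mirrors the full-barrier semantics of
// base::subtle::Barrier_AtomicIncrement.
void OnEnterIncrementalMarking() {
  g_counter.fetch_add(1, std::memory_order_seq_cst);
}

void OnLeaveIncrementalMarking() {
  g_counter.fetch_sub(1, std::memory_order_seq_cst);
}

// Read side (IsAnyIncrementalMarking): a relaxed load suffices; it may be
// stale with respect to other threads, but it is always up to date for the
// calling thread's own prior increments.
bool IsAnyIncrementalMarkingSketch() {
  return g_counter.load(std::memory_order_relaxed) > 0;
}
```
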
@@ -277,6 +286,9 @@ class PLATFORM_EXPORT ThreadState {
   void IncrementalMarkingStep();
   void IncrementalMarkingFinalize();
 
+  void EnableIncrementalMarkingBarrier();
+  void DisableIncrementalMarkingBarrier();
+
   // A GC runs in the following sequence.
   //
   // 1) preGC() is called.
@@ -575,6 +587,11 @@ class PLATFORM_EXPORT ThreadState {
   template <typename T>
   friend class PrefinalizerRegistration;
 
+  // Number of ThreadState objects that are currently executing incremental
+  // marking. The counter is incremented when a ThreadState enters
+  // incremental marking and decremented when it finishes.
+  static base::subtle::AtomicWord incremental_marking_counter_;
+
   ThreadState();
   ~ThreadState();
......