Commit 03474222 authored by Etienne Pierre-doray, committed by Commit Bot

[Blink Heap]: Marking uses jobs API.

Note: This CL enables the BlinkHeapConcurrentMarking feature to get
the desired behavior. However, enabling BlinkHeapConcurrentMarking
should land on its own first.

active_markers + GlobalPoolSize() is used to determine
the desired number of workers.
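
In sketch form, condensed from the GetMaxConcurrency callback this CL
passes to base::PostJob in ScheduleConcurrentMarking (not a separate
helper in the CL; it assumes access to ThreadState internals):

  // Desired concurrency: markers already running plus work available
  // for stealing, clamped to the number of concurrent task ids the
  // marking worklists support.
  size_t GetMaxConcurrency(ThreadState* state) {
    return std::min<size_t>(
        state->Heap().ConcurrentMarkingGlobalWorkSize() +
            state->active_markers_.load(std::memory_order_relaxed),
        MarkingWorklist::kNumTasks - WorklistTaskId::ConcurrentThreadBase);
  }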

NotifyConcurrencyIncrease() is called periodically from
ConcurrentMarkingStep() if GlobalPoolSize() > 0,
to make sure enough workers are scheduled.
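
Condensed from ConcurrentMarkingStep() in the diff below, the periodic
step amounts to:

  bool ThreadState::ConcurrentMarkingStep() {
    current_gc_data_.visitor->FlushMarkingWorklists();
    if (Heap().HasWorkForConcurrentMarking()) {
      // Stealable work exists; ask the scheduler to ramp workers back up.
      marker_handle_.NotifyConcurrencyIncrease();
      return false;  // Marking is not done yet.
    }
    base::AutoLock lock(concurrent_marker_lock_);
    return active_markers_.load(std::memory_order_relaxed) == 0 &&
           !Heap().HasWorkForConcurrentMarking();
  }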

CancelableTaskScheduler is no longer used and is deleted in this CL.

Benchmark: v8.browsing_desktop on win-10-perf, story browse:news:cnn:2018:
blink-gc-atomic-pause-mark-transitive-closure: 13% improvement
blink-gc-atomic-pause-sweep-and-compact: 5% improvement
blink-gc-complete-sweep: 34% improvement
blink-gc-total: 2% improvement
https://pinpoint-dot-chromeperf.appspot.com/job/136a60ce620000

Bug: 1046343
Change-Id: Ie2957abf200037ccacc017b1e38dc41f47811ee0
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2029609
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#751502}
parent e27879d0
third_party/blink/renderer/platform/heap/BUILD.gn

@@ -45,8 +45,6 @@ blink_platform_sources("heap") {
     "blink_gc.h",
     "blink_gc_memory_dump_provider.cc",
     "blink_gc_memory_dump_provider.h",
-    "cancelable_task_scheduler.cc",
-    "cancelable_task_scheduler.h",
     "collection_support/heap_hash_table_backing.h",
     "collection_support/heap_linked_stack.h",
     "collection_support/heap_vector_backing.h",
@@ -149,7 +147,6 @@ jumbo_source_set("blink_heap_unittests_sources") {
   sources = [
     "../testing/run_all_tests.cc",
     "blink_gc_memory_dump_provider_test.cc",
-    "cancelable_task_scheduler_test.cc",
     "card_table_test.cc",
     "collection_support/heap_linked_stack_test.cc",
     "concurrent_marking_test.cc",
...
deleted file: third_party/blink/renderer/platform/heap/cancelable_task_scheduler.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"

#include "base/logging.h"
#include "base/macros.h"
#include "base/task_runner.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"

namespace blink {

class CancelableTaskScheduler::TaskData {
  USING_FAST_MALLOC(TaskData);
  DISALLOW_COPY_AND_ASSIGN(TaskData);

 public:
  TaskData(Task task, CancelableTaskScheduler* scheduler)
      : task_(std::move(task)), scheduler_(scheduler), status_(kWaiting) {}

  ~TaskData() {
    // The task runner is responsible for unregistering the task in case the
    // task hasn't been cancelled.
    if (TryCancel()) {
      scheduler_->UnregisterAndSignal(this);
    }
  }

  void Run() {
    if (TryRun()) {
      std::move(task_).Run();
      scheduler_->UnregisterAndSignal(this);
    }
  }

  bool TryCancel() {
    Status expected = kWaiting;
    return status_.compare_exchange_strong(expected, kCancelled,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
  }

 private:
  // Identifies the state a cancelable task is in:
  // |kWaiting|: The task is scheduled and waiting to be executed. {TryRun}
  //   will succeed.
  // |kCancelled|: The task has been cancelled. {TryRun} will fail.
  // |kRunning|: The task is currently running and cannot be cancelled anymore.
  enum Status : uint8_t { kWaiting, kCancelled, kRunning };

  bool TryRun() {
    Status expected = kWaiting;
    return status_.compare_exchange_strong(expected, kRunning,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
  }

  Task task_;
  CancelableTaskScheduler* const scheduler_;
  std::atomic<Status> status_;
};

CancelableTaskScheduler::CancelableTaskScheduler(
    scoped_refptr<base::TaskRunner> task_runner)
    : cond_var_(&lock_), task_runner_(std::move(task_runner)) {}

CancelableTaskScheduler::~CancelableTaskScheduler() {
  base::AutoLock lock(lock_);
  CHECK(tasks_.IsEmpty());
}

void CancelableTaskScheduler::ScheduleTask(Task task) {
  std::unique_ptr<TaskData> task_data = Register(std::move(task));
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&TaskData::Run, std::move(task_data)));
}

size_t CancelableTaskScheduler::CancelAndWait() {
  size_t result = 0;
  base::AutoLock lock(lock_);
  while (!tasks_.IsEmpty()) {
    result += RemoveCancelledTasks();
    if (!tasks_.IsEmpty()) {
      cond_var_.Wait();
    }
  }
  return result;
}

std::unique_ptr<CancelableTaskScheduler::TaskData>
CancelableTaskScheduler::Register(Task task) {
  auto task_data = std::make_unique<TaskData>(std::move(task), this);
  base::AutoLock lock(lock_);
  tasks_.insert(task_data.get());
  return task_data;
}

void CancelableTaskScheduler::UnregisterAndSignal(TaskData* task_data) {
  base::AutoLock lock(lock_);
  CHECK(tasks_.Contains(task_data));
  tasks_.erase(task_data);
  cond_var_.Signal();
}

// This function is needed because WTF::HashSet::erase invalidates all
// iterators. Returns the number of removed tasks.
size_t CancelableTaskScheduler::RemoveCancelledTasks() {
  WTF::Vector<TaskData*> to_be_removed;
  // Assume worst case.
  to_be_removed.ReserveCapacity(tasks_.size());
  for (TaskData* task : tasks_) {
    if (task->TryCancel()) {
      to_be_removed.push_back(task);
    }
  }
  tasks_.RemoveAll(to_be_removed);
  return to_be_removed.size();
}

size_t CancelableTaskScheduler::NumberOfTasksForTesting() const {
  base::AutoLock lock(lock_);
  return tasks_.size();
}

}  // namespace blink
deleted file: third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_

#include <memory>

#include "base/memory/scoped_refptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/functional.h"
#include "third_party/blink/renderer/platform/wtf/hash_set.h"

namespace base {
class TaskRunner;
}

namespace blink {

// CancelableTaskScheduler allows for scheduling tasks that can be cancelled
// before they are invoked. The user is responsible for synchronizing
// completion of tasks and destruction of CancelableTaskScheduler.
class PLATFORM_EXPORT CancelableTaskScheduler final {
  USING_FAST_MALLOC(CancelableTaskScheduler);

 public:
  using Task = WTF::CrossThreadOnceFunction<void()>;

  explicit CancelableTaskScheduler(scoped_refptr<base::TaskRunner>);
  ~CancelableTaskScheduler();

  // Schedules a task to run on the TaskRunner.
  void ScheduleTask(Task);

  // Cancels all not-yet-started tasks and waits for running ones to complete.
  // Returns the number of cancelled (not executed) tasks.
  size_t CancelAndWait();

 private:
  class TaskData;

  template <class T>
  friend class CancelableTaskSchedulerTest;

  std::unique_ptr<TaskData> Register(Task);
  void UnregisterAndSignal(TaskData*);
  size_t RemoveCancelledTasks();
  size_t NumberOfTasksForTesting() const;

  WTF::HashSet<TaskData*> tasks_;
  mutable base::Lock lock_;
  base::ConditionVariable cond_var_;
  scoped_refptr<base::TaskRunner> task_runner_;
};

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_
deleted file: third_party/blink/renderer/platform/heap/cancelable_task_scheduler_test.cc

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"

#include <atomic>

#include "base/memory/scoped_refptr.h"
#include "base/task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/heap/heap_test_utilities.h"
#include "third_party/blink/renderer/platform/scheduler/public/worker_pool.h"
#include "third_party/blink/renderer/platform/scheduler/test/fake_task_runner.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"

namespace blink {

class ParallelTaskRunner : public base::TaskRunner {
 public:
  bool PostDelayedTask(const base::Location& location,
                       base::OnceClosure task,
                       base::TimeDelta) override {
    worker_pool::PostTask(location, WTF::CrossThreadBindOnce(std::move(task)));
    return true;
  }

  // No-op: tasks posted through worker_pool run on their own; there is no
  // queue to pump.
  void RunUntilIdle() {}
};

template <class Runner>
class CancelableTaskSchedulerTest : public TestSupportingGC {
 public:
  using Task = CancelableTaskScheduler::Task;

  void ScheduleTask(Task callback) {
    scheduler_.ScheduleTask(std::move(callback));
  }

  void RunTaskRunner() { task_runner_->RunUntilIdle(); }

  size_t CancelAndWait() { return scheduler_.CancelAndWait(); }

  size_t NumberOfRegisteredTasks() const {
    return scheduler_.NumberOfTasksForTesting();
  }

 private:
  scoped_refptr<Runner> task_runner_ = base::MakeRefCounted<Runner>();
  CancelableTaskScheduler scheduler_{task_runner_};
};

using RunnerTypes =
    ::testing::Types<scheduler::FakeTaskRunner, ParallelTaskRunner>;
TYPED_TEST_SUITE(CancelableTaskSchedulerTest, RunnerTypes);

TYPED_TEST(CancelableTaskSchedulerTest, EmptyCancelTasks) {
  const size_t cancelled = this->CancelAndWait();
  EXPECT_EQ(0u, cancelled);
  EXPECT_EQ(0u, this->NumberOfRegisteredTasks());
}

TYPED_TEST(CancelableTaskSchedulerTest, RunAndCancelTasks) {
  static constexpr size_t kNumberOfTasks = 10u;
  const auto callback = [](std::atomic<int>* i) { ++(*i); };
  std::atomic<int> var{0};

  for (size_t i = 0; i < kNumberOfTasks; ++i) {
    this->ScheduleTask(
        WTF::CrossThreadBindOnce(callback, WTF::CrossThreadUnretained(&var)));
    EXPECT_GE(i + 1, this->NumberOfRegisteredTasks());
  }

  this->RunTaskRunner();

  // Tasks unregister themselves after running.
  EXPECT_LE(0u, this->NumberOfRegisteredTasks());

  const size_t cancelled = this->CancelAndWait();
  EXPECT_EQ(0u, this->NumberOfRegisteredTasks());
  EXPECT_EQ(kNumberOfTasks, var + cancelled);
}

TEST(CancelableTaskSchedulerTest, RemoveTasksFromQueue) {
  auto task_runner = base::MakeRefCounted<scheduler::FakeTaskRunner>();
  CancelableTaskScheduler scheduler{task_runner};
  int var = 0;

  scheduler.ScheduleTask(WTF::CrossThreadBindOnce(
      [](int* var) { ++(*var); }, WTF::CrossThreadUnretained(&var)));

  auto tasks = task_runner->TakePendingTasksForTesting();
  // Clearing the task queue should destroy all cancelable closures, which in
  // turn will notify CancelableTaskScheduler to remove corresponding tasks.
  tasks.clear();
  EXPECT_EQ(0, var);
}

}  // namespace blink
third_party/blink/renderer/platform/heap/heap.cc

@@ -264,11 +264,11 @@ void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
 namespace {

-template <typename Worklist, typename Callback>
-bool DrainWorklistWithDeadline(base::TimeTicks deadline,
-                               Worklist* worklist,
-                               Callback callback,
-                               int task_id) {
+template <typename Worklist, typename Callback, typename YieldPredicate>
+bool DrainWorklist(Worklist* worklist,
+                   Callback callback,
+                   YieldPredicate should_yield,
+                   int task_id) {
   const size_t kDeadlineCheckInterval = 1250;
   size_t processed_callback_count = 0;
@@ -276,15 +276,34 @@ bool DrainWorklistWithDeadline(base::TimeTicks deadline,
   while (worklist->Pop(task_id, &item)) {
     callback(item);
     if (++processed_callback_count == kDeadlineCheckInterval) {
-      if (deadline <= base::TimeTicks::Now()) {
+      if (should_yield())
         return false;
-      }
       processed_callback_count = 0;
     }
   }
   return true;
 }

+template <typename Worklist, typename Callback>
+bool DrainWorklistWithDeadline(base::TimeTicks deadline,
+                               Worklist* worklist,
+                               Callback callback,
+                               int task_id) {
+  return DrainWorklist(
+      worklist, std::move(callback),
+      [deadline]() { return deadline <= base::TimeTicks::Now(); }, task_id);
+}
+
+template <typename Worklist, typename Callback>
+bool DrainWorklistWithYielding(base::JobDelegate* delegate,
+                               Worklist* worklist,
+                               Callback callback,
+                               int task_id) {
+  return DrainWorklist(
+      worklist, std::move(callback),
+      [delegate]() { return delegate->ShouldYield(); }, task_id);
+}
+
 }  // namespace

 bool ThreadHeap::InvokeEphemeronCallbacks(MarkingVisitor* visitor,
@@ -413,14 +432,19 @@ bool ThreadHeap::HasWorkForConcurrentMarking() const {
          !write_barrier_worklist_->IsGlobalPoolEmpty();
 }

+size_t ThreadHeap::ConcurrentMarkingGlobalWorkSize() const {
+  return marking_worklist_->GlobalPoolSize() +
+         write_barrier_worklist_->GlobalPoolSize();
+}
+
 bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
-                                          base::TimeTicks deadline) {
+                                          base::JobDelegate* delegate) {
   bool finished;
   do {
     // Iteratively mark all objects that are reachable from the objects
     // currently pushed onto the marking worklist.
-    finished = DrainWorklistWithDeadline(
-        deadline, marking_worklist_.get(),
+    finished = DrainWorklistWithYielding(
+        delegate, marking_worklist_.get(),
         [visitor](const MarkingItem& item) {
           HeapObjectHeader* header =
               HeapObjectHeader::FromPayload(item.base_object_payload);
@@ -432,8 +456,8 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     if (!finished)
       break;

-    finished = DrainWorklistWithDeadline(
-        deadline, write_barrier_worklist_.get(),
+    finished = DrainWorklistWithYielding(
+        delegate, write_barrier_worklist_.get(),
         [visitor](HeapObjectHeader* header) {
           DCHECK(!ConcurrentMarkingVisitor::IsInConstruction(header));
           GCInfo::From(header->GcInfoIndex()).trace(visitor, header->Payload());
...
third_party/blink/renderer/platform/heap/heap.h

@@ -310,8 +310,11 @@ class PLATFORM_EXPORT ThreadHeap {
   // Returns true if concurrent markers will have work to steal
   bool HasWorkForConcurrentMarking() const;
+  // Returns the amount of work currently available for stealing (there could
+  // be work remaining even if this is 0).
+  size_t ConcurrentMarkingGlobalWorkSize() const;
   // Returns true if marker is done
-  bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*, base::TimeTicks);
+  bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*, base::JobDelegate*);
   // Conservatively checks whether an address is a pointer in any of the
   // thread heaps. If so marks the object pointed to as live.
...
third_party/blink/renderer/platform/heap/thread_state.cc

@@ -49,7 +49,6 @@
 #include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
 #include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
 #include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
-#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"
 #include "third_party/blink/renderer/platform/heap/handle.h"
 #include "third_party/blink/renderer/platform/heap/heap.h"
 #include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
@@ -98,19 +97,6 @@ uint8_t ThreadState::main_thread_state_storage_[sizeof(ThreadState)];
 namespace {

-// Concurrent marking should stop every once in a while to flush private
-// segments to v8 marking worklist. It should also stop to avoid priority
-// inversion.
-//
-// TODO(omerkatz): What is a good value to set here?
-constexpr base::TimeDelta kConcurrentMarkingStepDuration =
-    base::TimeDelta::FromMilliseconds(2);
-
-// Number of concurrent marking tasks to use.
-//
-// TODO(omerkatz): kNumberOfMarkingTasks should be set heuristically
-// instead of a constant.
-constexpr uint8_t kNumberOfConcurrentMarkingTasks = 3u;
-
 constexpr size_t kMaxTerminationGCLoops = 20;

 // Helper function to convert a byte count to a KB count, capping at
@@ -221,9 +207,7 @@ ThreadState::ThreadState()
       asan_fake_stack_(__asan_get_current_fake_stack()),
 #endif
       incremental_marking_scheduler_(
-          std::make_unique<IncrementalMarkingScheduler>(this)),
-      marker_scheduler_(std::make_unique<CancelableTaskScheduler>(
-          base::MakeRefCounted<WorkerPoolTaskRunner>())) {
+          std::make_unique<IncrementalMarkingScheduler>(this)) {
   DCHECK(CheckThread());
   DCHECK(!**thread_specific_);
   **thread_specific_ = this;
@@ -752,9 +736,10 @@ void ThreadState::AtomicPauseMarkPrologue(
   SetGCState(kNoGCScheduled);
   if (base::FeatureList::IsEnabled(
           blink::features::kBlinkHeapConcurrentMarking)) {
-    // Stop concurrent markers
-    marker_scheduler_->CancelAndWait();
-    active_markers_ = 0;
+    // Stop concurrent markers and wait synchronously until they have all
+    // returned.
+    marker_handle_.Cancel();
+    DCHECK_EQ(0U, active_markers_);
     available_concurrent_marking_task_ids_.clear();
   }
 #if DCHECK_IS_ON()
@@ -1198,27 +1183,7 @@ void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
   EnableIncrementalMarkingBarrier();
   if (base::FeatureList::IsEnabled(
           blink::features::kBlinkHeapConcurrentMarking)) {
-    // No active concurrent markers yet, so it is safe to write to
-    // concurrently_marked_bytes_ without a lock.
-    concurrently_marked_bytes_ = 0;
     current_gc_data_.visitor->FlushMarkingWorklists();
-    // Check that the marking worklist has enough private segments for all
-    // concurrent marking tasks.
-    const uint8_t max_concurrent_task_id =
-        WorklistTaskId::ConcurrentThreadBase +
-        kNumberOfConcurrentMarkingTasks;
-    static_assert(
-        MarkingWorklist::kNumTasks == WriteBarrierWorklist::kNumTasks,
-        "Marking worklist and write-barrier worklist should be the "
-        "same size");
-    static_assert(max_concurrent_task_id <= MarkingWorklist::kNumTasks,
-                  "Number of concurrent marking tasks should not exceed "
-                  "number of tasks in worlkist");
-    // Initialize concurrent marking task ids.
-    for (uint8_t i = WorklistTaskId::ConcurrentThreadBase;
-         i < max_concurrent_task_id; ++i) {
-      available_concurrent_marking_task_ids_.push_back(i);
-    }
     ScheduleConcurrentMarking();
   }
   SetGCState(kIncrementalMarkingStepScheduled);
@@ -1275,11 +1240,16 @@ void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state,
 bool ThreadState::ConcurrentMarkingStep() {
   current_gc_data_.visitor->FlushMarkingWorklists();
   if (Heap().HasWorkForConcurrentMarking()) {
-    ScheduleConcurrentMarking();
+    // Notifies the scheduler that max concurrency might have increased.
+    // This will adjust the number of markers if necessary.
+    marker_handle_.NotifyConcurrencyIncrease();
     return false;
   }
-  base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
-  return active_markers_ == 0;
+  base::AutoLock lock(concurrent_marker_lock_);
+  // !HasWorkForConcurrentMarking() is checked after |active_markers_| == 0
+  // because active markers can otherwise flush work and return.
+  return active_markers_.load(std::memory_order_relaxed) == 0 &&
+         !Heap().HasWorkForConcurrentMarking();
 }

 void ThreadState::IncrementalMarkingFinalize() {
@@ -1790,32 +1760,57 @@ void ThreadState::EnableCompactionForNextGCForTesting() {
 }

 void ThreadState::ScheduleConcurrentMarking() {
-  base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
   DCHECK(base::FeatureList::IsEnabled(
       blink::features::kBlinkHeapConcurrentMarking));
+  DCHECK_EQ(0U, active_markers_);
+
+  // No active concurrent markers yet, so it is safe to write to
+  // concurrently_marked_bytes_ without a lock.
+  concurrently_marked_bytes_ = 0;

-  for (uint8_t i = active_markers_; i < kNumberOfConcurrentMarkingTasks; ++i) {
-    marker_scheduler_->ScheduleTask(WTF::CrossThreadBindOnce(
-        &ThreadState::PerformConcurrentMark, WTF::CrossThreadUnretained(this)));
+  const uint8_t max_concurrent_task_id = MarkingWorklist::kNumTasks;
+  // Initialize concurrent marking task ids.
+  for (uint8_t i = WorklistTaskId::ConcurrentThreadBase;
+       i < max_concurrent_task_id; ++i) {
+    available_concurrent_marking_task_ids_.push_back(i);
   }
-  active_markers_ = kNumberOfConcurrentMarkingTasks;
+
+  // |USER_VISIBLE| is used to minimize marking on foreground thread.
+  marker_handle_ = base::PostJob(
+      FROM_HERE, {base::ThreadPool(), base::TaskPriority::USER_VISIBLE},
+      ConvertToBaseRepeatingCallback(
+          WTF::CrossThreadBindRepeating(&ThreadState::PerformConcurrentMark,
+                                        WTF::CrossThreadUnretained(this))),
+      ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
+          [](ThreadState* state) -> size_t {
+            // We need to account for local segments in addition to
+            // ConcurrentMarkingGlobalWorkSize().
+            return std::min<size_t>(
+                state->Heap().ConcurrentMarkingGlobalWorkSize() +
+                    state->active_markers_.load(std::memory_order_relaxed),
+                MarkingWorklist::kNumTasks -
+                    WorklistTaskId::ConcurrentThreadBase);
+          },
+          WTF::CrossThreadUnretained(this))));
 }

-void ThreadState::PerformConcurrentMark() {
+void ThreadState::PerformConcurrentMark(base::JobDelegate* job) {
   VLOG(2) << "[state:" << this << "] [threadid:" << CurrentThread() << "] "
           << "ConcurrentMark";
   ThreadHeapStatsCollector::EnabledConcurrentScope stats_scope(
       Heap().stats_collector(),
       ThreadHeapStatsCollector::kConcurrentMarkingStep);

+  if (!Heap().HasWorkForConcurrentMarking())
+    return;
+
   uint8_t task_id;
   {
-    base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
+    base::AutoLock lock(concurrent_marker_lock_);
     DCHECK(!available_concurrent_marking_task_ids_.IsEmpty());
     task_id = available_concurrent_marking_task_ids_.back();
     available_concurrent_marking_task_ids_.pop_back();
+    active_markers_.fetch_add(1, std::memory_order_relaxed);
   }

   std::unique_ptr<ConcurrentMarkingVisitor> concurrent_visitor =
@@ -1827,26 +1822,15 @@ void ThreadState::PerformConcurrentMark() {
       this, GetMarkingMode(Heap().Compaction()->IsCompacting()),
       task_id);

-  const bool finished = Heap().AdvanceConcurrentMarking(
-      concurrent_visitor.get(),
-      base::TimeTicks::Now() + kConcurrentMarkingStepDuration);
+  Heap().AdvanceConcurrentMarking(concurrent_visitor.get(), job);

   concurrent_visitor->FlushWorklists();

   {
-    base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
-    // When marking is done, flush visitor worklists and decrement number of
-    // active markers so we know how many markers are left
+    base::AutoLock lock(concurrent_marker_lock_);
+    active_markers_.fetch_sub(1, std::memory_order_relaxed);
     concurrently_marked_bytes_ += concurrent_visitor->marked_bytes();
     available_concurrent_marking_task_ids_.push_back(task_id);
-    if (finished) {
-      --active_markers_;
-      return;
-    }
   }
-
-  // Reschedule this marker
-  marker_scheduler_->ScheduleTask(WTF::CrossThreadBindOnce(
-      &ThreadState::PerformConcurrentMark, WTF::CrossThreadUnretained(this)));
 }

 }  // namespace blink
...
third_party/blink/renderer/platform/heap/thread_state.h

@@ -63,7 +63,6 @@ namespace incremental_marking_test {
 class IncrementalMarkingScope;
 }  // namespace incremental_marking_test

-class CancelableTaskScheduler;
 class MarkingVisitor;
 class PersistentNode;
 class PersistentRegion;
@@ -518,7 +517,7 @@ class PLATFORM_EXPORT ThreadState final {
   // terminated and the worklist is empty)
   bool ConcurrentMarkingStep();
   void ScheduleConcurrentMarking();
-  void PerformConcurrentMark();
+  void PerformConcurrentMark(base::JobDelegate* job);

   // Schedule helpers.
   void ScheduleIdleLazySweep();
@@ -628,11 +627,11 @@ class PLATFORM_EXPORT ThreadState final {
   std::unique_ptr<IncrementalMarkingScheduler> incremental_marking_scheduler_;

-  std::unique_ptr<CancelableTaskScheduler> marker_scheduler_;
+  base::Lock concurrent_marker_lock_;
   Vector<uint8_t> available_concurrent_marking_task_ids_;
-  uint8_t active_markers_ = 0;
-  base::Lock concurrent_marker_bootstrapping_lock_;
+  std::atomic<size_t> active_markers_{0};
   size_t concurrently_marked_bytes_ = 0;
+  base::JobHandle marker_handle_;
   base::JobHandle sweeper_handle_;
   std::atomic_bool has_unswept_pages_{false};
...