Commit 0e035541 authored by Etienne Pierre-Doray, committed by Commit Bot

[Blink Heap]: Marking uses jobs API.

Manual reland take 2:
Original CL: https://chromium-review.googlesource.com/c/chromium/src/+/2029609
Revert 1: https://chromium-review.googlesource.com/c/chromium/src/+/2109970
Revert 2: https://chromium-review.googlesource.com/c/chromium/src/+/2254470
Reason for revert: flaky DCHECK in AssertExpectedConcurrency
Fix: the DCHECK ended up being removed in https://chromium-review.googlesource.com/c/chromium/src/+/2363074
Additional changes: the impl now uses the exposed worker_count and
IsCompleted() instead of available_concurrent_marking_task_ids_/
active_markers_, thus reducing complexity and removing the need for
concurrent_marker_bootstrapping_lock_.

Original Description:
Note: This CL enables feature BlinkHeapConcurrentMarking to get
the desired behavior. However, we should land enabling
BlinkHeapConcurrentMarking on its own first.

active_markers + GlobalPoolSize() is used to determine
the desired number of workers.

NotifyConcurrencyIncrease() is called periodically from
ConcurrentMarkingStep() if GlobalPoolSize() > 0,
to make sure enough workers are scheduled.

Bug: 1046343
Change-Id: Iac4941d804714745545314eec1e1b33d1734a4a3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2261352
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#799603}
parent 87fa05ae
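For orientation before the diff: the Jobs API shape this CL adopts looks roughly like the sketch below. base::PostJob, base::JobHandle, and base::JobDelegate are the real interfaces referenced in the change; g_pending_work, StartExampleJob, the CAS loop, and the concurrency cap of 4 are illustrative placeholders, not the CL's code.

#include <algorithm>
#include <atomic>

#include "base/bind.h"
#include "base/location.h"
#include "base/task/post_job.h"
#include "base/task/task_traits.h"

// Illustrative stand-in for real work (e.g., Blink's marking worklists).
std::atomic<size_t> g_pending_work{100};

base::JobHandle StartExampleJob() {
  return base::PostJob(
      FROM_HERE, {base::ThreadPool(), base::TaskPriority::USER_VISIBLE},
      // Worker task: the scheduler may run this on several thread-pool
      // workers at once; each worker drains work until told to yield.
      base::BindRepeating([](base::JobDelegate* delegate) {
        size_t current = g_pending_work.load(std::memory_order_relaxed);
        while (current > 0) {
          if (delegate->ShouldYield())
            return;  // The scheduler re-invokes the task later.
          // Claim one unit of work; on CAS failure |current| is reloaded.
          if (g_pending_work.compare_exchange_weak(
                  current, current - 1, std::memory_order_relaxed)) {
            // ... process the claimed unit ...
            current = g_pending_work.load(std::memory_order_relaxed);
          }
        }
      }),
      // Max-concurrency callback: how many workers are useful right now.
      // The cap of 4 is arbitrary for this sketch.
      base::BindRepeating([](size_t /*active_worker_count*/) -> size_t {
        return std::min<size_t>(
            g_pending_work.load(std::memory_order_relaxed), 4u);
      }));
}

The returned handle then supports NotifyConcurrencyIncrease() when new work shows up, Cancel() to stop workers and wait for them, and IsCompleted() — the three calls this CL relies on below.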
third_party/blink/renderer/platform/heap/BUILD.gn

@@ -48,8 +48,6 @@ blink_platform_sources("heap") {
     "blink_gc.h",
     "blink_gc_memory_dump_provider.cc",
     "blink_gc_memory_dump_provider.h",
-    "cancelable_task_scheduler.cc",
-    "cancelable_task_scheduler.h",
     "collection_support/heap_hash_table_backing.h",
     "collection_support/heap_linked_stack.h",
     "collection_support/heap_vector_backing.h",
@@ -154,7 +152,6 @@ source_set("blink_heap_unittests_sources") {
   sources = [
     "../testing/run_all_tests.cc",
     "test/blink_gc_memory_dump_provider_test.cc",
-    "test/cancelable_task_scheduler_test.cc",
     "test/card_table_test.cc",
     "test/concurrent_marking_test.cc",
     "test/gc_info_test.cc",
...
third_party/blink/renderer/platform/heap/cancelable_task_scheduler.cc (deleted)

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"

#include "base/check.h"
#include "base/macros.h"
#include "base/task_runner.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"

namespace blink {

class CancelableTaskScheduler::TaskData {
  USING_FAST_MALLOC(TaskData);
  DISALLOW_COPY_AND_ASSIGN(TaskData);

 public:
  TaskData(Task task, CancelableTaskScheduler* scheduler)
      : task_(std::move(task)), scheduler_(scheduler), status_(kWaiting) {}

  ~TaskData() {
    // The task runner is responsible for unregistering the task in case the
    // task hasn't been cancelled.
    if (TryCancel()) {
      scheduler_->UnregisterAndSignal(this);
    }
  }

  void Run() {
    if (TryRun()) {
      std::move(task_).Run();
      scheduler_->UnregisterAndSignal(this);
    }
  }

  bool TryCancel() {
    Status expected = kWaiting;
    return status_.compare_exchange_strong(expected, kCancelled,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
  }

 private:
  // Identifies the state a cancelable task is in:
  // |kWaiting|: The task is scheduled and waiting to be executed. {TryRun}
  //   will succeed.
  // |kCancelled|: The task has been cancelled. {TryRun} will fail.
  // |kRunning|: The task is currently running and cannot be cancelled
  //   anymore.
  enum Status : uint8_t { kWaiting, kCancelled, kRunning };

  bool TryRun() {
    Status expected = kWaiting;
    return status_.compare_exchange_strong(expected, kRunning,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
  }

  Task task_;
  CancelableTaskScheduler* const scheduler_;
  std::atomic<Status> status_;
};

CancelableTaskScheduler::CancelableTaskScheduler(
    scoped_refptr<base::TaskRunner> task_runner)
    : cond_var_(&lock_), task_runner_(std::move(task_runner)) {}

CancelableTaskScheduler::~CancelableTaskScheduler() {
  base::AutoLock lock(lock_);
  CHECK(tasks_.IsEmpty());
}

void CancelableTaskScheduler::ScheduleTask(Task task) {
  std::unique_ptr<TaskData> task_data = Register(std::move(task));
  task_runner_->PostTask(FROM_HERE,
                         base::BindOnce(&TaskData::Run, std::move(task_data)));
}

size_t CancelableTaskScheduler::CancelAndWait() {
  size_t result = 0;
  base::AutoLock lock(lock_);
  while (!tasks_.IsEmpty()) {
    result += RemoveCancelledTasks();
    if (!tasks_.IsEmpty()) {
      cond_var_.Wait();
    }
  }
  return result;
}

std::unique_ptr<CancelableTaskScheduler::TaskData>
CancelableTaskScheduler::Register(Task task) {
  auto task_data = std::make_unique<TaskData>(std::move(task), this);
  base::AutoLock lock(lock_);
  tasks_.insert(task_data.get());
  return task_data;
}

void CancelableTaskScheduler::UnregisterAndSignal(TaskData* task_data) {
  base::AutoLock lock(lock_);
  CHECK(tasks_.Contains(task_data));
  tasks_.erase(task_data);
  cond_var_.Signal();
}

// This function is needed because WTF::HashSet::erase invalidates all
// iterators. Returns the number of removed tasks.
size_t CancelableTaskScheduler::RemoveCancelledTasks() {
  WTF::Vector<TaskData*> to_be_removed;
  // Assume the worst case.
  to_be_removed.ReserveCapacity(tasks_.size());
  for (TaskData* task : tasks_) {
    if (task->TryCancel()) {
      to_be_removed.push_back(task);
    }
  }
  tasks_.RemoveAll(to_be_removed);
  return to_be_removed.size();
}

size_t CancelableTaskScheduler::NumberOfTasksForTesting() const {
  base::AutoLock lock(lock_);
  return tasks_.size();
}

}  // namespace blink
third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h (deleted)

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_

#include <memory>

#include "base/memory/scoped_refptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/functional.h"
#include "third_party/blink/renderer/platform/wtf/hash_set.h"

namespace base {
class TaskRunner;
}

namespace blink {

// CancelableTaskScheduler allows for scheduling tasks that can be cancelled
// before they are invoked. User is responsible for synchronizing completion
// of tasks and destruction of CancelableTaskScheduler.
class PLATFORM_EXPORT CancelableTaskScheduler final {
  USING_FAST_MALLOC(CancelableTaskScheduler);

 public:
  using Task = WTF::CrossThreadOnceFunction<void()>;

  explicit CancelableTaskScheduler(scoped_refptr<base::TaskRunner>);
  ~CancelableTaskScheduler();

  // Schedules task to run on TaskRunner.
  void ScheduleTask(Task);

  // Cancels all not yet started tasks and waits for running ones to complete.
  // Returns number of cancelled (not executed) tasks.
  size_t CancelAndWait();

 private:
  class TaskData;

  template <class T>
  friend class CancelableTaskSchedulerTest;

  std::unique_ptr<TaskData> Register(Task);
  void UnregisterAndSignal(TaskData*);
  size_t RemoveCancelledTasks();
  size_t NumberOfTasksForTesting() const;

  WTF::HashSet<TaskData*> tasks_;
  mutable base::Lock lock_;
  base::ConditionVariable cond_var_;
  scoped_refptr<base::TaskRunner> task_runner_;
};

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_CANCELABLE_TASK_SCHEDULER_H_
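For context on what the Jobs API replaces, a hedged usage sketch of the class deleted above. WorkerPoolTaskRunner mirrors the ThreadState constructor shown later in this diff (its header is not shown here and is assumed); CancelableTaskSchedulerExample itself is illustrative only.

#include "base/memory/scoped_refptr.h"
#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"

namespace blink {

void CancelableTaskSchedulerExample() {
  CancelableTaskScheduler scheduler(
      base::MakeRefCounted<WorkerPoolTaskRunner>());
  // A task may be cancelled at any point before it starts running.
  scheduler.ScheduleTask(WTF::CrossThreadBindOnce([] { /* marking work */ }));
  // Cancel every task that has not started yet and block until in-flight
  // tasks return; the scheduler must outlive all of its tasks.
  const size_t cancelled_count = scheduler.CancelAndWait();
  (void)cancelled_count;  // 0 if the task already ran, 1 if it was cancelled.
}

}  // namespace blink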
third_party/blink/renderer/platform/heap/heap.cc

@@ -309,17 +309,18 @@ static constexpr size_t kDefaultConcurrentDeadlineCheckInterval =
 template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
           typename Worklist,
-          typename Callback>
-bool DrainWorklistWithDeadline(base::TimeTicks deadline,
-                               Worklist* worklist,
-                               Callback callback,
-                               int task_id) {
+          typename Callback,
+          typename YieldPredicate>
+bool DrainWorklist(Worklist* worklist,
+                   Callback callback,
+                   YieldPredicate should_yield,
+                   int task_id) {
   size_t processed_callback_count = 0;
   typename Worklist::EntryType item;
   while (worklist->Pop(task_id, &item)) {
     callback(item);
     if (processed_callback_count-- == 0) {
-      if (deadline <= base::TimeTicks::Now()) {
+      if (should_yield()) {
         return false;
       }
       processed_callback_count = kDeadlineCheckInterval;
@@ -328,6 +329,30 @@ bool DrainWorklistWithDeadline(base::TimeTicks deadline,
   return true;
 }

+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+          typename Worklist,
+          typename Callback>
+bool DrainWorklistWithDeadline(base::TimeTicks deadline,
+                               Worklist* worklist,
+                               Callback callback,
+                               int task_id) {
+  return DrainWorklist<kDeadlineCheckInterval>(
+      worklist, std::move(callback),
+      [deadline]() { return deadline <= base::TimeTicks::Now(); }, task_id);
+}
+
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+          typename Worklist,
+          typename Callback>
+bool DrainWorklistWithYielding(base::JobDelegate* delegate,
+                               Worklist* worklist,
+                               Callback callback,
+                               int task_id) {
+  return DrainWorklist<kDeadlineCheckInterval>(
+      worklist, std::move(callback),
+      [delegate]() { return delegate->ShouldYield(); }, task_id);
+}
+
 }  // namespace

 bool ThreadHeap::InvokeEphemeronCallbacks(
@@ -492,7 +517,15 @@ bool ThreadHeap::HasWorkForConcurrentMarking() const {
          !ephemeron_pairs_to_process_worklist_->IsGlobalPoolEmpty();
 }

+size_t ThreadHeap::ConcurrentMarkingGlobalWorkSize() const {
+  return marking_worklist_->GlobalPoolSize() +
+         write_barrier_worklist_->GlobalPoolSize() +
+         previously_not_fully_constructed_worklist_->GlobalPoolSize() +
+         ephemeron_pairs_to_process_worklist_->GlobalPoolSize();
+}
+
 bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
+                                          base::JobDelegate* delegate,
                                           base::TimeTicks deadline) {
   bool finished;
   do {
@@ -500,8 +533,8 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     // |marking_worklist_|. This merely re-adds items with the proper
     // callbacks.
     finished =
-        DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
-            deadline, previously_not_fully_constructed_worklist_.get(),
+        DrainWorklistWithYielding<kDefaultConcurrentDeadlineCheckInterval>(
+            delegate, previously_not_fully_constructed_worklist_.get(),
             [visitor](NotFullyConstructedItem& item) {
               visitor->DynamicallyMarkAddress(
                   reinterpret_cast<ConstAddress>(item));
@@ -512,9 +545,9 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     // Iteratively mark all objects that are reachable from the objects
     // currently pushed onto the marking worklist.
-    finished = DrainWorklistWithDeadline<
+    finished = DrainWorklistWithYielding<
         kDefaultConcurrentDeadlineCheckInterval>(
-        deadline, marking_worklist_.get(),
+        delegate, marking_worklist_.get(),
         [visitor](const MarkingItem& item) {
           HeapObjectHeader* header =
               HeapObjectHeader::FromPayload(item.base_object_payload);
@@ -529,9 +562,9 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     if (!finished)
       break;

-    finished = DrainWorklistWithDeadline<
+    finished = DrainWorklistWithYielding<
         kDefaultConcurrentDeadlineCheckInterval>(
-        deadline, write_barrier_worklist_.get(),
+        delegate, write_barrier_worklist_.get(),
         [visitor](HeapObjectHeader* header) {
           PageFromObject(header)->SynchronizedLoad();
           DCHECK(
@@ -554,8 +587,8 @@ bool ThreadHeap::AdvanceConcurrentMarking(ConcurrentMarkingVisitor* visitor,
     // by the mutator thread and then invoked either concurrently or by the
     // mutator thread (in the atomic pause at latest).
     finished =
-        DrainWorklistWithDeadline<kDefaultConcurrentDeadlineCheckInterval>(
-            deadline, ephemeron_pairs_to_process_worklist_.get(),
+        DrainWorklistWithYielding<kDefaultConcurrentDeadlineCheckInterval>(
+            delegate, ephemeron_pairs_to_process_worklist_.get(),
             [visitor](EphemeronPairItem& item) {
               visitor->VisitEphemeron(item.key, item.value,
                                       item.value_trace_callback);
...
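A detail worth calling out in DrainWorklist above: processed_callback_count-- == 0 gates the yield check, so should_yield() — a clock comparison in the deadline variant, a JobDelegate::ShouldYield() call in the job variant — is consulted on the first item and then only once per kDeadlineCheckInterval items, keeping the hot Pop/callback loop cheap.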
third_party/blink/renderer/platform/heap/heap.h

@@ -301,8 +301,13 @@ class PLATFORM_EXPORT ThreadHeap {
   // Returns true if concurrent markers will have work to steal
   bool HasWorkForConcurrentMarking() const;
+  // Returns the amount of work currently available for stealing (there could
+  // be work remaining even if this is 0).
+  size_t ConcurrentMarkingGlobalWorkSize() const;
   // Returns true if marker is done
-  bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*, base::TimeTicks);
+  bool AdvanceConcurrentMarking(ConcurrentMarkingVisitor*,
+                                base::JobDelegate*,
+                                base::TimeTicks);

   // Conservatively checks whether an address is a pointer in any of the
   // thread heaps. If so marks the object pointed to as live.
...
third_party/blink/renderer/platform/heap/test/cancelable_task_scheduler_test.cc (deleted)

// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"

#include <atomic>

#include "base/memory/scoped_refptr.h"
#include "base/task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/heap/heap_test_utilities.h"
#include "third_party/blink/renderer/platform/scheduler/public/worker_pool.h"
#include "third_party/blink/renderer/platform/scheduler/test/fake_task_runner.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"

namespace blink {

class ParallelTaskRunner : public base::TaskRunner {
 public:
  bool PostDelayedTask(const base::Location& location,
                       base::OnceClosure task,
                       base::TimeDelta) override {
    worker_pool::PostTask(location, WTF::CrossThreadBindOnce(std::move(task)));
    return true;
  }

  void RunUntilIdle() {}
};

template <class Runner>
class CancelableTaskSchedulerTest : public TestSupportingGC {
 public:
  using Task = CancelableTaskScheduler::Task;

  void ScheduleTask(Task callback) {
    scheduler_.ScheduleTask(std::move(callback));
  }

  void RunTaskRunner() { task_runner_->RunUntilIdle(); }

  size_t CancelAndWait() { return scheduler_.CancelAndWait(); }

  size_t NumberOfRegisteredTasks() const {
    return scheduler_.NumberOfTasksForTesting();
  }

 private:
  scoped_refptr<Runner> task_runner_ = base::MakeRefCounted<Runner>();
  CancelableTaskScheduler scheduler_{task_runner_};
};

using RunnerTypes =
    ::testing::Types<scheduler::FakeTaskRunner, ParallelTaskRunner>;
TYPED_TEST_SUITE(CancelableTaskSchedulerTest, RunnerTypes);

TYPED_TEST(CancelableTaskSchedulerTest, EmptyCancelTasks) {
  const size_t cancelled = this->CancelAndWait();
  EXPECT_EQ(0u, cancelled);
  EXPECT_EQ(0u, this->NumberOfRegisteredTasks());
}

TYPED_TEST(CancelableTaskSchedulerTest, RunAndCancelTasks) {
  static constexpr size_t kNumberOfTasks = 10u;
  const auto callback = [](std::atomic<int>* i) { ++(*i); };
  std::atomic<int> var{0};
  for (size_t i = 0; i < kNumberOfTasks; ++i) {
    this->ScheduleTask(
        WTF::CrossThreadBindOnce(callback, WTF::CrossThreadUnretained(&var)));
    EXPECT_GE(i + 1, this->NumberOfRegisteredTasks());
  }
  this->RunTaskRunner();
  // Tasks will remove themselves after running.
  EXPECT_LE(0u, this->NumberOfRegisteredTasks());
  const size_t cancelled = this->CancelAndWait();
  EXPECT_EQ(0u, this->NumberOfRegisteredTasks());
  EXPECT_EQ(kNumberOfTasks, var + cancelled);
}

TEST(CancelableTaskSchedulerTest, RemoveTasksFromQueue) {
  auto task_runner = base::MakeRefCounted<scheduler::FakeTaskRunner>();
  CancelableTaskScheduler scheduler{task_runner};
  int var = 0;
  scheduler.ScheduleTask(WTF::CrossThreadBindOnce(
      [](int* var) { ++(*var); }, WTF::CrossThreadUnretained(&var)));
  auto tasks = task_runner->TakePendingTasksForTesting();
  // Clearing the task queue should destroy all cancelable closures, which in
  // turn will notify CancelableTaskScheduler to remove corresponding tasks.
  tasks.clear();
  EXPECT_EQ(0, var);
}

}  // namespace blink
third_party/blink/renderer/platform/heap/thread_state.cc

@@ -50,7 +50,6 @@
 #include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
 #include "third_party/blink/renderer/platform/heap/blink_gc.h"
 #include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
-#include "third_party/blink/renderer/platform/heap/cancelable_task_scheduler.h"
 #include "third_party/blink/renderer/platform/heap/handle.h"
 #include "third_party/blink/renderer/platform/heap/heap.h"
 #include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
@@ -107,11 +106,6 @@ namespace {
 // TODO(omerkatz): What is a good value to set here?
 constexpr base::TimeDelta kConcurrentMarkingStepDuration =
     base::TimeDelta::FromMilliseconds(2);
-// Number of concurrent marking tasks to use.
-//
-// TODO(omerkatz): kNumberOfMarkingTasks should be set heuristically
-// instead of a constant.
-constexpr uint8_t kNumberOfConcurrentMarkingTasks = 3u;

 constexpr size_t kMaxTerminationGCLoops = 20;
@@ -198,9 +192,7 @@ ThreadState::ThreadState()
       asan_fake_stack_(__asan_get_current_fake_stack()),
 #endif
       incremental_marking_scheduler_(
-          std::make_unique<IncrementalMarkingScheduler>(this)),
-      marker_scheduler_(std::make_unique<CancelableTaskScheduler>(
-          base::MakeRefCounted<WorkerPoolTaskRunner>())) {
+          std::make_unique<IncrementalMarkingScheduler>(this)) {
   DCHECK(CheckThread());
   DCHECK(!**thread_specific_);
   **thread_specific_ = this;
@@ -697,10 +689,9 @@ void ThreadState::AtomicPauseMarkPrologue(
   SetGCState(kNoGCScheduled);
   if (base::FeatureList::IsEnabled(
           blink::features::kBlinkHeapConcurrentMarking)) {
-    // Stop concurrent markers
-    marker_scheduler_->CancelAndWait();
-    active_markers_ = 0;
-    available_concurrent_marking_task_ids_.clear();
+    // Stop concurrent markers and wait synchronously until they have all
+    // returned.
+    marker_handle_.Cancel();
   }
 #if DCHECK_IS_ON()
   MarkingWorklist* marking_worklist = Heap().GetMarkingWorklist();
@@ -1132,23 +1123,10 @@ void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
   if (base::FeatureList::IsEnabled(
           blink::features::kBlinkHeapConcurrentMarking)) {
     current_gc_data_.visitor->FlushMarkingWorklists();
-    // Check that the marking worklist has enough private segments for all
-    // concurrent marking tasks.
-    const uint8_t max_concurrent_task_id =
-        WorklistTaskId::ConcurrentThreadBase +
-        kNumberOfConcurrentMarkingTasks;
     static_assert(
         MarkingWorklist::kNumTasks == WriteBarrierWorklist::kNumTasks,
         "Marking worklist and write-barrier worklist should be the "
         "same size");
-    static_assert(max_concurrent_task_id <= MarkingWorklist::kNumTasks,
-                  "Number of concurrent marking tasks should not exceed "
-                  "number of tasks in worlkist");
-    // Initialize concurrent marking task ids.
-    for (uint8_t i = WorklistTaskId::ConcurrentThreadBase;
-         i < max_concurrent_task_id; ++i) {
-      available_concurrent_marking_task_ids_.push_back(i);
-    }
     ScheduleConcurrentMarking();
   }
   SetGCState(kIncrementalMarkingStepScheduled);
@@ -1208,11 +1186,12 @@ void ThreadState::IncrementalMarkingStep(BlinkGC::StackState stack_state) {
 bool ThreadState::ConcurrentMarkingStep() {
   current_gc_data_.visitor->FlushMarkingWorklists();
   if (Heap().HasWorkForConcurrentMarking()) {
-    ScheduleConcurrentMarking();
+    // Notifies the scheduler that max concurrency might have increased.
+    // This will adjust the number of markers if necessary.
+    marker_handle_.NotifyConcurrencyIncrease();
     return false;
   }
-  base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
-  return active_markers_ == 0;
+  return marker_handle_.IsCompleted();
 }
 void ThreadState::IncrementalMarkingFinalize() {
@@ -1700,33 +1679,39 @@ void ThreadState::EnableCompactionForNextGCForTesting() {
 }

 void ThreadState::ScheduleConcurrentMarking() {
-  base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
   DCHECK(base::FeatureList::IsEnabled(
       blink::features::kBlinkHeapConcurrentMarking));
-  for (uint8_t i = active_markers_; i < kNumberOfConcurrentMarkingTasks; ++i) {
-    marker_scheduler_->ScheduleTask(WTF::CrossThreadBindOnce(
-        &ThreadState::PerformConcurrentMark, WTF::CrossThreadUnretained(this)));
-  }
-
-  active_markers_ = kNumberOfConcurrentMarkingTasks;
+  // |USER_VISIBLE| is used to minimize marking on the foreground thread.
+  marker_handle_ = base::PostJob(
+      FROM_HERE, {base::ThreadPool(), base::TaskPriority::USER_VISIBLE},
+      ConvertToBaseRepeatingCallback(
+          WTF::CrossThreadBindRepeating(&ThreadState::PerformConcurrentMark,
+                                        WTF::CrossThreadUnretained(this))),
+      ConvertToBaseRepeatingCallback(WTF::CrossThreadBindRepeating(
+          [](ThreadState* state, size_t active_worker_count) -> size_t {
+            // We need to account for local segments in addition to
+            // ConcurrentMarkingGlobalWorkSize().
+            return std::min<size_t>(
+                state->Heap().ConcurrentMarkingGlobalWorkSize() +
+                    active_worker_count,
+                MarkingWorklist::kNumTasks -
+                    WorklistTaskId::ConcurrentThreadBase);
+          },
+          WTF::CrossThreadUnretained(this))));
 }

-void ThreadState::PerformConcurrentMark() {
+void ThreadState::PerformConcurrentMark(base::JobDelegate* job) {
   VLOG(2) << "[state:" << this << "] [threadid:" << CurrentThread() << "] "
           << "ConcurrentMark";
   ThreadHeapStatsCollector::EnabledConcurrentScope stats_scope(
       Heap().stats_collector(),
       ThreadHeapStatsCollector::kConcurrentMarkingStep);

-  uint8_t task_id;
-  {
-    base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
-    DCHECK(!available_concurrent_marking_task_ids_.IsEmpty());
-    task_id = available_concurrent_marking_task_ids_.back();
-    available_concurrent_marking_task_ids_.pop_back();
-  }
+  if (!Heap().HasWorkForConcurrentMarking())
+    return;
+
+  uint8_t task_id = job->GetTaskId() + 1;

   std::unique_ptr<ConcurrentMarkingVisitor> concurrent_visitor =
       IsUnifiedGCMarkingInProgress()
@@ -1737,28 +1722,14 @@ void ThreadState::PerformConcurrentMark() {
           this, GetMarkingMode(Heap().Compaction()->IsCompacting()),
           task_id);

-  const bool finished = Heap().AdvanceConcurrentMarking(
-      concurrent_visitor.get(),
+  Heap().AdvanceConcurrentMarking(
+      concurrent_visitor.get(), job,
       base::TimeTicks::Now() + kConcurrentMarkingStepDuration);

   marking_scheduling_->AddConcurrentlyMarkedBytes(
       concurrent_visitor->marked_bytes());

   concurrent_visitor->FlushWorklists();
-
-  {
-    base::AutoLock lock(concurrent_marker_bootstrapping_lock_);
-    // When marking is done, flush visitor worklists and decrement number of
-    // active markers so we know how many markers are left
-    available_concurrent_marking_task_ids_.push_back(task_id);
-    if (finished) {
-      --active_markers_;
-      return;
-    }
-  }
-
-  // Reschedule this marker
-  marker_scheduler_->ScheduleTask(WTF::CrossThreadBindOnce(
-      &ThreadState::PerformConcurrentMark, WTF::CrossThreadUnretained(this)));
 }

 }  // namespace blink
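To make the max-concurrency callback concrete with illustrative numbers (not from the CL): with 6 globally available segments and 2 active workers, it returns min(6 + 2, MarkingWorklist::kNumTasks - WorklistTaskId::ConcurrentThreadBase). The + active_worker_count term exists because a running marker may have moved everything it popped into its private segments; without it the reported concurrency could drop to 0 while markers still hold local work. The min(...) bound in turn keeps each concurrent marker on a distinct worklist task id (task_id = job->GetTaskId() + 1 in PerformConcurrentMark above).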
third_party/blink/renderer/platform/heap/thread_state.h

@@ -63,7 +63,6 @@ namespace incremental_marking_test {
 class IncrementalMarkingScope;
 }  // namespace incremental_marking_test

-class CancelableTaskScheduler;
 class MarkingVisitor;
 class MarkingSchedulingOracle;
 class PersistentNode;
@@ -558,7 +557,7 @@ class PLATFORM_EXPORT ThreadState final {
   // terminated and the worklist is empty)
   bool ConcurrentMarkingStep();
   void ScheduleConcurrentMarking();
-  void PerformConcurrentMark();
+  void PerformConcurrentMark(base::JobDelegate* job);

   // Schedule helpers.
   void ScheduleIdleLazySweep();
@@ -666,10 +665,7 @@ class PLATFORM_EXPORT ThreadState final {
   std::unique_ptr<IncrementalMarkingScheduler> incremental_marking_scheduler_;
   std::unique_ptr<MarkingSchedulingOracle> marking_scheduling_;

-  std::unique_ptr<CancelableTaskScheduler> marker_scheduler_;
-  Vector<uint8_t> available_concurrent_marking_task_ids_;
-  uint8_t active_markers_ = 0;
-  base::Lock concurrent_marker_bootstrapping_lock_;
+  base::JobHandle marker_handle_;
   base::JobHandle sweeper_handle_;
   std::atomic_bool has_unswept_pages_{false};
...