Commit f9c4d719 authored by Alex Clarke's avatar Alex Clarke Committed by Commit Bot

Avoid pointless new/delete in SequenceManagerImpl::TakeTaskImpl

According to our perf test on linux this shaves off ~0.1 us/task or
about 10% of our current overhead.

Bug: 897751
Change-Id: I79d6cf6bacd2b62be68b7ee028bf6749bec1ff57
Reviewed-on: https://chromium-review.googlesource.com/c/1348097
Commit-Queue: Alex Clarke <alexclarke@chromium.org>
Reviewed-by: Sami Kyöstilä <skyostil@chromium.org>
Cr-Commit-Position: refs/heads/master@{#611059}
parent 818f675c
...@@ -29,6 +29,13 @@ ...@@ -29,6 +29,13 @@
namespace base { namespace base {
namespace sequence_manager { namespace sequence_manager {
// This controls how big the initial reserve for
// |MainThreadOnly::task_execution_stack| should be. We don't expect to see
// depths of more than 2 unless cooperative scheduling is used on Blink, where
// we might get up to 6. Anyway 10 was chosen because it's a round number
// greater than current anticipated usage.
static constexpr const size_t kInitialTaskExecutionStackReserveCount = 10;
std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread() { std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread() {
return internal::SequenceManagerImpl::CreateOnCurrentThread(); return internal::SequenceManagerImpl::CreateOnCurrentThread();
} }
...@@ -147,7 +154,9 @@ SequenceManagerImpl::MainThreadOnly::MainThreadOnly( ...@@ -147,7 +154,9 @@ SequenceManagerImpl::MainThreadOnly::MainThreadOnly(
: random_generator(RandUint64()), : random_generator(RandUint64()),
uniform_distribution(0.0, 1.0), uniform_distribution(0.0, 1.0),
selector(associated_thread), selector(associated_thread),
real_time_domain(new internal::RealTimeDomain()) {} real_time_domain(new internal::RealTimeDomain()) {
task_execution_stack.reserve(kInitialTaskExecutionStackReserveCount);
}
SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default; SequenceManagerImpl::MainThreadOnly::~MainThreadOnly() = default;
...@@ -234,6 +243,8 @@ SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) { ...@@ -234,6 +243,8 @@ SequenceManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec); std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
main_thread_only().active_queues.insert(task_queue.get()); main_thread_only().active_queues.insert(task_queue.get());
main_thread_only().selector.AddQueue(task_queue.get()); main_thread_only().selector.AddQueue(task_queue.get());
main_thread_only().queues_to_reload.resize(
main_thread_only().active_queues.size());
return task_queue; return task_queue;
} }
...@@ -313,18 +324,24 @@ void SequenceManagerImpl::UnregisterTaskQueueImpl( ...@@ -313,18 +324,24 @@ void SequenceManagerImpl::UnregisterTaskQueueImpl(
// it. // it.
main_thread_only().active_queues.erase(task_queue.get()); main_thread_only().active_queues.erase(task_queue.get());
main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue); main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
main_thread_only().queues_to_reload.resize(
main_thread_only().active_queues.size());
} }
void SequenceManagerImpl::ReloadEmptyWorkQueues() { void SequenceManagerImpl::ReloadEmptyWorkQueues() {
DCHECK(main_thread_only().queues_to_reload.empty()); size_t num_queues_to_reload = 0;
DCHECK_EQ(main_thread_only().active_queues.size(),
main_thread_only().queues_to_reload.size());
{ {
AutoLock lock(any_thread_lock_); AutoLock lock(any_thread_lock_);
for (internal::IncomingImmediateWorkList* iter = for (internal::IncomingImmediateWorkList* iter =
any_thread().incoming_immediate_work_list; any_thread().incoming_immediate_work_list;
iter; iter = iter->next) { iter; iter = iter->next) {
main_thread_only().queues_to_reload.push_back(iter->queue); DCHECK_LT(num_queues_to_reload,
main_thread_only().queues_to_reload.size());
main_thread_only().queues_to_reload[num_queues_to_reload++] = iter->queue;
iter->queue = nullptr; iter->queue = nullptr;
} }
...@@ -335,13 +352,13 @@ void SequenceManagerImpl::ReloadEmptyWorkQueues() { ...@@ -335,13 +352,13 @@ void SequenceManagerImpl::ReloadEmptyWorkQueues() {
// completely empty and we've just posted a task (this method handles that // completely empty and we've just posted a task (this method handles that
// case). Secondly if the work queue becomes empty when calling // case). Secondly if the work queue becomes empty when calling
// WorkQueue::TakeTaskFromWorkQueue (handled there). // WorkQueue::TakeTaskFromWorkQueue (handled there).
for (internal::TaskQueueImpl* queue : main_thread_only().queues_to_reload) { for (size_t i = 0; i < num_queues_to_reload; i++) {
// It's important we call ReloadImmediateWorkQueueIfEmpty outside of // It's important we call ReloadImmediateWorkQueueIfEmpty outside of
// |any_thread_lock_| to avoid lock order inversion. // |any_thread_lock_| to avoid lock order inversion.
queue->ReloadImmediateWorkQueueIfEmpty(); main_thread_only().queues_to_reload[i]->ReloadImmediateWorkQueueIfEmpty();
main_thread_only().queues_to_reload[i] =
nullptr; // Not strictly necessary.
} }
main_thread_only().queues_to_reload.clear();
} }
void SequenceManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) { void SequenceManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
...@@ -780,6 +797,7 @@ void SequenceManagerImpl::CleanUpQueues() { ...@@ -780,6 +797,7 @@ void SequenceManagerImpl::CleanUpQueues() {
for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin(); for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
it != main_thread_only().queues_to_gracefully_shutdown.end();) { it != main_thread_only().queues_to_gracefully_shutdown.end();) {
if (it->first->IsEmpty()) { if (it->first->IsEmpty()) {
// Will resize |main_thread_only().queues_to_reload|.
UnregisterTaskQueueImpl(std::move(it->second)); UnregisterTaskQueueImpl(std::move(it->second));
main_thread_only().active_queues.erase(it->first); main_thread_only().active_queues.erase(it->first);
main_thread_only().queues_to_gracefully_shutdown.erase(it++); main_thread_only().queues_to_gracefully_shutdown.erase(it++);
......
...@@ -275,23 +275,24 @@ class BASE_EXPORT SequenceManagerImpl ...@@ -275,23 +275,24 @@ class BASE_EXPORT SequenceManagerImpl
// internal scheduling code does not expect queues to be pulled // internal scheduling code does not expect queues to be pulled
// from underneath. // from underneath.
// Scratch space used to store the contents of
// any_thread().incoming_immediate_work_list for use by
// ReloadEmptyWorkQueues. We keep hold of this vector to avoid unnecessary
// memory allocations. This should have the same size as |active_queues|.
// DO NOT RELY ON THE VALIDITY OF THE POINTERS WITHIN!
std::vector<internal::TaskQueueImpl*> queues_to_reload;
std::set<internal::TaskQueueImpl*> active_queues; std::set<internal::TaskQueueImpl*> active_queues;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>> std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_gracefully_shutdown; queues_to_gracefully_shutdown;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>> std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_delete; queues_to_delete;
// Scratch space used to store the contents of
// any_thread().incoming_immediate_work_list for use by
// ReloadEmptyWorkQueues. We keep hold of this vector to avoid unnecessary
// memory allocations.
std::vector<internal::TaskQueueImpl*> queues_to_reload;
bool task_was_run_on_quiescence_monitored_queue = false; bool task_was_run_on_quiescence_monitored_queue = false;
bool nesting_observer_registered_ = false; bool nesting_observer_registered_ = false;
// Due to nested runloops more than one task can be executing concurrently. // Due to nested runloops more than one task can be executing concurrently.
std::list<ExecutingTask> task_execution_stack; std::vector<ExecutingTask> task_execution_stack;
Observer* observer = nullptr; // NOT OWNED Observer* observer = nullptr; // NOT OWNED
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment