Commit a71ceeb5 authored by Etienne Pierre-Doray, committed by Commit Bot

[TaskScheduler]: Implement scheduler perftests.

Implements perftests that measure throughput of running and posting tasks.

Results on Windows P920:
[ RUN      ] TaskSchedulerPerfTest.PostThenRunNoOpTasks
*RESULT Posting tasks throughput: Post-then-run no-op tasks= 1.86 us/task
*RESULT Running tasks throughput: Post-then-run no-op tasks= 3.5304 us/task
*RESULT Num tasks posted: Post-then-run no-op tasks= 10000 tasks
[       OK ] TaskSchedulerPerfTest.PostThenRunNoOpTasks (57 ms)
[ RUN      ] TaskSchedulerPerfTest.PostThenRunNoOpTasksManyThreads
*RESULT Posting tasks throughput: Post-then-run no-op tasks many threads= 2.693075 us/task
*RESULT Running tasks throughput: Post-then-run no-op tasks many threads= 2.58605 us/task
*RESULT Num tasks posted: Post-then-run no-op tasks many threads= 40000 tasks
[       OK ] TaskSchedulerPerfTest.PostThenRunNoOpTasksManyThreads (136 ms)
[ RUN      ] TaskSchedulerPerfTest.PostThenRunNoOpTasksMorePostingThanRunningThreads
*RESULT Posting tasks throughput: Post-then-run no-op tasks more posting than running threads= 3.253125 us/task
*RESULT Running tasks throughput: Post-then-run no-op tasks more posting than running threads= 2.419125 us/task
*RESULT Num tasks posted: Post-then-run no-op tasks more posting than running threads= 40000 tasks
[       OK ] TaskSchedulerPerfTest.PostThenRunNoOpTasksMorePostingThanRunningThreads (135 ms)
[ RUN      ] TaskSchedulerPerfTest.PostRunNoOpTasks
*RESULT Posting tasks throughput: Post/run no-op tasks= 2.9757 us/task
*RESULT Running tasks throughput: Post/run no-op tasks= 3.2136 us/task
*RESULT Num tasks posted: Post/run no-op tasks= 10000 tasks
[       OK ] TaskSchedulerPerfTest.PostRunNoOpTasks (35 ms)
[ RUN      ] TaskSchedulerPerfTest.PostRunNoOpTasksManyThreads
*RESULT Posting tasks throughput: Post/run no-op tasks many threads= 8.7977 us/task
*RESULT Running tasks throughput: Post/run no-op tasks many threads= 2.26535 us/task
*RESULT Num tasks posted: Post/run no-op tasks many threads= 40000 tasks
[       OK ] TaskSchedulerPerfTest.PostRunNoOpTasksManyThreads (94 ms)
[ RUN      ] TaskSchedulerPerfTest.PostRunBusyTasksManyThreads
*RESULT Posting tasks throughput: Post/run busy tasks many threads= 4.046625 us/task
*RESULT Running tasks throughput: Post/run busy tasks many threads= 50.63795 us/task
*RESULT Num tasks posted: Post/run busy tasks many threads= 40000 tasks
[       OK ] TaskSchedulerPerfTest.PostRunBusyTasksManyThreads (2028 ms)
[ RUN      ] TaskSchedulerPerfTest.PostWithConstantWorkload
*RESULT Posting tasks throughput: Post tasks with constant workload= .0135 us/task
*RESULT Running tasks throughput: Post tasks with constant workload= 2.5396 us/task
*RESULT Num tasks posted: Post tasks with constant workload= 10000 tasks
[       OK ] TaskSchedulerPerfTest.PostWithConstantWorkload (29 ms)
[----------] 7 tests from TaskSchedulerPerfTest (2523 ms total)
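
To reproduce locally, the suite can be built and run with, e.g. (output
directory shown is illustrative):
  ninja -C out/Release base_perftests
  out/Release/base_perftests --gtest_filter=TaskSchedulerPerfTest.*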

Change-Id: I1e47eaf2db792d96e3e3ba290769e95673bd0b62
Reviewed-on: https://chromium-review.googlesource.com/c/1296624
Commit-Queue: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: François Doray <fdoray@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607835}
parent 05fcb2e2
base/BUILD.gn:
@@ -2070,6 +2070,7 @@ test("base_perftests") {
     "message_loop/message_pump_perftest.cc",
     "observer_list_perftest.cc",
     "task/sequence_manager/sequence_manager_perftest.cc",
+    "task/task_scheduler/task_scheduler_perftest.cc",
     # "test/run_all_unittests.cc",
     "json/json_perftest.cc",
...

base/task/task_scheduler/task_scheduler_perftest.cc (new file):
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stddef.h>
#include <atomic>
#include <memory>
#include <vector>
#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/optional.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/post_task.h"
#include "base/task/task_scheduler/task_scheduler.h"
#include "base/threading/simple_thread.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/perf/perf_test.h"
namespace base {
namespace internal {
namespace {
enum class ExecutionMode {
// Allows tasks to start running while tasks are being posted by posting
// threads.
kPostAndRun,
// Uses an execution fence to wait for all posting threads to be done before
// running tasks that were posted.
kPostThenRun,
};
// A thread that waits for the caller to signal an event before proceeding to
// call action.Run().
class PostingThread : public SimpleThread {
public:
// Creates a PostingThread that waits on |start_event| before calling
// action.Run().
PostingThread(WaitableEvent* start_event,
base::OnceClosure action,
base::OnceClosure completion)
: SimpleThread("PostingThread"),
start_event_(start_event),
action_(std::move(action)),
completion_(std::move(completion)) {
Start();
}
void Run() override {
start_event_->Wait();
std::move(action_).Run();
std::move(completion_).Run();
}
private:
WaitableEvent* const start_event_;
base::OnceClosure action_;
base::OnceClosure completion_;
DISALLOW_COPY_AND_ASSIGN(PostingThread);
};
class TaskSchedulerPerfTest : public testing::Test {
public:
// Posting actions:
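  // Posts |num_tasks| no-op tasks, binding a new OnceClosure for each post so
  // that the measured cost includes BindOnce() in addition to PostTask().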
void ContinuouslyBindAndPostNoOpTasks(size_t num_tasks) {
scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
for (size_t i = 0; i < num_tasks; ++i) {
++num_tasks_pending_;
++num_posted_tasks_;
task_runner->PostTask(FROM_HERE,
base::BindOnce(
[](std::atomic_size_t* num_task_pending) {
(*num_task_pending)--;
},
&num_tasks_pending_));
}
}
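  // Posts |num_tasks| no-op tasks, reusing a single RepeatingClosure so that
  // the measurement isolates PostTask() overhead.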
void ContinuouslyPostNoOpTasks(size_t num_tasks) {
scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
base::RepeatingClosure closure = base::BindRepeating(
[](std::atomic_size_t* num_task_pending) { (*num_task_pending)--; },
&num_tasks_pending_);
for (size_t i = 0; i < num_tasks; ++i) {
++num_tasks_pending_;
++num_posted_tasks_;
task_runner->PostTask(FROM_HERE, closure);
}
}
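  // Posts |num_tasks| tasks that each busy-wait for |duration| to emulate
  // CPU-bound work.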
void ContinuouslyPostBusyWaitTasks(size_t num_tasks,
base::TimeDelta duration) {
scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
base::RepeatingClosure closure = base::BindRepeating(
[](std::atomic_size_t* num_task_pending, base::TimeDelta duration) {
base::TimeTicks end_time = base::TimeTicks::Now() + duration;
while (base::TimeTicks::Now() < end_time)
;
(*num_task_pending)--;
},
Unretained(&num_tasks_pending_), duration);
for (size_t i = 0; i < num_tasks; ++i) {
++num_tasks_pending_;
++num_posted_tasks_;
task_runner->PostTask(FROM_HERE, closure);
}
}
protected:
TaskSchedulerPerfTest() { TaskScheduler::Create("PerfTest"); }
~TaskSchedulerPerfTest() override { TaskScheduler::SetInstance(nullptr); }
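  // Starts the scheduler with |num_running_threads| in the foreground worker
  // pool and spawns |num_posting_threads| PostingThreads, each of which waits
  // on |start_posting_tasks_| before invoking |post_action|.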
void StartTaskScheduler(size_t num_running_threads,
size_t num_posting_threads,
base::RepeatingClosure post_action) {
constexpr TimeDelta kSuggestedReclaimTime = TimeDelta::FromSeconds(30);
constexpr int kMaxNumBackgroundThreads = 1;
constexpr int kMaxNumBackgroundBlockingThreads = 1;
constexpr int kMaxNumForegroundBlockingThreads = 1;
TaskScheduler::GetInstance()->Start(
{{kMaxNumBackgroundThreads, kSuggestedReclaimTime},
{kMaxNumBackgroundBlockingThreads, kSuggestedReclaimTime},
{num_running_threads, kSuggestedReclaimTime},
{kMaxNumForegroundBlockingThreads, kSuggestedReclaimTime}},
nullptr);
base::RepeatingClosure done = BarrierClosure(
num_posting_threads,
base::BindOnce(&TaskSchedulerPerfTest::OnCompletePostingTasks,
base::Unretained(this)));
for (size_t i = 0; i < num_posting_threads; ++i) {
threads_.emplace_back(std::make_unique<PostingThread>(
&start_posting_tasks_, post_action, done));
}
}
void OnCompletePostingTasks() { complete_posting_tasks_.Signal(); }
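  // Signals the posting threads to start, measures how long posting takes,
  // then flushes the scheduler and measures how long the posted tasks take to
  // run. Results are reported via perf_test::PrintResult().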
void Benchmark(const std::string& trace, ExecutionMode execution_mode) {
base::Optional<TaskScheduler::ScopedExecutionFence> execution_fence;
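    // In kPostThenRun mode, hold an execution fence so that no task starts
    // running until all posting threads have finished posting.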
if (execution_mode == ExecutionMode::kPostThenRun) {
execution_fence.emplace();
}
TimeTicks tasks_run_start = TimeTicks::Now();
start_posting_tasks_.Signal();
complete_posting_tasks_.Wait();
post_task_duration_ = TimeTicks::Now() - tasks_run_start;
if (execution_mode == ExecutionMode::kPostThenRun) {
tasks_run_start = TimeTicks::Now();
execution_fence.reset();
}
// Wait for no pending tasks.
TaskScheduler::GetInstance()->FlushForTesting();
tasks_run_duration_ = TimeTicks::Now() - tasks_run_start;
ASSERT_EQ(0U, num_tasks_pending_);
for (auto& thread : threads_)
thread->Join();
TaskScheduler::GetInstance()->JoinForTesting();
perf_test::PrintResult(
"Posting tasks throughput", "", trace,
num_posted_tasks_ /
static_cast<double>(post_task_duration_.InMilliseconds()),
"tasks/ms", true);
perf_test::PrintResult(
"Running tasks throughput", "", trace,
num_posted_tasks_ /
static_cast<double>(tasks_run_duration_.InMilliseconds()),
"tasks/ms", true);
perf_test::PrintResult("Num tasks posted", "", trace, num_posted_tasks_,
"tasks", true);
}
private:
WaitableEvent start_posting_tasks_;
WaitableEvent complete_posting_tasks_;
TimeDelta post_task_duration_;
TimeDelta tasks_run_duration_;
std::atomic_size_t num_tasks_pending_{0};
std::atomic_size_t num_posted_tasks_{0};
std::vector<std::unique_ptr<PostingThread>> threads_;
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerPerfTest);
};
} // namespace
TEST_F(TaskSchedulerPerfTest, BindPostThenRunNoOpTasks) {
StartTaskScheduler(
1, 1,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyBindAndPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Bind+Post-then-run no-op tasks", ExecutionMode::kPostThenRun);
}
TEST_F(TaskSchedulerPerfTest, PostThenRunNoOpTasks) {
StartTaskScheduler(
1, 1,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Post-then-run no-op tasks", ExecutionMode::kPostThenRun);
}
TEST_F(TaskSchedulerPerfTest, PostThenRunNoOpTasksManyThreads) {
StartTaskScheduler(
4, 4,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Post-then-run no-op tasks many threads",
ExecutionMode::kPostThenRun);
}
TEST_F(TaskSchedulerPerfTest,
PostThenRunNoOpTasksMorePostingThanRunningThreads) {
StartTaskScheduler(
1, 4,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Post-then-run no-op tasks more posting than running threads",
ExecutionMode::kPostThenRun);
}
TEST_F(TaskSchedulerPerfTest, PostRunNoOpTasks) {
StartTaskScheduler(
1, 1,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Post/run no-op tasks", ExecutionMode::kPostAndRun);
}
TEST_F(TaskSchedulerPerfTest, PostRunNoOpTasksManyThreads) {
StartTaskScheduler(
4, 4,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostNoOpTasks,
Unretained(this), 10000));
Benchmark("Post/run no-op tasks many threads", ExecutionMode::kPostAndRun);
}
TEST_F(TaskSchedulerPerfTest, PostRunBusyTasksManyThreads) {
StartTaskScheduler(
4, 4,
BindRepeating(&TaskSchedulerPerfTest::ContinuouslyPostBusyWaitTasks,
Unretained(this), 10000,
base::TimeDelta::FromMicroseconds(200)));
Benchmark("Post/run busy tasks many threads", ExecutionMode::kPostAndRun);
}
} // namespace internal
} // namespace base
base/threading/simple_thread.h:
@@ -119,7 +119,7 @@ class BASE_EXPORT SimpleThread : public PlatformThread::Delegate {
   bool HasBeenStarted();

   // Returns True if Join() has ever been called.
-  bool HasBeenJoined() { return joined_; }
+  bool HasBeenJoined() const { return joined_; }

   // Returns true if Start() or StartAsync() has been called.
   bool HasStartBeenAttempted() { return start_called_; }
...