Commit 4e4da122 authored by Gabriel Charette's avatar Gabriel Charette Committed by Commit Bot

[TaskScheduler] Use TaskSchedulerWorkerPoolImplTestBase for common code of StandbyPolicy tests

In preparation for further such tests for crbug.com/847501

Add support for a custom reclaim time to
TaskSchedulerWorkerPoolImplTestBase and remove its requirement that
workers be idle before JoinForTesting() (it was overkill, since
task_tracker_.FlushForTesting() flushes all posted tasks and
worker_pool_->JoinForTesting() already waits for the workers to return
from running their last task).

R=fdoray@chromium.org

Bug: 847501
Change-Id: I4437163427edf4778647fe1fe9e14f9332c466db
Reviewed-on: https://chromium-review.googlesource.com/1085383
Reviewed-by: François Doray <fdoray@chromium.org>
Commit-Queue: Gabriel Charette <gab@chromium.org>
Cr-Commit-Position: refs/heads/master@{#564995}
parent abcd38e0
...@@ -78,15 +78,15 @@ class TaskSchedulerWorkerPoolImplTestBase { ...@@ -78,15 +78,15 @@ class TaskSchedulerWorkerPoolImplTestBase {
TaskSchedulerWorkerPoolImplTestBase() TaskSchedulerWorkerPoolImplTestBase()
: service_thread_("TaskSchedulerServiceThread"){}; : service_thread_("TaskSchedulerServiceThread"){};
void CommonSetUp() { CreateAndStartWorkerPool(TimeDelta::Max(), kMaxTasks); } void CommonSetUp(TimeDelta suggested_reclaim_time = TimeDelta::Max()) {
CreateAndStartWorkerPool(suggested_reclaim_time, kMaxTasks);
}
void CommonTearDown() { void CommonTearDown() {
service_thread_.Stop(); service_thread_.Stop();
task_tracker_.FlushForTesting(); task_tracker_.FlushForTesting();
if (worker_pool_) { if (worker_pool_)
worker_pool_->WaitForAllWorkersIdleForTesting();
worker_pool_->JoinForTesting(); worker_pool_->JoinForTesting();
}
} }
void CreateWorkerPool() { void CreateWorkerPool() {
...@@ -779,78 +779,65 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) { ...@@ -779,78 +779,65 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10)); EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
} }
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) { namespace {
constexpr int kLocalMaxTasks = 8;
TaskTracker task_tracker("Test");
DelayedTaskManager delayed_task_manager;
scoped_refptr<TaskRunner> service_thread_task_runner =
MakeRefCounted<TestSimpleTaskRunner>();
delayed_task_manager.Start(service_thread_task_runner);
auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
"OnePolicyWorkerPool", "A", ThreadPriority::NORMAL,
task_tracker.GetTrackedRef(), &delayed_task_manager);
worker_pool->Start(
SchedulerWorkerPoolParams(kLocalMaxTasks, TimeDelta::Max()),
kLocalMaxTasks, service_thread_task_runner, nullptr,
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE);
ASSERT_TRUE(worker_pool);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting());
worker_pool->JoinForTesting();
}
// Verify the SchedulerWorkerPoolImpl keeps at least one idle standby thread, class TaskSchedulerWorkerPoolStandbyPolicyTest
// capacity permitting. : public TaskSchedulerWorkerPoolImplTestBase,
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) { public testing::Test {
constexpr size_t kLocalMaxTasks = 3; public:
TaskSchedulerWorkerPoolStandbyPolicyTest() = default;
TaskTracker task_tracker("Test"); void SetUp() override {
DelayedTaskManager delayed_task_manager; TaskSchedulerWorkerPoolImplTestBase::CommonSetUp(
scoped_refptr<TaskRunner> service_thread_task_runner = kReclaimTimeForCleanupTests);
MakeRefCounted<TestSimpleTaskRunner>(); }
delayed_task_manager.Start(service_thread_task_runner);
auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>( void TearDown() override {
"StandbyThreadWorkerPool", "A", ThreadPriority::NORMAL, TaskSchedulerWorkerPoolImplTestBase::CommonTearDown();
task_tracker.GetTrackedRef(), &delayed_task_manager); }
worker_pool->Start(
SchedulerWorkerPoolParams(kLocalMaxTasks, kReclaimTimeForCleanupTests), private:
kLocalMaxTasks, service_thread_task_runner, nullptr, DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolStandbyPolicyTest);
SchedulerWorkerPoolImpl::WorkerEnvironment::NONE); };
ASSERT_TRUE(worker_pool);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting()); } // namespace
TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
}
// Verify that the SchedulerWorkerPoolImpl keeps at least one idle standby
// thread, capacity permitting.
TEST_F(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
auto task_runner = auto task_runner =
worker_pool->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()}); worker_pool_->CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()});
WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC); WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
WaitableEvent threads_continue; WaitableEvent threads_continue;
RepeatingClosure closure = BindRepeating( RepeatingClosure thread_blocker = BindLambdaForTesting([&]() {
[](WaitableEvent* thread_running, WaitableEvent* threads_continue) { thread_running.Signal();
thread_running->Signal(); WaitWithoutBlockingObserver(&threads_continue);
WaitWithoutBlockingObserver(threads_continue); });
},
Unretained(&thread_running), Unretained(&threads_continue));
// There should be one idle thread until we reach capacity // There should be one idle thread until we reach capacity
for (size_t i = 0; i < kLocalMaxTasks; ++i) { for (size_t i = 0; i < kMaxTasks; ++i) {
EXPECT_EQ(i + 1, worker_pool->NumberOfWorkersForTesting()); EXPECT_EQ(i + 1, worker_pool_->NumberOfWorkersForTesting());
task_runner->PostTask(FROM_HERE, closure); task_runner->PostTask(FROM_HERE, thread_blocker);
thread_running.Wait(); thread_running.Wait();
} }
// There should not be an extra idle thread if it means going above capacity // There should not be an extra idle thread if it means going above capacity
EXPECT_EQ(kLocalMaxTasks, worker_pool->NumberOfWorkersForTesting()); EXPECT_EQ(kMaxTasks, worker_pool_->NumberOfWorkersForTesting());
threads_continue.Signal(); threads_continue.Signal();
// Wait long enough for all but one worker to clean up. // Wait long enough for all but one worker to clean up.
worker_pool->WaitForWorkersCleanedUpForTesting(kLocalMaxTasks - 1); worker_pool_->WaitForWorkersCleanedUpForTesting(kMaxTasks - 1);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting()); EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
// Give extra time for a worker to cleanup : none should as the pool is // Give extra time for a worker to cleanup : none should as the pool is
// expected to keep a worker ready regardless of how long it was idle for. // expected to keep a worker ready regardless of how long it was idle for.
PlatformThread::Sleep(kReclaimTimeForCleanupTests); PlatformThread::Sleep(kReclaimTimeForCleanupTests);
EXPECT_EQ(1U, worker_pool->NumberOfWorkersForTesting()); EXPECT_EQ(1U, worker_pool_->NumberOfWorkersForTesting());
worker_pool->JoinForTesting();
} }
namespace { namespace {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment