Commit c5bfedac authored by François Doray, committed by Commit Bot

[threadpool] Remove unused histogram ThreadPool.NumTasksBetweenWaits.*.

Bug: 948455, 982146, 976123
Change-Id: If04b6996437204c6e1cc7d926ea259d46eb108c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1757504
Commit-Queue: François Doray <fdoray@chromium.org>
Auto-Submit: François Doray <fdoray@chromium.org>
Reviewed-by: Gabriel Charette <gab@chromium.org>
Reviewed-by: Etienne Pierre-Doray <etiennep@chromium.org>
Reviewed-by: Alexander Timin <altimin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#688171}
parent d2ff4af9
@@ -41,6 +41,25 @@
#include "base/win/windows_version.h"
#endif  // defined(OS_WIN)
// Data from deprecated UMA histograms:
//
// ThreadPool.NumTasksBetweenWaits.(Browser/Renderer).Foreground, August 2019
// Number of tasks between two waits by a foreground worker thread in a
// browser/renderer process.
//
// Windows (browser/renderer)
// 1 at 87th percentile / 92nd percentile
// 2 at 95th percentile / 98th percentile
// 5 at 99th percentile / 100th percentile
// Mac (browser/renderer)
// 1 at 81st percentile / 90th percentile
// 2 at 92nd percentile / 97th percentile
// 5 at 98th percentile / 100th percentile
// Android (browser/renderer)
// 1 at 92nd percentile / 96th percentile
// 2 at 97th percentile / 98th percentile
// 5 at 99th percentile / 100th percentile
namespace base {
namespace internal {
@@ -49,8 +68,6 @@ namespace {
constexpr char kDetachDurationHistogramPrefix[] = "ThreadPool.DetachDuration.";
constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
"ThreadPool.NumTasksBeforeDetach.";
constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
"ThreadPool.NumTasksBetweenWaits.";
constexpr char kNumWorkersHistogramPrefix[] = "ThreadPool.NumWorkers.";
constexpr char kNumActiveWorkersHistogramPrefix[] =
"ThreadPool.NumActiveWorkers.";
@@ -258,10 +275,6 @@ class ThreadGroupImpl::WorkerThreadDelegateImpl : public WorkerThread::Delegate,
// Accessed only from the worker thread.
struct WorkerOnly {
// Number of tasks executed since the last time the
// ThreadPool.NumTasksBetweenWaits histogram was recorded.
size_t num_tasks_since_last_wait = 0;
// Number of tasks executed since the last time the
// ThreadPool.NumTasksBeforeDetach histogram was recorded.
size_t num_tasks_since_last_detach = 0;
@@ -345,17 +358,6 @@ ThreadGroupImpl::ThreadGroupImpl(StringPiece histogram_label,
1000,
50,
HistogramBase::kUmaTargetedHistogramFlag)),
// Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A WorkerThread is
// expected to run between zero and a few tens of tasks between waits.
// When it runs more than 100 tasks, there is no need to know the exact
// number of tasks that ran.
num_tasks_between_waits_histogram_(Histogram::FactoryGet(
JoinString({kNumTasksBetweenWaitsHistogramPrefix, histogram_label},
""),
1,
100,
50,
HistogramBase::kUmaTargetedHistogramFlag)),
// Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A ThreadGroup is
// expected to run between zero and a few tens of workers.
// When it runs more than 100 workers, there is no need to know the exact
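For reference, both the removed `num_tasks_between_waits_histogram_` and the remaining `num_workers_histogram_` are registered with the bucket layout that the `UMA_HISTOGRAM_COUNTS_100` macro produces, as the comments above note. A minimal sketch of that correspondence, assuming Chromium's `base/metrics` API; the histogram suffix `.Example` and the function name are illustrative, not code from this CL:

```cpp
// Illustrative only -- not part of this CL. Shows why FactoryGet(1, 100, 50,
// kUmaTargetedHistogramFlag) "mimics" UMA_HISTOGRAM_COUNTS_100: the macro
// expands to the same min/max/bucket-count, but caches the histogram at the
// call site and expects a constant name, whereas FactoryGet() accepts a name
// built at runtime (e.g. with a per-thread-group suffix).
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros.h"

void RecordSampleBothWays(int num_tasks) {
  // Macro form: fixed name known at compile time.
  UMA_HISTOGRAM_COUNTS_100("ThreadPool.NumTasksBetweenWaits.Example",
                           num_tasks);

  // Explicit form used by ThreadGroupImpl: same bucket layout, runtime name.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "ThreadPool.NumTasksBetweenWaits.Example",
      /*minimum=*/1, /*maximum=*/100, /*bucket_count=*/50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(num_tasks);
}
```

Building the name at runtime is what allows a single constructor to register one histogram per thread-group label, which is why the explicit FactoryGet() call was used instead of the macro.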
@@ -569,8 +571,6 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnMainEntry(
outer_->after_start().worker_environment);
#endif  // defined(OS_WIN)
DCHECK_EQ(worker_only().num_tasks_since_last_wait, 0U);
PlatformThread::SetName(
StringPrintf("ThreadPool%sWorker", outer_->thread_group_label_.c_str()));
@@ -632,7 +632,6 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::DidProcessTask(
DCHECK(worker_only().is_running_task);
DCHECK(read_worker().may_block_start_time.is_null());
++worker_only().num_tasks_since_last_wait;
++worker_only().num_tasks_since_last_detach;
// A transaction to the TaskSource to reenqueue, if any. Instantiated here as
@@ -738,14 +737,6 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnWorkerBecomesIdleLockRequired(
WorkerThread* worker) {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
// Record the ThreadPool.NumTasksBetweenWaits histogram. After GetWork()
// returns nullptr, the WorkerThread will perform a wait on its
// WaitableEvent, so we record how many tasks were run since the last wait
// here.
outer_->num_tasks_between_waits_histogram_->Add(
worker_only().num_tasks_since_last_wait);
worker_only().num_tasks_since_last_wait = 0;
// Add the worker to the idle stack.
DCHECK(!outer_->idle_workers_stack_.Contains(worker));
outer_->idle_workers_stack_.Push(worker);
...
@@ -98,10 +98,6 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup {
return num_tasks_before_detach_histogram_;
}
const HistogramBase* num_tasks_between_waits_histogram() const {
return num_tasks_between_waits_histogram_;
}
const HistogramBase* num_workers_histogram() const {
return num_workers_histogram_;
}
@@ -347,10 +343,6 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup {
// Intentionally leaked.
HistogramBase* const num_tasks_before_detach_histogram_;
// ThreadPool.NumTasksBetweenWaits.[thread group name] histogram.
// Intentionally leaked.
HistogramBase* const num_tasks_between_waits_histogram_;
// ThreadPool.NumWorkers.[thread group name] histogram.
// Intentionally leaked.
HistogramBase* const num_workers_histogram_;
...
@@ -620,92 +620,6 @@ class ThreadGroupImplHistogramTest : public ThreadGroupImplImplTest {
} // namespace
TEST_F(ThreadGroupImplHistogramTest, NumTasksBetweenWaits) {
WaitableEvent event;
CreateAndStartThreadGroup(TimeDelta::Max(), kMaxTasks);
auto task_runner =
test::CreateSequencedTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
&mock_pooled_task_runner_delegate_);
// Post a task.
task_runner->PostTask(FROM_HERE, BindOnce(&test::WaitWithoutBlockingObserver,
Unretained(&event)));
// Post 2 more tasks while the first task hasn't completed its execution. It
// is guaranteed that these tasks will run immediately after the first task,
// without allowing the worker to sleep.
task_runner->PostTask(FROM_HERE, DoNothing());
task_runner->PostTask(FROM_HERE, DoNothing());
// Allow tasks to run and wait until the WorkerThread is idle.
event.Signal();
thread_group_->WaitForAllWorkersIdleForTesting();
// Wake up the WorkerThread that just became idle by posting a task and
// wait until it becomes idle again. The WorkerThread should record the
// ThreadPool.NumTasksBetweenWaits.* histogram on wake up.
task_runner->PostTask(FROM_HERE, DoNothing());
thread_group_->WaitForAllWorkersIdleForTesting();
// Verify that counts were recorded to the histogram as expected.
const auto* histogram = thread_group_->num_tasks_between_waits_histogram();
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
// Verifies that NumTasksBetweenWaits histogram is logged as expected across
// idle and cleanup periods.
TEST_F(ThreadGroupImplHistogramTest,
NumTasksBetweenWaitsWithIdlePeriodAndCleanup) {
WaitableEvent tasks_can_exit_event;
CreateAndStartThreadGroup(kReclaimTimeForCleanupTests, kMaxTasks);
WaitableEvent workers_continue;
FloodPool(&workers_continue);
const auto* histogram = thread_group_->num_tasks_between_waits_histogram();
// NumTasksBetweenWaits shouldn't be logged until idle.
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
// Make all workers go idle.
workers_continue.Signal();
thread_group_->WaitForAllWorkersIdleForTesting();
// All workers should have reported a single hit in the "1" bucket per the
// histogram being reported when going idle and each worker having processed
// precisely 1 task per the controlled flooding logic above.
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
thread_group_->WaitForWorkersCleanedUpForTesting(kMaxTasks - 1);
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
// Flooding the thread group once again (without letting any workers go idle)
// shouldn't affect the counts either.
workers_continue.Reset();
FloodPool(&workers_continue);
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
EXPECT_EQ(static_cast<int>(kMaxTasks),
histogram->SnapshotSamples()->GetCount(1));
EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
workers_continue.Signal();
thread_group_->WaitForAllWorkersIdleForTesting();
}
TEST_F(ThreadGroupImplHistogramTest, NumTasksBeforeCleanup) {
CreateThreadGroup();
auto histogrammed_thread_task_runner =
...
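The removed tests above verify bucket counts by snapshotting the leaked `HistogramBase*` through a test-only accessor. For comparison, a minimal sketch of the more common pattern for this kind of check, assuming `base::HistogramTester`; the test and histogram names are illustrative, not code from this CL:

```cpp
// Illustrative sketch, not part of this CL: verifying UMA bucket counts with
// base::HistogramTester instead of snapshotting a HistogramBase* directly.
#include "base/metrics/histogram_macros.h"
#include "base/test/metrics/histogram_tester.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(ExampleHistogramTest, CountsLandInExpectedBuckets) {
  base::HistogramTester histogram_tester;

  // The code under test would normally emit this sample.
  UMA_HISTOGRAM_COUNTS_100("ThreadPool.NumTasksBetweenWaits.Example", 3);

  // Exactly one sample was recorded, and it fell in the bucket for value 3.
  histogram_tester.ExpectTotalCount("ThreadPool.NumTasksBetweenWaits.Example",
                                    1);
  histogram_tester.ExpectBucketCount("ThreadPool.NumTasksBetweenWaits.Example",
                                     3, 1);
}
```

A HistogramTester-based check does not need the class under test to expose its histogram pointers, which is part of why the accessor and member removed in this CL carried extra maintenance cost.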
@@ -142475,6 +142475,9 @@ should be kept until we use this API. -->
</histogram>
<histogram base="true" name="ThreadPool.NumTasksBetweenWaits" units="tasks">
<obsolete>
Deprecated 8/2019. Not used in active investigations.
</obsolete>
<owner>fdoray@chromium.org</owner>
<owner>gab@chromium.org</owner>
<owner>robliao@chromium.org</owner>