Commit f7adb4b1 authored by Brian Sheedy, committed by Commit Bot

Switch base_perftests to histograms

Switches uses of PrintResult to PerfResultReporter in the base_perftests
target and whitelists it for conversion to histograms before uploading
to the perf dashboard.
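
For reference, this is the general shape of the conversion applied throughout the CL (a minimal sketch with illustrative names and values; each test supplies its own metric prefix, story name, and units):

    // Before: one free-form PrintResult call per value.
    perf_test::PrintResult("MyTest", " some metric", "", 42.0, "runs/s", true);

    // After: a reporter scoped to a metric prefix and a story name, with
    // registered metrics that the histogram conversion can understand.
    perf_test::PerfResultReporter reporter("MyTest.", "my_story");
    reporter.RegisterImportantMetric("throughput", "runs/s");
    reporter.AddResult("throughput", 42.0);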

Bug: 923564
Change-Id: I0947dd98f842b072af7f7637db2191bb4a299992
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1968239
Reviewed-by: Sami Kyöstilä <skyostil@chromium.org>
Reviewed-by: Albert J. Wong <ajwong@chromium.org>
Commit-Queue: Brian Sheedy <bsheedy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#726218}
parent 1ff2fd21
@@ -11,7 +11,7 @@
 #include "base/timer/lap_timer.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace {
@@ -29,6 +29,27 @@ constexpr int kMultiBucketIncrement = 13;
 // Final size is 24 + (13 * 22) = 310 bytes.
 constexpr int kMultiBucketRounds = 22;
 
+constexpr char kMetricPrefixMemoryAllocation[] = "MemoryAllocation.";
+constexpr char kMetricThroughput[] = "throughput";
+constexpr char kMetricTimePerAllocation[] = "time_per_allocation";
+constexpr char kStoryBaseSingleBucket[] = "single_bucket";
+constexpr char kStoryBaseSingleBucketWithFree[] = "single_bucket_with_free";
+constexpr char kStoryBaseMultiBucket[] = "multi_bucket";
+constexpr char kStoryBaseMultiBucketWithFree[] = "multi_bucket_with_free";
+constexpr char kStorySuffixWithCompetingThread[] = "_with_competing_thread";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixMemoryAllocation,
+                                         story_name);
+  reporter.RegisterImportantMetric(kMetricThroughput, "runs/s");
+  reporter.RegisterImportantMetric(kMetricTimePerAllocation, "ns");
+  return reporter;
+}
+
+std::string GetSuffix(bool competing_thread) {
+  return competing_thread ? kStorySuffixWithCompetingThread : "";
+}
+
 class AllocatingThread : public PlatformThread::Delegate {
  public:
   explicit AllocatingThread(PartitionAllocatorGeneric* allocator)
@@ -60,14 +81,12 @@ class AllocatingThread : public PlatformThread::Delegate {
   PlatformThreadHandle thread_handle_;
 };
 
-void DisplayResults(const std::string& measurement,
-                    const std::string& modifier,
+void DisplayResults(const std::string& story_name,
                     size_t iterations_per_second) {
-  perf_test::PrintResult(measurement, modifier, "", iterations_per_second,
-                         "runs/s", true);
-  perf_test::PrintResult(measurement, modifier, "",
-                         static_cast<size_t>(1e9 / iterations_per_second),
-                         "ns/run", true);
+  auto reporter = SetUpReporter(story_name);
+  reporter.AddResult(kMetricThroughput, iterations_per_second);
+  reporter.AddResult(kMetricTimePerAllocation,
+                     static_cast<size_t>(1e9 / iterations_per_second));
 }
 
 class MemoryAllocationPerfNode {
@@ -99,7 +118,7 @@ class MemoryAllocationPerfTest : public testing::Test {
   }
 
  protected:
-  void TestSingleBucket() {
+  void TestSingleBucket(bool competing_thread) {
     MemoryAllocationPerfNode* first =
         reinterpret_cast<MemoryAllocationPerfNode*>(
             alloc_.root()->Alloc(40, "<testing>"));
@@ -122,12 +141,12 @@ class MemoryAllocationPerfTest : public testing::Test {
     MemoryAllocationPerfNode::FreeAll(first, alloc_);
 
-    DisplayResults("MemoryAllocationPerfTest",
-                   " single bucket allocation (40 bytes)",
-                   timer_.LapsPerSecond());
+    DisplayResults(
+        std::string(kStoryBaseSingleBucket) + GetSuffix(competing_thread),
+        timer_.LapsPerSecond());
   }
 
-  void TestSingleBucketWithFree() {
+  void TestSingleBucketWithFree(bool competing_thread) {
     // Allocate an initial element to make sure the bucket stays set up.
     void* elem = alloc_.root()->Alloc(40, "<testing>");
@@ -140,12 +159,12 @@ class MemoryAllocationPerfTest : public testing::Test {
     } while (!timer_.HasTimeLimitExpired());
 
     alloc_.root()->Free(elem);
-    DisplayResults("MemoryAllocationPerfTest",
-                   " single bucket allocation + free (40 bytes)",
-                   timer_.LapsPerSecond());
+    DisplayResults(std::string(kStoryBaseSingleBucketWithFree) +
+                       GetSuffix(competing_thread),
+                   timer_.LapsPerSecond());
   }
 
-  void TestMultiBucket() {
+  void TestMultiBucket(bool competing_thread) {
     MemoryAllocationPerfNode* first =
         reinterpret_cast<MemoryAllocationPerfNode*>(
             alloc_.root()->Alloc(40, "<testing>"));
@@ -168,11 +187,12 @@ class MemoryAllocationPerfTest : public testing::Test {
     MemoryAllocationPerfNode::FreeAll(first, alloc_);
 
-    DisplayResults("MemoryAllocationPerfTest", " multi-bucket allocation",
-                   timer_.LapsPerSecond() * kMultiBucketRounds);
+    DisplayResults(
+        std::string(kStoryBaseMultiBucket) + GetSuffix(competing_thread),
+        timer_.LapsPerSecond() * kMultiBucketRounds);
   }
 
-  void TestMultiBucketWithFree() {
+  void TestMultiBucketWithFree(bool competing_thread) {
     std::vector<void*> elems;
     elems.reserve(kMultiBucketRounds);
     // Do an initial round of allocation to make sure that the buckets stay in
@@ -199,8 +219,8 @@ class MemoryAllocationPerfTest : public testing::Test {
       alloc_.root()->Free(ptr);
     }
 
-    DisplayResults("MemoryAllocationPerfTest",
-                   " multi-bucket allocation + free",
+    DisplayResults(std::string(kStoryBaseMultiBucketWithFree) +
+                       GetSuffix(competing_thread),
                    timer_.LapsPerSecond() * kMultiBucketRounds);
   }
 
@@ -209,21 +229,21 @@ class MemoryAllocationPerfTest : public testing::Test {
 };
 
 TEST_F(MemoryAllocationPerfTest, SingleBucket) {
-  TestSingleBucket();
+  TestSingleBucket(false);
 }
 
 TEST_F(MemoryAllocationPerfTest, SingleBucketWithCompetingThread) {
   AllocatingThread t(&alloc_);
-  TestSingleBucket();
+  TestSingleBucket(true);
 }
 
 TEST_F(MemoryAllocationPerfTest, SingleBucketWithFree) {
-  TestSingleBucketWithFree();
+  TestSingleBucketWithFree(false);
 }
 
 TEST_F(MemoryAllocationPerfTest, SingleBucketWithFreeWithCompetingThread) {
   AllocatingThread t(&alloc_);
-  TestSingleBucketWithFree();
+  TestSingleBucketWithFree(true);
 }
 
 // Failing on Nexus5x: crbug.com/949838
@@ -236,21 +256,21 @@ TEST_F(MemoryAllocationPerfTest, SingleBucketWithFreeWithCompetingThread) {
 #define MAYBE_MultiBucketWithCompetingThread MultiBucketWithCompetingThread
 #endif
 
 TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucket) {
-  TestMultiBucket();
+  TestMultiBucket(false);
 }
 
 TEST_F(MemoryAllocationPerfTest, MAYBE_MultiBucketWithCompetingThread) {
   AllocatingThread t(&alloc_);
-  TestMultiBucket();
+  TestMultiBucket(true);
 }
 
 TEST_F(MemoryAllocationPerfTest, MultiBucketWithFree) {
-  TestMultiBucketWithFree();
+  TestMultiBucketWithFree(false);
 }
 
 TEST_F(MemoryAllocationPerfTest, MultiBucketWithFreeWithCompetingThread) {
   AllocatingThread t(&alloc_);
-  TestMultiBucketWithFree();
+  TestMultiBucketWithFree(true);
 }
 
 }  // anonymous namespace
...
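Note how the names compose: the reporter prepends the metric prefix to each registered metric, and the story identifies the variant. Assuming the reporter still routes through the legacy *RESULT stdout format (its implementation includes testing/perf/perf_test.h, and the validation comment later in this CL mentions stdout parsing), TestSingleBucket(true) would print something like:

    *RESULT MemoryAllocation.throughput: single_bucket_with_competing_thread= <value> runs/s
    *RESULT MemoryAllocation.time_per_allocation: single_bucket_with_competing_thread= <value> ns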
@@ -7,7 +7,7 @@
 #include "base/time/time.h"
 #include "base/timer/lap_timer.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace {
@@ -16,6 +16,17 @@ constexpr int kWarmupRuns = 1;
 constexpr TimeDelta kTimeLimit = TimeDelta::FromSeconds(1);
 constexpr int kTimeCheckInterval = 100000;
 
+constexpr char kMetricPrefixSpinLock[] = "SpinLock.";
+constexpr char kMetricLockUnlockThroughput[] = "lock_unlock_throughput";
+constexpr char kStoryBaseline[] = "baseline_story";
+constexpr char kStoryWithCompetingThread[] = "with_competing_thread";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixSpinLock, story_name);
+  reporter.RegisterImportantMetric(kMetricLockUnlockThroughput, "runs/s");
+  return reporter;
+}
+
 class Spin : public PlatformThread::Delegate {
  public:
   Spin(subtle::SpinLock* lock, size_t* data)
@@ -53,8 +64,8 @@ TEST(SpinLockPerfTest, Simple) {
     timer.NextLap();
   } while (!timer.HasTimeLimitExpired());
 
-  perf_test::PrintResult("SpinLockPerfTest", " lock()/unlock()", "",
-                         timer.LapsPerSecond(), "runs/s", true);
+  auto reporter = SetUpReporter(kStoryBaseline);
+  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
 }
 
 TEST(SpinLockPerfTest, WithCompetingThread) {
@@ -78,9 +89,8 @@ TEST(SpinLockPerfTest, WithCompetingThread) {
   thread_main.Stop();
   PlatformThread::Join(thread_handle);
 
-  perf_test::PrintResult("SpinLockPerfTest.WithCompetingThread",
-                         " lock()/unlock()", "", timer.LapsPerSecond(),
-                         "runs/s", true);
+  auto reporter = SetUpReporter(kStoryWithCompetingThread);
+  reporter.AddResult(kMetricLockUnlockThroughput, timer.LapsPerSecond());
 }
 
 }  // namespace base
...
@@ -14,7 +14,28 @@
 #include "base/strings/string_number_conversions.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
+
+namespace {
+
+constexpr int kBytesPerMegabyte = 1000000;
+
+constexpr char kMetricPrefixSHA1[] = "SHA1.";
+constexpr char kMetricRuntime[] = "runtime";
+constexpr char kMetricThroughput[] = "throughput";
+// Histograms automatically calculate mean, min, max, and standard deviation,
+// but not median, so have a separate metric for our manually calculated median.
+constexpr char kMetricMedianThroughput[] = "median_throughput";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixSHA1, story_name);
+  reporter.RegisterImportantMetric(kMetricRuntime, "us");
+  reporter.RegisterImportantMetric(kMetricThroughput, "bytesPerSecond");
+  reporter.RegisterImportantMetric(kMetricMedianThroughput, "bytesPerSecond");
+  return reporter;
+}
+
+}  // namespace
 
 static void Timing(const size_t len) {
   std::vector<uint8_t> buf(len);
@@ -36,20 +57,27 @@ static void Timing(const size_t len) {
   std::sort(utime.begin(), utime.end());
   const int med = runs / 2;
-  const int min = 0;
 
-  // No need for conversions as length is in bytes and time in usecs:
+  // Simply dividing len by utime gets us MB/s, but we need B/s.
   // MB/s = (len / (bytes/megabytes)) / (usecs / usecs/sec)
   // MB/s = (len / 1,000,000)/(usecs / 1,000,000)
   // MB/s = (len * 1,000,000)/(usecs * 1,000,000)
   // MB/s = len/utime
-  double median_rate = len / utime[med].InMicroseconds();
-  double max_rate = len / utime[min].InMicroseconds();
+  double median_rate = kBytesPerMegabyte * len / utime[med].InMicroseconds();
+
+  // Convert to a comma-separated string so we can report every data point.
+  std::string rates;
+  for (const auto& t : utime) {
+    rates +=
+        base::NumberToString(kBytesPerMegabyte * len / t.InMicroseconds()) +
+        ",";
+  }
+  // Strip off trailing comma.
+  rates.pop_back();
 
-  perf_test::PrintResult("len=", base::NumberToString(len), "median",
-                         median_rate, "MB/s", true);
-  perf_test::PrintResult("usecs=", base::NumberToString(total_test_time), "max",
-                         max_rate, "MB/s", true);
+  auto reporter = SetUpReporter(base::NumberToString(len) + "_bytes");
+  reporter.AddResult(kMetricRuntime, total_test_time);
+  reporter.AddResult(kMetricMedianThroughput, median_rate);
+  reporter.AddResultList(kMetricThroughput, rates);
 }
 
 TEST(SHA1PerfTest, Speed) {
...
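A quick check of the unit conversion in the comment above, with hypothetical numbers: len is in bytes and utime in microseconds, so len / utime is numerically MB/s, and the kBytesPerMegabyte factor rescales it to B/s to match the registered "bytesPerSecond" unit.

    // Hashing 500,000 bytes in 250 us: 500000 / 250 = 2000 (numerically MB/s).
    constexpr int kBytesPerMegabyte = 1000000;
    constexpr double kLen = 500000;  // bytes (hypothetical)
    constexpr double kMicros = 250;  // microseconds (hypothetical)
    static_assert(kBytesPerMegabyte * kLen / kMicros == 2e9,
                  "2000 MB/s == 2e9 B/s");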
@@ -10,11 +10,23 @@
 #include "base/values.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace {
 
+constexpr char kMetricPrefixJSON[] = "JSON.";
+constexpr char kMetricReadTime[] = "read_time";
+constexpr char kMetricWriteTime[] = "write_time";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixJSON, story_name);
+  reporter.RegisterImportantMetric(kMetricReadTime, "ms");
+  reporter.RegisterImportantMetric(kMetricWriteTime, "ms");
+  return reporter;
+}
+
 // Generates a simple dictionary value with simple data types, a string and a
 // list.
 DictionaryValue GenerateDict() {
@@ -62,16 +74,14 @@ class JSONPerfTest : public testing::Test {
     TimeTicks start_write = TimeTicks::Now();
     JSONWriter::Write(dict, &json);
     TimeTicks end_write = TimeTicks::Now();
-    perf_test::PrintResult("Write", "", description,
-                           (end_write - start_write).InMillisecondsF(), "ms",
-                           true);
+    auto reporter = SetUpReporter("breadth_" + base::NumberToString(breadth) +
+                                  "_depth_" + base::NumberToString(depth));
+    reporter.AddResult(kMetricWriteTime, end_write - start_write);
 
     TimeTicks start_read = TimeTicks::Now();
     JSONReader::Read(json);
     TimeTicks end_read = TimeTicks::Now();
-    perf_test::PrintResult("Read", "", description,
-                           (end_read - start_read).InMillisecondsF(), "ms",
-                           true);
+    reporter.AddResult(kMetricReadTime, end_read - start_read);
   }
 };
...
@@ -21,7 +21,7 @@
 #include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 #if defined(OS_ANDROID)
 #include "base/android/java_handler_thread.h"
@@ -30,6 +30,21 @@
 namespace base {
 namespace {
 
+constexpr char kMetricPrefixScheduleWork[] = "ScheduleWork.";
+constexpr char kMetricMinBatchTime[] = "min_batch_time_per_task";
+constexpr char kMetricMaxBatchTime[] = "max_batch_time_per_task";
+constexpr char kMetricTotalTime[] = "total_time_per_task";
+constexpr char kMetricThreadTime[] = "thread_time_per_task";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixScheduleWork, story_name);
+  reporter.RegisterImportantMetric(kMetricMinBatchTime, "us");
+  reporter.RegisterImportantMetric(kMetricMaxBatchTime, "us");
+  reporter.RegisterImportantMetric(kMetricTotalTime, "us");
+  reporter.RegisterImportantMetric(kMetricThreadTime, "us");
+  return reporter;
+}
+
 #if defined(OS_ANDROID)
 class JavaHandlerThreadForTest : public android::JavaHandlerThread {
  public:
@@ -148,40 +163,24 @@ class ScheduleWorkTest : public testing::Test {
       min_batch_time = std::min(min_batch_time, min_batch_times_[i]);
       max_batch_time = std::max(max_batch_time, max_batch_times_[i]);
     }
-    std::string trace = StringPrintf(
-        "%d_threads_scheduling_to_%s_pump", num_scheduling_threads,
-        target_type == MessagePumpType::IO
-            ? "io"
-            : (target_type == MessagePumpType::UI ? "ui" : "default"));
-    perf_test::PrintResult(
-        "task",
-        "",
-        trace,
-        total_time.InMicroseconds() / static_cast<double>(counter_),
-        "us/task",
-        true);
-    perf_test::PrintResult(
-        "task",
-        "_min_batch_time",
-        trace,
-        min_batch_time.InMicroseconds() / static_cast<double>(kBatchSize),
-        "us/task",
-        false);
-    perf_test::PrintResult(
-        "task",
-        "_max_batch_time",
-        trace,
-        max_batch_time.InMicroseconds() / static_cast<double>(kBatchSize),
-        "us/task",
-        false);
+
+    std::string story_name = StringPrintf(
+        "%s_pump_from_%d_threads",
+        target_type == MessagePumpType::IO
+            ? "io"
+            : (target_type == MessagePumpType::UI ? "ui" : "default"),
+        num_scheduling_threads);
+    auto reporter = SetUpReporter(story_name);
+    reporter.AddResult(kMetricMinBatchTime, total_time.InMicroseconds() /
+                                                static_cast<double>(counter_));
+    reporter.AddResult(
+        kMetricMaxBatchTime,
+        max_batch_time.InMicroseconds() / static_cast<double>(kBatchSize));
+    reporter.AddResult(kMetricTotalTime, total_time.InMicroseconds() /
+                                             static_cast<double>(counter_));
 
     if (ThreadTicks::IsSupported()) {
-      perf_test::PrintResult(
-          "task",
-          "_thread_time",
-          trace,
-          total_thread_time.InMicroseconds() / static_cast<double>(counter_),
-          "us/task",
-          true);
+      reporter.AddResult(kMetricThreadTime, total_thread_time.InMicroseconds() /
+                                                static_cast<double>(counter_));
     }
   }
...
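The story rename swaps the word order of the old trace string; for example, with num_scheduling_threads = 4 posting to an IO pump:

    // old: StringPrintf("%d_threads_scheduling_to_%s_pump", 4, "io")
    //      -> "4_threads_scheduling_to_io_pump"
    // new: StringPrintf("%s_pump_from_%d_threads", "io", 4)
    //      -> "io_pump_from_4_threads"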
@@ -11,7 +11,7 @@
 #include "base/strings/stringprintf.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 // Ask the compiler not to use a register for this counter, in case it decides
 // to do magic optimizations like |counter += kLaps|.
@@ -19,6 +19,19 @@ volatile int g_observer_list_perf_test_counter;
 
 namespace base {
 
+constexpr char kMetricPrefixObserverList[] = "ObserverList.";
+constexpr char kMetricNotifyTimePerObserver[] = "notify_time_per_observer";
+
+namespace {
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixObserverList, story_name);
+  reporter.RegisterImportantMetric(kMetricNotifyTimePerObserver, "ns");
+  return reporter;
+}
+
+}  // namespace
+
 class ObserverInterface {
  public:
   ObserverInterface() {}
@@ -95,24 +108,23 @@ TYPED_TEST(ObserverListPerfTest, NotifyPerformance) {
   }
   TimeDelta duration = TimeTicks::Now() - start;
 
-  const char* name = Pick<TypeParam>::GetName();
-
   observers.clear();
 
   EXPECT_EQ(observer_count * weighted_laps,
             g_observer_list_perf_test_counter);
   EXPECT_TRUE(observer_count == 0 || list.might_have_observers());
 
-  std::string prefix =
-      base::StringPrintf("ObserverListPerfTest_%d.", observer_count);
+  std::string story_name =
+      base::StringPrintf("%s_%d", Pick<TypeParam>::GetName(), observer_count);
 
   // A typical value is 3-20 nanoseconds per observe in Release, 1000-2000ns
   // in an optimized build with DCHECKs and 3000-6000ns in debug builds.
-  perf_test::PrintResult(
-      prefix, name, "NotifyPerformance",
-      duration.InNanoseconds() /
-          static_cast<double>(g_observer_list_perf_test_counter +
-                              weighted_laps),
-      "ns/observe", true);
+  auto reporter = SetUpReporter(story_name);
+  reporter.AddResult(
+      kMetricNotifyTimePerObserver,
+      duration.InNanoseconds() /
+          static_cast<double>(g_observer_list_perf_test_counter +
                              weighted_laps));
 }
 
 }
...
@@ -10,12 +10,31 @@
 #include "base/time/time.h"
 #include "base/timer/elapsed_timer.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace {
 
+constexpr char kMetricPrefixWaitableEvent[] = "WaitableEvent.";
+constexpr char kMetricWaitTime[] = "wait_time_per_sample";
+constexpr char kMetricSignalTime[] = "signal_time_per_sample";
+constexpr char kMetricElapsedCycles[] = "elapsed_cycles";
+constexpr char kStorySingleThread[] = "single_thread_1000_samples";
+constexpr char kStoryMultiThreadWaiter[] = "multi_thread_1000_samples_waiter";
+constexpr char kStoryMultiThreadSignaler[] =
+    "multi_thread_1000_samples_signaler";
+constexpr char kStoryTimedThroughput[] = "timed_throughput";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixWaitableEvent,
+                                         story_name);
+  reporter.RegisterImportantMetric(kMetricWaitTime, "ns");
+  reporter.RegisterImportantMetric(kMetricSignalTime, "ns");
+  reporter.RegisterImportantMetric(kMetricElapsedCycles, "count");
+  return reporter;
+}
+
 class TraceWaitableEvent {
  public:
   TraceWaitableEvent() = default;
@@ -92,17 +111,20 @@ class SignalerThread : public SimpleThread {
 };
 
 void PrintPerfWaitableEvent(const TraceWaitableEvent* event,
-                            const std::string& trace) {
-  perf_test::PrintResult(
-      "WaitableEvent_SignalTime_ns", "", trace,
-      static_cast<size_t>(event->total_signal_time().InNanoseconds()) /
-          event->signal_samples(),
-      "ns/sample", true);
-  perf_test::PrintResult(
-      "WaitableEvent_WaitTime_ns", "", trace,
-      static_cast<size_t>(event->total_wait_time().InNanoseconds()) /
-          event->wait_samples(),
-      "ns/sample", true);
+                            const std::string& story_name,
+                            size_t* elapsed_cycles = nullptr) {
+  auto reporter = SetUpReporter(story_name);
+  reporter.AddResult(
+      kMetricSignalTime,
+      static_cast<size_t>(event->total_signal_time().InNanoseconds()) /
+          event->signal_samples());
+  reporter.AddResult(
+      kMetricWaitTime,
+      static_cast<size_t>(event->total_wait_time().InNanoseconds()) /
+          event->wait_samples());
+  if (elapsed_cycles) {
+    reporter.AddResult(kMetricElapsedCycles, *elapsed_cycles);
+  }
 }
 
 }  // namespace
@@ -117,7 +139,7 @@ TEST(WaitableEventPerfTest, SingleThread) {
     event.Wait();
   }
 
-  PrintPerfWaitableEvent(&event, "singlethread-1000-samples");
+  PrintPerfWaitableEvent(&event, kStorySingleThread);
 }
 
 TEST(WaitableEventPerfTest, MultipleThreads) {
@@ -142,8 +164,8 @@ TEST(WaitableEventPerfTest, MultipleThreads) {
   thread.Join();
 
-  PrintPerfWaitableEvent(&waiter, "multithread-1000-samples_waiter");
-  PrintPerfWaitableEvent(&signaler, "multithread-1000-samples_signaler");
+  PrintPerfWaitableEvent(&waiter, kStoryMultiThreadWaiter);
+  PrintPerfWaitableEvent(&signaler, kStoryMultiThreadSignaler);
 }
 
 TEST(WaitableEventPerfTest, Throughput) {
@@ -161,8 +183,7 @@ TEST(WaitableEventPerfTest, Throughput) {
   thread.RequestStop();
   thread.Join();
 
-  perf_test::PrintResult("counts", "", "throughput", count, "signals", true);
-  PrintPerfWaitableEvent(&event, "throughput");
+  PrintPerfWaitableEvent(&event, kStoryTimedThroughput, &count);
 }
 
 }  // namespace base
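The separate "counts" PrintResult is gone; the throughput test now passes its count through the new optional elapsed_cycles parameter instead. A sketch of the two call shapes, per the signature above:

    // Wait/signal metrics only; elapsed_cycles defaults to nullptr.
    PrintPerfWaitableEvent(&event, kStorySingleThread);

    // Also reports the count under "WaitableEvent.elapsed_cycles".
    size_t count = /* incremented by the timed loop */ 0;
    PrintPerfWaitableEvent(&event, kStoryTimedThroughput, &count);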
...
@@ -31,14 +31,25 @@
 #include "base/time/default_tick_clock.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace sequence_manager {
 
 namespace {
 
 const int kNumTasks = 1000000;
+
+constexpr char kMetricPrefixSequenceManager[] = "SequenceManager.";
+constexpr char kMetricPostTimePerTask[] = "post_time_per_task";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixSequenceManager,
+                                         story_name);
+  reporter.RegisterImportantMetric(kMetricPostTimePerTask, "us");
+  return reporter;
 }
 
+}  // namespace
+
 // To reduce noise related to the OS timer, we use a mock time domain to
 // fast forward the timers.
 class PerfTestTimeDomain : public MockTimeDomain {
@@ -623,21 +634,17 @@ class SequenceManagerPerfTest : public testing::TestWithParam<PerfTestType> {
     return task_runners;
   }
 
-  void Benchmark(const std::string& trace, TestCase* TestCase) {
+  void Benchmark(const std::string& story_prefix, TestCase* TestCase) {
     TimeTicks start = TimeTicks::Now();
     TimeTicks now;
     TestCase->Start();
     delegate_->WaitUntilDone();
     now = TimeTicks::Now();
-    perf_test::PrintResult(
-        "task", "", trace + delegate_->GetName(),
-        (now - start).InMicroseconds() / static_cast<double>(kNumTasks),
-        "us/task", true);
-    LOG(ERROR) << "task " << trace << delegate_->GetName()
-               << ((now - start).InMicroseconds() /
-                   static_cast<double>(kNumTasks))
-               << " us/task";
+    auto reporter = SetUpReporter(story_prefix + delegate_->GetName());
+    reporter.AddResult(
+        kMetricPostTimePerTask,
+        (now - start).InMicroseconds() / static_cast<double>(kNumTasks));
   }
 
   std::unique_ptr<PerfTestDelegate> delegate_;
...
@@ -18,13 +18,37 @@
 #include "base/threading/simple_thread.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 namespace base {
 namespace internal {
 
 namespace {
 
+constexpr char kMetricPrefixThreadPool[] = "ThreadPool.";
+constexpr char kMetricPostTaskThroughput[] = "post_task_throughput";
+constexpr char kMetricRunTaskThroughput[] = "run_task_throughput";
+constexpr char kMetricNumTasksPosted[] = "num_tasks_posted";
+constexpr char kStoryBindPostThenRunNoOp[] = "bind_post_then_run_noop_tasks";
+constexpr char kStoryPostThenRunNoOp[] = "post_then_run_noop_tasks";
+constexpr char kStoryPostThenRunNoOpManyThreads[] =
+    "post_then_run_noop_tasks_many_threads";
+constexpr char kStoryPostThenRunNoOpMoreThanRunningThreads[] =
+    "post_then_run_noop_tasks_more_than_running_threads";
+constexpr char kStoryPostRunNoOp[] = "post_run_noop_tasks";
+constexpr char kStoryPostRunNoOpManyThreads[] =
+    "post_run_noop_tasks_many_threads";
+constexpr char kStoryPostRunBusyManyThreads[] =
+    "post_run_busy_tasks_many_threads";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixThreadPool, story_name);
+  reporter.RegisterImportantMetric(kMetricPostTaskThroughput, "runs/s");
+  reporter.RegisterImportantMetric(kMetricRunTaskThroughput, "runs/s");
+  reporter.RegisterImportantMetric(kMetricNumTasksPosted, "count");
+  return reporter;
+}
+
 enum class ExecutionMode {
   // Allows tasks to start running while tasks are being posted by posting
   // threads.
@@ -135,7 +159,7 @@ class ThreadPoolPerfTest : public testing::Test {
 
   void OnCompletePostingTasks() { complete_posting_tasks_.Signal(); }
 
-  void Benchmark(const std::string& trace, ExecutionMode execution_mode) {
+  void Benchmark(const std::string& story_name, ExecutionMode execution_mode) {
    base::Optional<ThreadPoolInstance::ScopedExecutionFence> execution_fence;
    if (execution_mode == ExecutionMode::kPostThenRun) {
      execution_fence.emplace();
@@ -159,18 +183,16 @@ class ThreadPoolPerfTest : public testing::Test {
       thread->Join();
     ThreadPoolInstance::Get()->JoinForTesting();
-    perf_test::PrintResult(
-        "Posting tasks throughput", "", trace,
-        num_posted_tasks_ /
-            static_cast<double>(post_task_duration_.InMilliseconds()),
-        "tasks/ms", true);
-    perf_test::PrintResult(
-        "Running tasks throughput", "", trace,
-        num_posted_tasks_ /
-            static_cast<double>(tasks_run_duration_.InMilliseconds()),
-        "tasks/ms", true);
-    perf_test::PrintResult("Num tasks posted", "", trace, num_posted_tasks_,
-                           "tasks", true);
+    auto reporter = SetUpReporter(story_name);
+    reporter.AddResult(
+        kMetricPostTaskThroughput,
+        num_posted_tasks_ /
+            static_cast<double>(post_task_duration_.InSecondsF()));
+    reporter.AddResult(
+        kMetricRunTaskThroughput,
+        num_posted_tasks_ /
+            static_cast<double>(tasks_run_duration_.InSecondsF()));
+    reporter.AddResult(kMetricNumTasksPosted, num_posted_tasks_);
   }
 
  private:
@@ -195,29 +217,28 @@ TEST_F(ThreadPoolPerfTest, BindPostThenRunNoOpTasks) {
       1, 1,
       BindRepeating(&ThreadPoolPerfTest::ContinuouslyBindAndPostNoOpTasks,
                     Unretained(this), 10000));
-  Benchmark("Bind+Post-then-run no-op tasks", ExecutionMode::kPostThenRun);
+  Benchmark(kStoryBindPostThenRunNoOp, ExecutionMode::kPostThenRun);
 }
 
 TEST_F(ThreadPoolPerfTest, PostThenRunNoOpTasks) {
   StartThreadPool(1, 1,
                   BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostNoOpTasks,
                                 Unretained(this), 10000));
-  Benchmark("Post-then-run no-op tasks", ExecutionMode::kPostThenRun);
+  Benchmark(kStoryPostThenRunNoOp, ExecutionMode::kPostThenRun);
 }
 
 TEST_F(ThreadPoolPerfTest, PostThenRunNoOpTasksManyThreads) {
   StartThreadPool(4, 4,
                   BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostNoOpTasks,
                                 Unretained(this), 10000));
-  Benchmark("Post-then-run no-op tasks many threads",
-            ExecutionMode::kPostThenRun);
+  Benchmark(kStoryPostThenRunNoOpManyThreads, ExecutionMode::kPostThenRun);
 }
 
 TEST_F(ThreadPoolPerfTest, PostThenRunNoOpTasksMorePostingThanRunningThreads) {
   StartThreadPool(1, 4,
                   BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostNoOpTasks,
                                 Unretained(this), 10000));
-  Benchmark("Post-then-run no-op tasks more posting than running threads",
+  Benchmark(kStoryPostThenRunNoOpMoreThanRunningThreads,
             ExecutionMode::kPostThenRun);
 }
 
@@ -225,14 +246,14 @@ TEST_F(ThreadPoolPerfTest, PostRunNoOpTasks) {
   StartThreadPool(1, 1,
                   BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostNoOpTasks,
                                 Unretained(this), 10000));
-  Benchmark("Post/run no-op tasks", ExecutionMode::kPostAndRun);
+  Benchmark(kStoryPostRunNoOp, ExecutionMode::kPostAndRun);
 }
 
 TEST_F(ThreadPoolPerfTest, PostRunNoOpTasksManyThreads) {
   StartThreadPool(4, 4,
                   BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostNoOpTasks,
                                 Unretained(this), 10000));
-  Benchmark("Post/run no-op tasks many threads", ExecutionMode::kPostAndRun);
+  Benchmark(kStoryPostRunNoOpManyThreads, ExecutionMode::kPostAndRun);
 }
 
 TEST_F(ThreadPoolPerfTest, PostRunBusyTasksManyThreads) {
@@ -241,7 +262,7 @@ TEST_F(ThreadPoolPerfTest, PostRunBusyTasksManyThreads) {
       BindRepeating(&ThreadPoolPerfTest::ContinuouslyPostBusyWaitTasks,
                     Unretained(this), 10000,
                     base::TimeDelta::FromMicroseconds(200)));
-  Benchmark("Post/run busy tasks many threads", ExecutionMode::kPostAndRun);
+  Benchmark(kStoryPostRunBusyManyThreads, ExecutionMode::kPostAndRun);
 }
 
 }  // namespace internal
...
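Besides the renames, note the divisor change from InMilliseconds() to InSecondsF(): the reported unit moves from tasks/ms to runs/s, and the floating-point conversion avoids integer truncation for short durations. With hypothetical numbers:

    constexpr double kTasks = 10000;     // hypothetical
    constexpr double kDurationMs = 250;  // hypothetical
    constexpr double old_rate = kTasks / kDurationMs;           // 40 tasks/ms
    constexpr double new_rate = kTasks / (kDurationMs / 1000);  // 40000 runs/s
    static_assert(new_rate == old_rate * 1000, "same rate, new unit");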
@@ -17,7 +17,7 @@
 #include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 #if defined(OS_WIN)
 #include <windows.h>
@@ -31,6 +31,38 @@ namespace internal {
 
 namespace {
 
+constexpr char kMetricPrefixThreadLocalStorage[] = "ThreadLocalStorage.";
+constexpr char kMetricBaseRead[] = "read";
+constexpr char kMetricBaseWrite[] = "write";
+constexpr char kMetricBaseReadWrite[] = "read_write";
+constexpr char kMetricSuffixThroughput[] = "_throughput";
+constexpr char kMetricSuffixOperationTime[] = "_operation_time";
+constexpr char kStoryBaseTLS[] = "thread_local_storage";
+#if defined(OS_WIN)
+constexpr char kStoryBasePlatformFLS[] = "platform_fiber_local_storage";
+#endif  // defined(OS_WIN)
+constexpr char kStoryBasePlatformTLS[] = "platform_thread_local_storage";
+constexpr char kStoryBaseCPPTLS[] = "c++_platform_thread_local_storage";
+constexpr char kStorySuffixFourThreads[] = "_4_threads";
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixThreadLocalStorage,
+                                         story_name);
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseRead) + kMetricSuffixThroughput, "runs/s");
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseRead) + kMetricSuffixOperationTime, "ns");
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseWrite) + kMetricSuffixThroughput, "runs/s");
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseWrite) + kMetricSuffixOperationTime, "ns");
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseReadWrite) + kMetricSuffixThroughput, "runs/s");
+  reporter.RegisterImportantMetric(
+      std::string(kMetricBaseReadWrite) + kMetricSuffixOperationTime, "ns");
+  return reporter;
+}
+
 // A thread that waits for the caller to signal an event before proceeding to
 // call action.Run().
 class TLSThread : public SimpleThread {
@@ -67,14 +99,14 @@ class ThreadLocalStoragePerfTest : public testing::Test {
   ~ThreadLocalStoragePerfTest() override = default;
 
   template <class Read, class Write>
-  void Benchmark(const std::string& trace,
+  void Benchmark(const std::string& story_name,
                  Read read,
                  Write write,
                  size_t num_operation,
                  size_t num_threads) {
     write(2);
 
-    BenchmarkImpl("TLS read throughput", trace,
+    BenchmarkImpl(kMetricBaseRead, story_name,
                   base::BindLambdaForTesting([&]() {
                     volatile intptr_t total = 0;
                     for (size_t i = 0; i < num_operation; ++i)
@@ -82,14 +114,14 @@ class ThreadLocalStoragePerfTest : public testing::Test {
                   }),
                   num_operation, num_threads);
 
-    BenchmarkImpl("TLS write throughput", trace,
+    BenchmarkImpl(kMetricBaseWrite, story_name,
                   base::BindLambdaForTesting([&]() {
                     for (size_t i = 0; i < num_operation; ++i)
                       write(i);
                   }),
                   num_operation, num_threads);
 
-    BenchmarkImpl("TLS read-write throughput", trace,
+    BenchmarkImpl(kMetricBaseReadWrite, story_name,
                   base::BindLambdaForTesting([&]() {
                     for (size_t i = 0; i < num_operation; ++i)
                       write(read() + 1);
@@ -97,8 +129,8 @@ class ThreadLocalStoragePerfTest : public testing::Test {
                   num_operation, num_threads);
   }
 
-  void BenchmarkImpl(const std::string& measurment,
-                     const std::string& trace,
+  void BenchmarkImpl(const std::string& metric_base,
+                     const std::string& story_name,
                      base::RepeatingClosure action,
                      size_t num_operation,
                      size_t num_threads) {
@@ -123,13 +155,13 @@ class ThreadLocalStoragePerfTest : public testing::Test {
     for (auto& thread : threads)
       thread->Join();
 
-    perf_test::PrintResult(measurment, "", trace,
-                           num_operation / operation_duration.InMillisecondsF(),
-                           "operations/ms", true);
+    auto reporter = SetUpReporter(story_name);
+    reporter.AddResult(metric_base + kMetricSuffixThroughput,
+                       num_operation / operation_duration.InSecondsF());
     size_t nanos_per_operation =
         operation_duration.InNanoseconds() / num_operation;
-    perf_test::PrintResult(measurment, "", trace, nanos_per_operation,
-                           "ns/operation", true);
+    reporter.AddResult(metric_base + kMetricSuffixOperationTime,
+                       nanos_per_operation);
   }
 
  private:
@@ -143,8 +175,9 @@ TEST_F(ThreadLocalStoragePerfTest, ThreadLocalStorage) {
   auto read = [&]() { return reinterpret_cast<intptr_t>(tls.Get()); };
   auto write = [&](intptr_t value) { tls.Set(reinterpret_cast<void*>(value)); };
 
-  Benchmark("ThreadLocalStorage", read, write, 10000000, 1);
-  Benchmark("ThreadLocalStorage 4 threads", read, write, 10000000, 4);
+  Benchmark(kStoryBaseTLS, read, write, 10000000, 1);
+  Benchmark(std::string(kStoryBaseTLS) + kStorySuffixFourThreads, read, write,
+            10000000, 4);
 }
 
 #if defined(OS_WIN)
@@ -160,8 +193,9 @@ TEST_F(ThreadLocalStoragePerfTest, PlatformFls) {
     FlsSetValue(key, reinterpret_cast<void*>(value));
   };
 
-  Benchmark("PlatformFls", read, write, 10000000, 1);
-  Benchmark("PlatformFls 4 threads", read, write, 10000000, 4);
+  Benchmark(kStoryBasePlatformFLS, read, write, 10000000, 1);
+  Benchmark(std::string(kStoryBasePlatformFLS) + kStorySuffixFourThreads, read,
+            write, 10000000, 4);
 }
 
 TEST_F(ThreadLocalStoragePerfTest, PlatformTls) {
@@ -173,8 +207,9 @@ TEST_F(ThreadLocalStoragePerfTest, PlatformTls) {
     TlsSetValue(key, reinterpret_cast<void*>(value));
   };
 
-  Benchmark("PlatformTls", read, write, 10000000, 1);
-  Benchmark("PlatformTls 4 threads", read, write, 10000000, 4);
+  Benchmark(kStoryBasePlatformTLS, read, write, 10000000, 1);
+  Benchmark(std::string(kStoryBasePlatformTLS) + kStorySuffixFourThreads, read,
+            write, 10000000, 4);
 }
 
 #elif defined(OS_POSIX) || defined(OS_FUCHSIA)
@@ -191,8 +226,9 @@ TEST_F(ThreadLocalStoragePerfTest, PlatformTls) {
     pthread_setspecific(key, reinterpret_cast<void*>(value));
   };
 
-  Benchmark("PlatformTls", read, write, 10000000, 1);
-  Benchmark("PlatformTls 4 threads", read, write, 10000000, 4);
+  Benchmark(kStoryBasePlatformTLS, read, write, 10000000, 1);
+  Benchmark(std::string(kStoryBasePlatformTLS) + kStorySuffixFourThreads, read,
+            write, 10000000, 4);
 }
 
 #endif
@@ -205,8 +241,9 @@ TEST_F(ThreadLocalStoragePerfTest, Cpp11Tls) {
     reinterpret_cast<volatile intptr_t*>(&thread_local_variable)[0] = value;
   };
 
-  Benchmark("C++ thread_local TLS", read, write, 10000000, 1);
-  Benchmark("C++ thread_local TLS 4 threads", read, write, 10000000, 4);
+  Benchmark(kStoryBaseCPPTLS, read, write, 10000000, 1);
+  Benchmark(std::string(kStoryBaseCPPTLS) + kStorySuffixFourThreads, read,
+            write, 10000000, 4);
 }
 
 }  // namespace internal
...
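Here the metric name is assembled from a base and a suffix rather than being fixed; for BenchmarkImpl(kMetricBaseRead, ...) on the four-thread TLS story, the dashboard-visible names compose as:

    // "ThreadLocalStorage." + "read" + "_throughput"     -> runs/s
    // "ThreadLocalStorage." + "read" + "_operation_time" -> ns
    // both reported against story "thread_local_storage_4_threads"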
@@ -23,7 +23,7 @@
 #include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
-#include "testing/perf/perf_test.h"
+#include "testing/perf/perf_result_reporter.h"
 
 #if defined(OS_POSIX)
 #include <pthread.h>
@@ -35,6 +35,27 @@ namespace {
 
 const int kNumRuns = 100000;
 
+constexpr char kMetricPrefixThread[] = "Thread.";
+constexpr char kMetricClockTimePerHop[] = "wall_time_per_hop";
+constexpr char kMetricCpuTimePerHop[] = "cpu_time_per_hop";
+constexpr char kStoryBaseTask[] = "task";
+constexpr char kStoryBaseTaskWithObserver[] = "task_with_observer";
+constexpr char kStoryBaseWaitableEvent[] = "waitable_event";
+constexpr char kStoryBaseCondVar[] = "condition_variable";
+constexpr char kStorySuffixOneThread[] = "_1_thread";
+constexpr char kStorySuffixFourThreads[] = "_4_threads";
+#if defined(OS_POSIX)
+constexpr char kStoryBasePthreadCondVar[] = "pthread_condition_variable";
+#endif  // defined(OS_POSIX)
+
+perf_test::PerfResultReporter SetUpReporter(const std::string& story_name) {
+  perf_test::PerfResultReporter reporter(kMetricPrefixThread, story_name);
+  reporter.RegisterImportantMetric(kMetricClockTimePerHop, "us");
+  reporter.RegisterImportantMetric(kMetricCpuTimePerHop, "us");
+  return reporter;
+}
+
 // Base class for a threading perf-test. This sets up some threads for the
 // test and measures the clock-time in addition to time spent on each thread.
 class ThreadPerfTest : public testing::Test {
@@ -69,7 +90,7 @@ class ThreadPerfTest : public testing::Test {
     return ticks;
   }
 
-  void RunPingPongTest(const std::string& name, unsigned num_threads) {
+  void RunPingPongTest(const std::string& story_name, unsigned num_threads) {
     // Create threads and collect starting cpu-time for each thread.
     std::vector<base::ThreadTicks> thread_starts;
     while (threads_.size() < num_threads) {
@@ -103,14 +124,13 @@ class ThreadPerfTest : public testing::Test {
     double us_per_task_clock = (end - start).InMicroseconds() / num_runs;
     double us_per_task_cpu = thread_time.InMicroseconds() / num_runs;
 
+    auto reporter = SetUpReporter(story_name);
     // Clock time per task.
-    perf_test::PrintResult(
-        "task", "", name + "_time ", us_per_task_clock, "us/hop", true);
+    reporter.AddResult(kMetricClockTimePerHop, us_per_task_clock);
 
     // Total utilization across threads if available (likely higher).
     if (base::ThreadTicks::IsSupported()) {
-      perf_test::PrintResult(
-          "task", "", name + "_cpu ", us_per_task_cpu, "us/hop", true);
+      reporter.AddResult(kMetricCpuTimePerHop, us_per_task_cpu);
     }
   }
@@ -145,8 +165,8 @@ class TaskPerfTest : public ThreadPerfTest {
 // used to ensure the threads do yield (with just two it might be possible for
 // both threads to stay awake if they can signal each other fast enough).
 TEST_F(TaskPerfTest, TaskPingPong) {
-  RunPingPongTest("1_Task_Threads", 1);
-  RunPingPongTest("4_Task_Threads", 4);
+  RunPingPongTest(std::string(kStoryBaseTask) + kStorySuffixOneThread, 1);
+  RunPingPongTest(std::string(kStoryBaseTask) + kStorySuffixFourThreads, 4);
 }
 
@@ -175,8 +195,10 @@ class TaskObserverPerfTest : public TaskPerfTest {
 };
 
 TEST_F(TaskObserverPerfTest, TaskPingPong) {
-  RunPingPongTest("1_Task_Threads_With_Observer", 1);
-  RunPingPongTest("4_Task_Threads_With_Observer", 4);
+  RunPingPongTest(
+      std::string(kStoryBaseTaskWithObserver) + kStorySuffixOneThread, 1);
+  RunPingPongTest(
+      std::string(kStoryBaseTaskWithObserver) + kStorySuffixFourThreads, 4);
 }
 
 // Class to test our WaitableEvent performance by signaling back and forth.
@@ -230,7 +252,8 @@ class EventPerfTest : public ThreadPerfTest {
 // end up blocking because the event is already signalled).
 typedef EventPerfTest<base::WaitableEvent> WaitableEventThreadPerfTest;
 TEST_F(WaitableEventThreadPerfTest, EventPingPong) {
-  RunPingPongTest("4_WaitableEvent_Threads", 4);
+  RunPingPongTest(
+      std::string(kStoryBaseWaitableEvent) + kStorySuffixFourThreads, 4);
 }
 
 // Build a minimal event using ConditionVariable.
@@ -268,7 +291,7 @@ class ConditionVariableEvent {
 // using our own base synchronization code.
 typedef EventPerfTest<ConditionVariableEvent> ConditionVariablePerfTest;
 TEST_F(ConditionVariablePerfTest, EventPingPong) {
-  RunPingPongTest("4_ConditionVariable_Threads", 4);
+  RunPingPongTest(std::string(kStoryBaseCondVar) + kStorySuffixFourThreads, 4);
 }
 
 #if defined(OS_POSIX)
@@ -315,7 +338,8 @@ class PthreadEvent {
 // If there is any faster way to do this we should substitute it in.
 typedef EventPerfTest<PthreadEvent> PthreadEventPerfTest;
 TEST_F(PthreadEventPerfTest, EventPingPong) {
-  RunPingPongTest("4_PthreadCondVar_Threads", 4);
+  RunPingPongTest(
+      std::string(kStoryBasePthreadCondVar) + kStorySuffixFourThreads, 4);
 }
 
 #endif
...
@@ -4,13 +4,32 @@
 #include "testing/perf/perf_result_reporter.h"
 
 #include "base/logging.h"
+#include "base/no_destructor.h"
 #include "testing/perf/perf_test.h"
 
+namespace {
+
+// These characters mess with either the stdout parsing or the dashboard itself.
+static const base::NoDestructor<std::vector<std::string>> kInvalidCharacters(
+    {"/", ":", "="});
+
+void CheckForInvalidCharacters(const std::string& str) {
+  for (const auto& invalid : *kInvalidCharacters) {
+    CHECK(str.find(invalid) == std::string::npos)
+        << "Given invalid character for perf names '" << invalid << "'";
+  }
+}
+
+}  // namespace
+
 namespace perf_test {
 
 PerfResultReporter::PerfResultReporter(const std::string& metric_basename,
                                        const std::string& story_name)
-    : metric_basename_(metric_basename), story_name_(story_name) {}
+    : metric_basename_(metric_basename), story_name_(story_name) {
+  CheckForInvalidCharacters(metric_basename_);
+  CheckForInvalidCharacters(story_name_);
+}
 
 PerfResultReporter::~PerfResultReporter() = default;
 
@@ -106,6 +125,7 @@ bool PerfResultReporter::GetMetricInfo(const std::string& metric_suffix,
 void PerfResultReporter::RegisterMetric(const std::string& metric_suffix,
                                         const std::string& units,
                                         bool important) {
+  CheckForInvalidCharacters(metric_suffix);
   CHECK(metric_map_.count(metric_suffix) == 0);
   metric_map_.insert({metric_suffix, {units, important}});
 }
...
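With the new CheckForInvalidCharacters() guard, a reporter now CHECK-fails at construction (or at metric registration) instead of silently producing names the stdout parser or dashboard would mangle. A sketch with hypothetical names:

    // Fine: '.' in the basename and '_' in the story are allowed.
    perf_test::PerfResultReporter ok("SpinLock.", "baseline_story");

    // CHECK-fails: ':' is rejected, along with '/' and '='.
    perf_test::PerfResultReporter bad("SpinLock:", "baseline_story");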
@@ -56,6 +56,7 @@ DATA_FORMAT_UNKNOWN = 'unknown'
 # are okay with potentially encountering issues.
 GTEST_CONVERSION_WHITELIST = [
     'angle_perftests',
+    'base_perftests',
     'cc_perftests',
     'components_perftests',
     'gpu_perftests',
...