Commit a94eef13 authored by Yuri Wiitala, committed by Commit Bot

Move tab mirroring performance tests into browser_tests.

This moves TabCapturePerformanceTest and CastV2PerformanceTest into
browser_tests. By default, they now run in a quick-check mode. When run
for performance testing on the perf bots, the --full-performance-run
command-line switch is required.

The performance_browser_tests isolate config has been changed to run
browser_tests with the --full-performance-run switch, gtest filters,
and timeout overrides. Also, the old performance_browser_tests GN test
target has been turned into a simple group target that groups the
necessary data dependencies for running on the perf bots.

This change also allows the tests to complete in the absence of tracing
data when in quick-check mode. While most bots collect a reasonable
amount of tracing data within a few seconds, one or two don't always do
so; unfortunately, we can't justify making the CQ run longer to force
data collection in all cases.

Bug: 804191
Change-Id: I2de6c0b94453981fb0d9015f6b3f0e42f57cc2a6
Reviewed-on: https://chromium-review.googlesource.com/c/1428399
Commit-Queue: Yuri Wiitala <miu@chromium.org>
Reviewed-by: Jochen Eisinger <jochen@chromium.org>
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#632366}
parent cc303a03
@@ -53,10 +53,27 @@ namespace {
// contribute anything toward stable measurements: A brief moment of startup
// "jank" is acceptable, and shutdown may result in missing events (e.g., if
// streaming stops a few frames before capture stops).
constexpr size_t kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
constexpr int kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
// Minimum number of events required for a reasonable analysis.
constexpr size_t kMinDataPoints = 100; // 1 sec of audio, or ~5 sec at 24fps.
constexpr int kMinDataPointsForFullRun = 100; // 1s of audio, ~5s at 24fps.
// Minimum number of events required for data analysis in a non-performance run.
constexpr int kMinDataPointsForQuickRun = 3;
// A convenience macro to run a gtest expectation in the "full performance run"
// setting, or else log a warning that something is not being entirely tested in the
// "CQ run" setting. This is required because the test runs in the CQ may not be
// long enough to collect sufficient tracing data; and, unfortunately, there's
// nothing we can do about that.
#define EXPECT_FOR_PERFORMANCE_RUN(expr) \
do { \
if (is_full_performance_run()) { \
EXPECT_TRUE(expr); \
} else if (!(expr)) { \
LOG(WARNING) << "Allowing failure: " << #expr; \
} \
} while (false)
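For context, a minimal usage sketch of the macro above (frame_count and the threshold are illustrative placeholders, not part of this change):

  // In a full performance run this behaves like EXPECT_TRUE(); in the
  // quick-check (CQ) mode a failing expression only logs a warning such as
  // "Allowing failure: frame_count >= 100".
  EXPECT_FOR_PERFORMANCE_RUN(frame_count >= 100);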
enum TestFlags {
kSmallWindow = 1 << 2, // Window size: 1 = 800x600, 0 = 2000x1000
@@ -175,7 +192,6 @@ class MeanAndError {
// If data[x] == x * A + B, then this function returns zero.
// The unit is milliseconds.
static MeanAndError AnalyzeJitter(const std::vector<TimeData>& data) {
CHECK_GT(data.size(), 1UL);
VLOG(0) << "Jitter analysis on " << data.size() << " values.";
std::vector<double> deltas;
double sum = 0.0;
@@ -185,10 +201,15 @@ static MeanAndError AnalyzeJitter(const std::vector<TimeData>& data) {
deltas.push_back(delta);
sum += delta;
}
if (deltas.empty()) {
// Not enough data. Don't do the following calculation, to avoid a
// divide-by-zero.
} else {
double mean = sum / deltas.size();
for (size_t i = 0; i < deltas.size(); i++) {
deltas[i] = fabs(mean - deltas[i]);
}
}
return MeanAndError(deltas);
}
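To make the jitter metric above concrete, here is a small worked check with invented timestamps:

  // Playout times of 0, 10, 20 and 35 ms give deltas {10, 10, 15} with a mean
  // of ~11.67 ms. AnalyzeJitter reports the mean absolute deviation of those
  // deltas: (1.67 + 1.67 + 3.33) / 3 ≈ 2.22 ms. Perfectly even spacing
  // (data[x] == x * A + B) therefore reports zero jitter.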
@@ -199,23 +220,29 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
public:
explicit TestPatternReceiver(
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point)
const net::IPEndPoint& local_end_point,
bool is_full_performance_run)
: InProcessReceiver(
cast_environment,
local_end_point,
net::IPEndPoint(),
WithAesKeyAndIvSet(media::cast::GetDefaultAudioReceiverConfig()),
WithAesKeyAndIvSet(media::cast::GetDefaultVideoReceiverConfig())) {}
WithAesKeyAndIvSet(media::cast::GetDefaultVideoReceiverConfig())),
is_full_performance_run_(is_full_performance_run) {}
typedef std::map<uint16_t, base::TimeTicks> TimeMap;
bool is_full_performance_run() const { return is_full_performance_run_; }
// Build a map from frame ID (as encoded in the audio and video data)
// to the rtp timestamp for that frame. Note that there will be multiple
// audio frames which all have the same frame ID. When that happens we
// want the minimum rtp timestamp, because that audio frame is supposed
// to play at the same time that the corresponding image is presented.
void MapFrameTimes(const std::vector<TimeData>& events, TimeMap* map) {
for (size_t i = kTrimEvents; i < events.size() - kTrimEvents; i++) {
const int trim_count = is_full_performance_run_ ? kTrimEvents : 0;
for (int i = trim_count; i < static_cast<int>(events.size()) - trim_count;
i++) {
base::TimeTicks& frame_tick = (*map)[events[i].frame_no];
if (frame_tick.is_null()) {
frame_tick = events[i].playout_time;
@@ -232,9 +259,14 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// 1/30s of "2", etc.
TimeMap audio_frame_times, video_frame_times;
MapFrameTimes(audio_events_, &audio_frame_times);
EXPECT_GE(audio_frame_times.size(), kMinDataPoints);
const int min_data_points = is_full_performance_run_
? kMinDataPointsForFullRun
: kMinDataPointsForQuickRun;
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(audio_frame_times.size()));
MapFrameTimes(video_events_, &video_frame_times);
EXPECT_GE(video_frame_times.size(), kMinDataPoints);
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(video_frame_times.size()));
std::vector<double> deltas;
for (TimeMap::const_iterator i = audio_frame_times.begin();
i != audio_frame_times.end();
@@ -244,7 +276,8 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
deltas.push_back((i->second - j->second).InMillisecondsF());
}
}
EXPECT_GE(deltas.size(), kMinDataPoints);
EXPECT_FOR_PERFORMANCE_RUN(min_data_points <=
static_cast<int>(deltas.size()));
MeanAndError av_sync(deltas);
av_sync.Print(name, modifier, "av_sync", "ms");
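A short sketch of the metric printed above, assuming the elided lookup |j| resolves to the video-map entry for the same frame ID as the audio entry |i|:

  // Each av_sync sample is (audio playout time - video playout time) in ms
  // for one frame ID: 0 means perfect lip sync, and a positive value means
  // the audio played out after its matching video frame.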
@@ -261,11 +294,13 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// (usually 480 lines or more). Note that this is the video resolution at
// the receiver, but changes originate on the sender side.
std::vector<double> slice_for_analysis;
if (video_frame_lines_.size() > kTrimEvents * 2) {
slice_for_analysis.reserve(video_frame_lines_.size() - kTrimEvents * 2);
EXPECT_GE(slice_for_analysis.capacity(), kMinDataPoints);
std::transform(video_frame_lines_.begin() + kTrimEvents,
video_frame_lines_.end() - kTrimEvents,
const int trim_count = is_full_performance_run_ ? kTrimEvents : 0;
if (static_cast<int>(video_frame_lines_.size()) > trim_count * 2) {
slice_for_analysis.reserve(video_frame_lines_.size() - trim_count * 2);
EXPECT_FOR_PERFORMANCE_RUN(
min_data_points <= static_cast<int>(slice_for_analysis.capacity()));
std::transform(video_frame_lines_.begin() + trim_count,
video_frame_lines_.end() - trim_count,
std::back_inserter(slice_for_analysis),
[](int lines) { return static_cast<double>(lines); });
}
@@ -276,14 +311,14 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// indicates a lack of data.
int last_lines = -1;
int change_count = 0;
for (size_t i = kTrimEvents; i < video_frame_lines_.size() - kTrimEvents;
++i) {
for (int i = trim_count;
i < static_cast<int>(video_frame_lines_.size()) - trim_count; ++i) {
if (video_frame_lines_[i] != last_lines) {
++change_count;
last_lines = video_frame_lines_[i];
}
}
EXPECT_GT(change_count, 0);
EXPECT_FOR_PERFORMANCE_RUN(change_count > 0);
perf_test::PrintResult(name, modifier, "resolution_changes",
base::NumberToString(change_count), "count", true);
}
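For illustration, a tiny worked example of the resolution-change counter above (line counts invented):

  // video_frame_lines_ of {270, 270, 360, 360, 480} yields change_count == 3:
  // the transitions to 270, to 360, and to 480 are each counted once.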
@@ -300,6 +335,10 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
return;
}
TRACE_EVENT_INSTANT1("cast_perf_test", "AudioFramePlayout",
TRACE_EVENT_SCOPE_THREAD, "playout_time",
(playout_time - base::TimeTicks()).InMicroseconds());
// Note: This is the number of the video frame that this audio belongs to.
uint16_t frame_no;
if (media::cast::DecodeTimestamp(audio_frame->channel(0),
@@ -330,6 +369,8 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
video_frame_lines_.push_back(video_frame->visible_rect().height());
}
const bool is_full_performance_run_;
std::vector<TimeData> audio_events_;
std::vector<TimeData> video_events_;
@@ -423,20 +464,6 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
TabCapturePerformanceTestBase::SetUpCommandLine(command_line);
}
void GetTraceEvents(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name,
trace_analyzer::TraceEventVector* events) {
trace_analyzer::Query query =
trace_analyzer::Query::EventNameIs(event_name) &&
(trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_FLOW_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_INSTANT) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE));
analyzer->FindEvents(query, events);
VLOG(0) << "Retrieved " << events->size() << " events for: " << event_name;
}
// The key contains the name of the argument and the argument.
typedef std::pair<std::string, double> EventMapKey;
typedef std::map<EventMapKey, const trace_analyzer::TraceEvent*> EventMap;
@@ -448,7 +475,7 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
const std::string& event_name,
EventMap* event_map) {
trace_analyzer::TraceEventVector events;
GetTraceEvents(analyzer, event_name, &events);
QueryTraceEvents(analyzer, event_name, &events);
for (size_t i = 0; i < events.size(); i++) {
std::map<std::string, double>::const_iterator j;
for (j = events[i]->arg_numbers.begin();
@@ -501,7 +528,7 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
// from start to finish.
trace_analyzer::TraceEventVector capture_events;
// Sender side:
GetTraceEvents(analyzer, "Capture", &capture_events);
QueryTraceEvents(analyzer, "Capture", &capture_events);
EventMap onbuffer, sink, inserted, encoded, transmitted, decoded, done;
IndexEvents(analyzer, "OnBufferReceived", &onbuffer);
IndexEvents(analyzer, "ConsumeVideoFrame", &sink);
@@ -531,9 +558,12 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
// producing a matrix of when each frame reached each pipeline checkpoint.
// See the "cheat sheet" below for a description of each pipeline
// checkpoint.
ASSERT_GT(capture_events.size(), 2 * kTrimEvents);
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
EXPECT_FOR_PERFORMANCE_RUN((trim_count * 2) <=
static_cast<int>(capture_events.size()));
std::vector<std::vector<double>> traced_frames;
for (size_t i = kTrimEvents; i < capture_events.size() - kTrimEvents; i++) {
for (int i = trim_count;
i < static_cast<int>(capture_events.size()) - trim_count; i++) {
std::vector<double> times;
const trace_analyzer::TraceEvent* event = capture_events[i];
if (!event->other_event)
@@ -558,10 +588,15 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
// Report the fraction of captured frames that were dropped somewhere along
// the way (i.e., before playout at the receiver).
const size_t capture_event_count = capture_events.size() - 2 * kTrimEvents;
EXPECT_GE(capture_event_count, kMinDataPoints);
const int capture_event_count = capture_events.size() - 2 * trim_count;
EXPECT_FOR_PERFORMANCE_RUN((is_full_performance_run()
? kMinDataPointsForFullRun
: kMinDataPointsForQuickRun) <=
capture_event_count);
const double success_percent =
100.0 * traced_frames.size() / capture_event_count;
(capture_event_count == 0)
? NAN
: (100.0 * traced_frames.size() / capture_event_count);
perf_test::PrintResult(
test_name, GetSuffixForTestFlags(), "frame_drop_rate",
base::StringPrintf("%f", 100 - success_percent), "percent", true);
@@ -591,10 +626,12 @@ class CastV2PerformanceTest : public TabCapturePerformanceTestBase,
MeanAndError AnalyzeTraceDistance(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name) {
trace_analyzer::TraceEventVector events;
GetTraceEvents(analyzer, event_name, &events);
QueryTraceEvents(analyzer, event_name, &events);
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
std::vector<double> deltas;
for (size_t i = kTrimEvents + 1; i < events.size() - kTrimEvents; ++i) {
for (int i = trim_count + 1;
i < static_cast<int>(events.size()) - trim_count; ++i) {
double delta_micros = events[i]->timestamp - events[i - 1]->timestamp;
deltas.push_back(delta_micros / 1000.0);
}
@@ -637,8 +674,8 @@ IN_PROC_BROWSER_TEST_P(CastV2PerformanceTest, Performance) {
}
scoped_refptr<media::cast::StandaloneCastEnvironment> cast_environment(
new SkewedCastEnvironment(delta));
TestPatternReceiver* const receiver =
new TestPatternReceiver(cast_environment, receiver_end_point);
TestPatternReceiver* const receiver = new TestPatternReceiver(
cast_environment, receiver_end_point, is_full_performance_run());
receiver->Start();
// Create a proxy for the UDP packets that simulates certain network
@@ -693,20 +730,29 @@ IN_PROC_BROWSER_TEST_P(CastV2PerformanceTest, Performance) {
javascript_to_play_video));
// Observe the running browser for a while, collecting a trace.
const std::string json_events = TraceAndObserve("gpu.capture,cast_perf_test");
TraceAnalyzerUniquePtr analyzer = TraceAndObserve(
"gpu.capture,cast_perf_test",
std::vector<base::StringPiece>{
// From the Compositor/Capture pipeline...
"Capture", "OnBufferReceived", "ConsumeVideoFrame",
// From the Cast Sender's pipeline...
"InsertRawVideoFrame", "VideoFrameEncoded",
// From the Cast Receiver's pipeline...
"PullEncodedVideoFrame", "VideoFrameDecoded",
// From the TestPatternReceiver (see class above!)...
"VideoFramePlayout", "AudioFramePlayout"},
// In a full performance run, events will be trimmed from both ends of the
// trace. Otherwise, just require the bare minimum to verify that the stats
// calculations will work.
is_full_performance_run() ? (2 * kTrimEvents + kMinDataPointsForFullRun)
: kMinDataPointsForQuickRun);
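With the constants defined near the top of this file, the required event count passed above works out as follows:

  // Full performance run: 2 * kTrimEvents + kMinDataPointsForFullRun
  //                       = 2 * 24 + 100 = 148 events per name.
  // Quick-check (CQ) run: kMinDataPointsForQuickRun = 3 events per name.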
// Shut down the receiver and all the CastEnvironment threads.
VLOG(1) << "Shutting-down receiver and CastEnvironment...";
receiver->Stop();
cast_environment->Shutdown();
VLOG(2) << "Dump of trace events (trace_events.json.gz.b64):\n"
<< MakeBase64EncodedGZippedString(json_events);
VLOG(1) << "Analyzing trace events...";
std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer;
analyzer.reset(trace_analyzer::TraceAnalyzer::Create(json_events));
analyzer->AssociateAsyncBeginEndEvents();
// This prints out the average time between capture events.
// Depending on the test, the capture frame rate is capped (e.g., at 30fps,
......
@@ -9,6 +9,7 @@
#include <cmath>
#include "base/base64.h"
#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
@@ -34,6 +35,10 @@
#include "third_party/zlib/google/compression_utils.h"
#include "ui/gl/gl_switches.h"
namespace {
constexpr base::StringPiece kFullPerformanceRunSwitch = "full-performance-run";
} // namespace
TabCapturePerformanceTestBase::TabCapturePerformanceTestBase() = default;
TabCapturePerformanceTestBase::~TabCapturePerformanceTestBase() = default;
@@ -57,6 +62,14 @@ void TabCapturePerformanceTestBase::SetUpOnMainThread() {
void TabCapturePerformanceTestBase::SetUpCommandLine(
base::CommandLine* command_line) {
is_full_performance_run_ = command_line->HasSwitch(kFullPerformanceRunSwitch);
// In the spirit of the NoBestEffortTasksTests, it's important to add this
// flag to make sure best-effort tasks are not required for the success of
// these tests. In a performance test run, this also removes sources of
// variance.
command_line->AppendSwitch(switches::kDisableBestEffortTasks);
// Note: The naming "kUseGpuInTests" is very misleading. It actually means
// "don't use a software OpenGL implementation." Subclasses will either call
// UseSoftwareCompositing() to use Chrome's software compositor, or else they
@@ -130,18 +143,54 @@ base::Value TabCapturePerformanceTestBase::SendMessageToExtension(
return base::Value();
}
std::string TabCapturePerformanceTestBase::TraceAndObserve(
const std::string& category_patterns) {
LOG(INFO) << "Starting tracing and running for "
<< kObservationPeriod.InSecondsF() << " sec...";
TabCapturePerformanceTestBase::TraceAnalyzerUniquePtr
TabCapturePerformanceTestBase::TraceAndObserve(
const std::string& category_patterns,
const std::vector<base::StringPiece>& event_names,
int required_event_count) {
const base::TimeDelta observation_period = is_full_performance_run_
? kFullRunObservationPeriod
: kQuickRunObservationPeriod;
LOG(INFO) << "Starting tracing...";
{
// Wait until all child processes have ACK'ed that they are now tracing.
base::trace_event::TraceConfig trace_config(
category_patterns, base::trace_event::RECORD_CONTINUOUSLY);
base::RunLoop run_loop;
const bool did_begin_tracing = tracing::BeginTracingWithTraceConfig(
trace_config, run_loop.QuitClosure());
CHECK(did_begin_tracing);
run_loop.Run();
}
LOG(INFO) << "Running browser for " << observation_period.InSecondsF()
<< " sec...";
ContinueBrowserFor(observation_period);
LOG(INFO) << "Observation period has completed. Ending tracing...";
std::string json_events;
bool success = tracing::BeginTracing(category_patterns);
CHECK(success);
ContinueBrowserFor(kObservationPeriod);
success = tracing::EndTracing(&json_events);
const bool success = tracing::EndTracing(&json_events);
CHECK(success);
LOG(INFO) << "Observation period has completed. Ending tracing...";
return json_events;
std::unique_ptr<trace_analyzer::TraceAnalyzer> result(
trace_analyzer::TraceAnalyzer::Create(json_events));
result->AssociateAsyncBeginEndEvents();
bool have_enough_events = true;
for (const auto& event_name : event_names) {
trace_analyzer::TraceEventVector events;
QueryTraceEvents(result.get(), event_name, &events);
LOG(INFO) << "Collected " << events.size() << " events ("
<< required_event_count << " required) for: " << event_name;
if (static_cast<int>(events.size()) < required_event_count) {
have_enough_events = false;
}
}
LOG_IF(WARNING, !have_enough_events) << "Insufficient data collected.";
VLOG_IF(2, result) << "Dump of trace events (trace_events.json.gz.b64):\n"
<< MakeBase64EncodedGZippedString(json_events);
return result;
}
// static
@@ -182,6 +231,21 @@ void TabCapturePerformanceTestBase::ContinueBrowserFor(
run_loop.Run();
}
// static
void TabCapturePerformanceTestBase::QueryTraceEvents(
trace_analyzer::TraceAnalyzer* analyzer,
base::StringPiece event_name,
trace_analyzer::TraceEventVector* events) {
const trace_analyzer::Query kQuery =
trace_analyzer::Query::EventNameIs(event_name.as_string()) &&
(trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_FLOW_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_INSTANT) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE));
analyzer->FindEvents(kQuery, events);
}
std::unique_ptr<net::test_server::HttpResponse>
TabCapturePerformanceTestBase::HandleRequest(
const net::test_server::HttpRequest& request) {
@@ -199,7 +263,12 @@ TabCapturePerformanceTestBase::HandleRequest(
}
// static
constexpr base::TimeDelta TabCapturePerformanceTestBase::kObservationPeriod;
constexpr base::TimeDelta
TabCapturePerformanceTestBase::kFullRunObservationPeriod;
// static
constexpr base::TimeDelta
TabCapturePerformanceTestBase::kQuickRunObservationPeriod;
// static
constexpr base::TimeDelta
......
@@ -7,8 +7,11 @@
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/test/trace_event_analyzer.h"
#include "chrome/test/base/in_process_browser_test.h"
namespace base {
@@ -43,6 +46,11 @@ class TabCapturePerformanceTestBase : public InProcessBrowserTest {
void SetUpOnMainThread() override;
void SetUpCommandLine(base::CommandLine* command_line) override;
// If true, run a full performance test. If false, all tests should just run a
// quick test, something appropriate for running in a CQ try run or the
// waterfall.
bool is_full_performance_run() const { return is_full_performance_run_; }
// Returns the currently-loaded extension.
const extensions::Extension* extension() const { return extension_; }
@@ -68,9 +76,12 @@ class TabCapturePerformanceTestBase : public InProcessBrowserTest {
base::Value SendMessageToExtension(const std::string& json);
// Runs the browser for a while, with tracing enabled to collect events
// matching the given |category_patterns|, then returns the JSON events string
// returned by tracing::EndTracing().
std::string TraceAndObserve(const std::string& category_patterns);
// matching the given |category_patterns|.
using TraceAnalyzerUniquePtr = std::unique_ptr<trace_analyzer::TraceAnalyzer>;
TraceAnalyzerUniquePtr TraceAndObserve(
const std::string& category_patterns,
const std::vector<base::StringPiece>& event_names,
int required_event_count);
// Returns the path ".../test/data/extensions/api_test/".
static base::FilePath GetApiTestDataDir();
@@ -82,11 +93,19 @@ class TabCapturePerformanceTestBase : public InProcessBrowserTest {
// Uses base::RunLoop to run the browser for the given |duration|.
static void ContinueBrowserFor(base::TimeDelta duration);
// Queries the |analyzer| for events having the given |event_name| whose phase
// is classified as BEGIN, INSTANT, or COMPLETE (i.e., omit END events).
static void QueryTraceEvents(trace_analyzer::TraceAnalyzer* analyzer,
base::StringPiece event_name,
trace_analyzer::TraceEventVector* events);
protected:
// After the page has loaded, this is how long the browser is run with trace
// event recording taking place.
static constexpr base::TimeDelta kObservationPeriod =
// These control how long the browser is run with trace event recording
// taking place.
static constexpr base::TimeDelta kFullRunObservationPeriod =
base::TimeDelta::FromSeconds(15);
static constexpr base::TimeDelta kQuickRunObservationPeriod =
base::TimeDelta::FromSeconds(4);
// If sending a message to the extension fails, because the extension has not
// started its message listener yet, how long before the next retry?
@@ -103,6 +122,8 @@ class TabCapturePerformanceTestBase : public InProcessBrowserTest {
static const char kExtensionId[];
private:
bool is_full_performance_run_ = false;
// Set to the test page that should be served by the next call to
// HandleRequest().
std::string test_page_to_serve_;
......
@@ -7,6 +7,7 @@
#include "base/command_line.h"
#include "base/strings/stringprintf.h"
#include "base/test/trace_event_analyzer.h"
#include "build/build_config.h"
#include "chrome/browser/extensions/api/tab_capture/tab_capture_performance_test_base.h"
#include "chrome/browser/extensions/extension_service.h"
#include "chrome/browser/profiles/profile.h"
@@ -24,20 +25,33 @@
#include "ui/compositor/compositor_switches.h"
#include "ui/gl/gl_switches.h"
#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif
namespace {
// Number of events to trim from the beginning and end. These events don't
// contribute anything toward stable measurements: A brief moment of startup
// "jank" is acceptable, and shutdown may result in missing events (since
// render widget draws may stop before capture stops).
constexpr size_t kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
constexpr int kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
// Minimum number of events required for a reasonable analysis.
constexpr size_t kMinDataPoints = 100; // ~5 sec at 24fps.
constexpr int kMinDataPointsForFullRun = 100; // ~5 sec at 24fps.
// Minimum number of events required for data analysis in a non-performance run.
constexpr int kMinDataPointsForQuickRun = 3;
// A convenience macro to run a gtest expectation in the "full performance run"
// setting, or else log a warning that something is not being entirely tested in the
// "CQ run" setting. This is required because the test runs in the CQ may not be
// long enough to collect sufficient tracing data; and, unfortunately, there's
// nothing we can do about that.
#define EXPECT_FOR_PERFORMANCE_RUN(expr) \
do { \
if (is_full_performance_run()) { \
EXPECT_TRUE(expr); \
} else if (!(expr)) { \
LOG(WARNING) << "Allowing failure: " << #expr; \
} \
} while (false)
enum TestFlags {
kUseGpu = 1 << 0, // Only execute test if --enable-gpu was given
@@ -92,44 +106,31 @@ class TabCapturePerformanceTest : public TabCapturePerformanceTestBase,
TabCapturePerformanceTestBase::SetUpCommandLine(command_line);
}
static void GetTraceEvents(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name,
trace_analyzer::TraceEventVector* events) {
trace_analyzer::Query query =
trace_analyzer::Query::EventNameIs(event_name) &&
(trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_ASYNC_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_FLOW_BEGIN) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_INSTANT) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE));
analyzer->FindEvents(query, events);
VLOG(0) << "Retrieved " << events->size() << " events for: " << event_name;
ASSERT_LT(2 * kTrimEvents + kMinDataPoints, events->size())
<< "Not enough events of type " << event_name << " found for analysis.";
}
// Analyze and print the mean and stddev of how often events having the name
// |event_name| occur.
bool PrintRateResults(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name) {
trace_analyzer::TraceEventVector events;
GetTraceEvents(analyzer, event_name, &events);
QueryTraceEvents(analyzer, event_name, &events);
// Ignore some events for startup/setup/caching/teardown.
trace_analyzer::TraceEventVector rate_events(events.begin() + kTrimEvents,
events.end() - kTrimEvents);
trace_analyzer::RateStats stats;
if (!GetRateStats(rate_events, &stats, NULL)) {
LOG(ERROR) << "GetRateStats failed";
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
if (static_cast<int>(events.size()) < trim_count * 2) {
LOG(ERROR) << "Fewer events for " << event_name
<< " than would be trimmed: " << events.size();
return false;
}
trace_analyzer::TraceEventVector rate_events(events.begin() + trim_count,
events.end() - trim_count);
trace_analyzer::RateStats stats;
const bool have_rate_stats = GetRateStats(rate_events, &stats, nullptr);
double mean_ms = stats.mean_us / 1000.0;
double std_dev_ms = stats.standard_deviation_us / 1000.0;
std::string mean_and_error = base::StringPrintf("%f,%f", mean_ms,
std_dev_ms);
perf_test::PrintResultMeanAndError(kTestName, GetSuffixForTestFlags(),
event_name, mean_and_error, "ms", true);
return true;
return have_rate_stats;
}
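A quick sanity sketch of the rate metric above (the frame rate is invented):

  // Events arriving at a steady 30 fps would report a mean of ~33.3 ms between
  // occurrences with a near-zero standard deviation; irregular capture shows
  // up as a larger standard deviation.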
// Analyze and print the mean and stddev of the amount of time between the
@@ -137,11 +138,17 @@ class TabCapturePerformanceTest : public TabCapturePerformanceTestBase,
bool PrintLatencyResults(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name) {
trace_analyzer::TraceEventVector events;
GetTraceEvents(analyzer, event_name, &events);
QueryTraceEvents(analyzer, event_name, &events);
// Ignore some events for startup/setup/caching/teardown.
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
if (static_cast<int>(events.size()) < trim_count * 2) {
LOG(ERROR) << "Fewer events for " << event_name
<< " than would be trimmed: " << events.size();
return false;
}
trace_analyzer::TraceEventVector events_to_analyze(
events.begin() + kTrimEvents, events.end() - kTrimEvents);
events.begin() + trim_count, events.end() - trim_count);
// Compute mean and standard deviation of all capture latencies.
double sum = 0.0;
@@ -156,15 +163,16 @@ class TabCapturePerformanceTest : public TabCapturePerformanceTestBase,
sqr_sum += latency * latency;
++count;
}
DCHECK_GE(static_cast<size_t>(count), kMinDataPoints);
const double mean_us = sum / count;
const double mean_us = (count == 0) ? NAN : (sum / count);
const double std_dev_us =
sqrt(std::max(0.0, count * sqr_sum - sum * sum)) / count;
(count == 0)
? NAN
: (sqrt(std::max(0.0, count * sqr_sum - sum * sum)) / count);
perf_test::PrintResultMeanAndError(
kTestName, GetSuffixForTestFlags(), event_name + "Latency",
base::StringPrintf("%f,%f", mean_us / 1000.0, std_dev_us / 1000.0),
"ms", true);
return true;
return count > 0;
}
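For reference, the standard-deviation expression above is the single-pass population form; a short algebraic check (not part of the change):

  // sqrt(count * sqr_sum - sum * sum) / count
  //   == sqrt(sqr_sum / count - (sum / count) * (sum / count))
  //   == sqrt(E[x^2] - E[x]^2)
  // The max(0.0, ...) clamp guards against tiny negative values caused by
  // floating-point rounding when the variance is essentially zero.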
// Analyze and print the mean and stddev of how often events having the name
@@ -172,11 +180,17 @@ class TabCapturePerformanceTest : public TabCapturePerformanceTestBase,
bool PrintFailRateResults(trace_analyzer::TraceAnalyzer* analyzer,
const std::string& event_name) {
trace_analyzer::TraceEventVector events;
GetTraceEvents(analyzer, event_name, &events);
QueryTraceEvents(analyzer, event_name, &events);
// Ignore some events for startup/setup/caching/teardown.
const int trim_count = is_full_performance_run() ? kTrimEvents : 0;
if (static_cast<int>(events.size()) < trim_count * 2) {
LOG(ERROR) << "Fewer events for " << event_name
<< " than would be trimmed: " << events.size();
return false;
}
trace_analyzer::TraceEventVector events_to_analyze(
events.begin() + kTrimEvents, events.end() - kTrimEvents);
events.begin() + trim_count, events.end() - trim_count);
// Compute percentage of begin→end events missing a success=true flag.
double fail_percent = 100.0;
@@ -206,7 +220,7 @@ class TabCapturePerformanceTest : public TabCapturePerformanceTestBase,
perf_test::PrintResult(
kTestName, GetSuffixForTestFlags(), event_name + "FailRate",
base::StringPrintf("%f", fail_percent), "percent", true);
return true;
return !events_to_analyze.empty();
}
protected:
@@ -238,11 +252,15 @@ IN_PROC_BROWSER_TEST_P(TabCapturePerformanceTest, Performance) {
<< (reason ? *reason : std::string("<MISSING REASON>"));
// Observe the running browser for a while, collecting a trace.
const std::string json_events = TraceAndObserve("gpu,gpu.capture");
std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer;
analyzer.reset(trace_analyzer::TraceAnalyzer::Create(json_events));
analyzer->AssociateAsyncBeginEndEvents();
std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = TraceAndObserve(
"gpu,gpu.capture",
std::vector<base::StringPiece>{
"RenderWidget::DidCommitAndDrawCompositorFrame", "Capture"},
// In a full performance run, events will be trimmed from both ends of the
// trace. Otherwise, just require the bare minimum to verify that the stats
// calculations will work.
is_full_performance_run() ? (2 * kTrimEvents + kMinDataPointsForFullRun)
: kMinDataPointsForQuickRun);
// The printed result will be the average time between composites in the
// renderer of the page being captured. This may not reach the full frame
@@ -251,29 +269,36 @@ IN_PROC_BROWSER_TEST_P(TabCapturePerformanceTest, Performance) {
// Note that any changes to drawing or compositing in the renderer,
// including changes to Blink (e.g., Canvas drawing), layout, etc.; will
// have an impact on this result.
EXPECT_TRUE(PrintRateResults(
EXPECT_FOR_PERFORMANCE_RUN(PrintRateResults(
analyzer.get(), "RenderWidget::DidCommitAndDrawCompositorFrame"));
// This prints out the average time between capture events in the browser
// process. This should roughly match the renderer's draw+composite rate.
EXPECT_TRUE(PrintRateResults(analyzer.get(), "Capture"));
EXPECT_FOR_PERFORMANCE_RUN(PrintRateResults(analyzer.get(), "Capture"));
// Analyze mean/stddev of the capture latency. This is a measure of how long
// each capture took, from initiation until read-back from the GPU into a
// media::VideoFrame was complete. Lower is better.
EXPECT_TRUE(PrintLatencyResults(analyzer.get(), "Capture"));
EXPECT_FOR_PERFORMANCE_RUN(PrintLatencyResults(analyzer.get(), "Capture"));
// Analyze percentage of failed captures. This measures how often captures
// were initiated, but not completed successfully. Lower is better, and zero
// is ideal.
EXPECT_TRUE(PrintFailRateResults(analyzer.get(), "Capture"));
EXPECT_FOR_PERFORMANCE_RUN(PrintFailRateResults(analyzer.get(), "Capture"));
}
// Note: First argument is optional and intentionally left blank.
// (it's a prefix for the generated test cases)
// On ChromeOS, software compositing is not an option. So, only do a subset of
// the tests.
#if defined(OS_CHROMEOS)
INSTANTIATE_TEST_SUITE_P(,
TabCapturePerformanceTest,
testing::Values(kUseGpu,
kTestThroughWebRTC | kUseGpu));
#else
INSTANTIATE_TEST_SUITE_P(,
TabCapturePerformanceTest,
testing::Values(0,
kUseGpu,
kTestThroughWebRTC,
kTestThroughWebRTC | kUseGpu));
#endif // defined(OS_CHROMEOS)
@@ -1330,6 +1330,7 @@ test("browser_tests") {
"../browser/extensions/api/braille_display_private/mock_braille_controller.h",
"../browser/extensions/api/browsing_data/browsing_data_test.cc",
"../browser/extensions/api/cast_streaming/cast_streaming_apitest.cc",
"../browser/extensions/api/cast_streaming/performance_test.cc",
"../browser/extensions/api/command_line_private/command_line_private_apitest.cc",
"../browser/extensions/api/commands/command_service_browsertest.cc",
"../browser/extensions/api/content_settings/content_settings_apitest.cc",
@@ -1397,6 +1398,9 @@ test("browser_tests") {
"../browser/extensions/api/system_indicator/system_indicator_apitest.cc",
"../browser/extensions/api/system_private/system_private_apitest.cc",
"../browser/extensions/api/tab_capture/tab_capture_apitest.cc",
"../browser/extensions/api/tab_capture/tab_capture_performance_test_base.cc",
"../browser/extensions/api/tab_capture/tab_capture_performance_test_base.h",
"../browser/extensions/api/tab_capture/tab_capture_performancetest.cc",
"../browser/extensions/api/tabs/tabs_test.cc",
"../browser/extensions/api/terminal/terminal_private_apitest.cc",
"../browser/extensions/api/test/apitest_apitest.cc",
@@ -5290,81 +5294,14 @@ test("chrome_app_unittests") {
}
}
if (!is_android && !is_chromecast) {
test("performance_browser_tests") {
sources = [
"../app/chrome_version.rc.version",
"../browser/extensions/api/cast_streaming/performance_test.cc",
"../browser/extensions/api/tab_capture/tab_capture_performance_test_base.cc",
"../browser/extensions/api/tab_capture/tab_capture_performance_test_base.h",
"../browser/extensions/api/tab_capture/tab_capture_performancetest.cc",
"../browser/extensions/chrome_extension_test_notification_observer.cc",
"../browser/extensions/chrome_extension_test_notification_observer.h",
"../browser/extensions/extension_apitest.cc",
"../browser/extensions/extension_browsertest.cc",
"../browser/extensions/updater/extension_cache_fake.cc",
"../browser/extensions/updater/extension_cache_fake.h",
"base/browser_perf_tests_main.cc",
]
defines = [ "HAS_OUT_OF_PROC_TEST_RUNNER" ]
deps = [
":test_support",
":test_support_ui",
"//base",
"//base:i18n",
"//base/test:test_support",
"//chrome:browser_tests_pak",
"//chrome:packed_resources",
"//chrome:resources",
"//chrome/app:chrome_dll_resources",
"//chrome/browser",
"//chrome/renderer",
"//components/about_handler",
"//components/autofill/content/renderer:test_support",
"//components/spellcheck:buildflags",
"//content/test:test_support",
"//extensions/buildflags",
"//media/cast:test_support",
"//testing/gmock",
"//testing/gtest",
"//testing/perf",
]
data = [
"//chrome/test/data/extensions/api_test/",
]
# Everything needed to build/copy to run the performance_browser_tests isolate.
# See: //testing/buildbot/gn_isolate_map.pyl
group("performance_browser_tests") {
testonly = true
data_deps = [
":browser_tests",
"//testing:run_perf_test",
]
if (is_win) {
configs -= [ "//build/config/win:default_incremental_linking" ]
configs +=
[ "//build/config/win:default_large_module_incremental_linking" ]
deps += [ "//chrome/app:command_ids" ]
} else {
sources -= [ "../app/chrome_version.rc.version" ]
}
if (is_linux || is_win) {
data += [
"$root_out_dir/chrome_100_percent.pak",
"$root_out_dir/chrome_200_percent.pak",
"$root_out_dir/locales/en-US.pak",
"$root_out_dir/locales/fr.pak",
"$root_out_dir/resources.pak",
]
}
if (is_mac) {
deps += [ "//chrome:chrome_app" ]
}
# This target should not require the Chrome executable to run.
assert_no_deps = [ "//chrome" ]
}
}
if (!is_android && !is_fuchsia) {
......
@@ -2062,16 +2062,18 @@
},
"performance_browser_tests": {
"args": [
"performance_browser_tests",
"browser_tests",
"--non-telemetry=true",
"--full-performance-run",
"--test-launcher-jobs=1",
"--test-launcher-retry-limit=0",
"--test-launcher-print-test-stdio=always",
# TODO(crbug.com/759866): Figure out why CastV2PerformanceTest/0 sometimes
# takes 15-30 seconds to start up and, once fixed, remove this workaround
# (extends the watchdog timeout to 2 minutes, normally 30 seconds).
"--test-launcher-timeout=120000",
# Allow the full performance runs to take up to 60 seconds (rather than
# the default of 30 for normal CQ browser test runs).
"--ui-test-action-timeout=60000",
"--ui-test-action-max-timeout=60000",
"--test-launcher-timeout=60000",
"--gtest_filter=TabCapturePerformanceTest.*:CastV2PerformanceTest.*",
"--test-launcher-jobs=1",
"--enable-gpu",
],
"label": "//chrome/test:performance_browser_tests",
"script": "//testing/scripts/run_performance_tests.py",
......