Commit 08edd23f authored by Brian Anderson, committed by Commit Bot

ui: Add FrameMetrics class.

This is the high-level helper class that will be used by all
frame sources we want to instrument.

It takes in timing information about frames produced and
displayed and calculates metrics for a frame's throughput,
latency, latency speed, and latency acceleration.

It forwards those computations to StreamAnalyzers, which compute
statistics for each metric over time, including the mean, RMS,
standard deviation, percentiles, and worst-performing time period.
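
As a rough sketch of the intended usage (hypothetical caller; real frame
sources will be instrumented in later patches), a source would do:

  ui::FrameMetricsSettings settings;
  ui::FrameMetrics metrics(settings, "ExampleFrameSource");
  // Once per frame the source produces:
  metrics.AddFrameProduced(source_time, amount_produced, amount_skipped);
  // Once the frame's damage is known to have reached the display:
  metrics.AddFrameDisplayed(source_time, display_time);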

Bug: 790761
Change-Id: Ibe2980e861acf0af3904b1e8d8caf134d9ef141b
Reviewed-on: https://chromium-review.googlesource.com/979120
Reviewed-by: Sadrul Chowdhury <sadrul@chromium.org>
Reviewed-by: Brian Anderson <brianderson@chromium.org>
Reviewed-by: Timothy Dresser <tdresser@chromium.org>
Commit-Queue: Brian Anderson <brianderson@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555561}
parent e342a1ca
@@ -9,6 +9,8 @@ jumbo_source_set("latency") {
   sources = [
     "fixed_point.cc",
     "fixed_point.h",
+    "frame_metrics.cc",
+    "frame_metrics.h",
     "histograms.cc",
     "histograms.h",
     "latency_histogram_macros.h",
@@ -48,6 +50,7 @@ test("latency_unittests") {
     "fixed_point_unittest.cc",
     "frame_metrics_test_common.cc",
     "frame_metrics_test_common.h",
+    "frame_metrics_unittest.cc",
     "histograms_unittest.cc",
     "latency_info_unittest.cc",
     "stream_analyzer_unittest.cc",
...
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/latency/frame_metrics.h"
#include <cmath>
#include <limits>
#include <vector>
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
namespace ui {
namespace {
// How often to report results.
// This needs to be short enough to avoid overflow in the accumulators.
constexpr base::TimeDelta kDefaultReportPeriod =
base::TimeDelta::FromMinutes(1);
// Gives the histogram for skips the highest precision just above a
// skipped:produced ratio of 1.
constexpr int64_t kFixedPointMultiplierSkips =
frame_metrics::kFixedPointMultiplier;
// Gives latency a precision of 1 microsecond in both the histogram and
// the fixed point values.
constexpr int64_t kFixedPointMultiplierLatency = 1;
// This is used to weigh each latency sample by a constant value since
// we don't weigh it by the frame duration like other metrics.
// A larger weight improves precision in the fixed point accumulators, but we
// don't want to make it so big that it causes overflow before we start a new
// reporting period.
constexpr uint32_t kLatencySampleWeight = 1u << 10;
constexpr uint32_t kMaxFramesBeforeOverflowPossible =
std::numeric_limits<uint32_t>::max() / kLatencySampleWeight;
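// Illustration (not part of the original change): with a sample weight of
// 2^10, a uint32_t weight accumulator can overflow after
// 2^32 / 2^10 = 2^22 (~4.2 million) samples, which is what bounds the
// number of frames per reporting period above.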
// Gives the histogram for latency speed the highest precision just above a
// (latency delta : frame delta) ratio of 1.
constexpr int64_t kFixedPointMultiplierLatencySpeed =
frame_metrics::kFixedPointMultiplier;
// Gives the histogram for latency acceleration the highest precision just
// above a (latency speed delta : frame delta) of 1/1024.
// A value ~1k was chosen since frame deltas are on the order of microseconds.
// Use 1024 instead of 1000 since powers of 2 let the compiler optimize integer
// multiplies with shifts if it wants.
// TODO(brianderson): Fine tune these values. http://crbug.com/837434
constexpr int64_t kFixedPointMultiplierLatencyAcceleration =
frame_metrics::kFixedPointMultiplier * 1024;
// Converts a ratio to a fixed point value.
// Each threshold is offset by 0.5 to filter out jitter/inaccuracies.
constexpr uint32_t RatioThreshold(double fraction) {
return static_cast<uint32_t>((fraction + 0.5) *
frame_metrics::kFixedPointMultiplier);
}
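// For example, RatioThreshold(1) is a fixed-point ratio of 1.5, so a frame
// only counts toward the "more than 1 frame skipped" threshold once it is
// clearly past a ratio of 1.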
// Converts frequency as a floating point value into a fixed point value
// representing microseconds of latency.
// The result is scaled by 110% to allow for slack in case the actual refresh
// period is slightly longer (common) or if there is some jitter in the
// timestamp sampling.
constexpr uint32_t LatencyThreshold(double Hz) {
return static_cast<uint32_t>((1.1 / Hz) *
base::TimeTicks::kMicrosecondsPerSecond);
}
// The skip thresholds are selected to track each time more than 0, 1, 2, or 4
// frames were skipped at once.
constexpr std::initializer_list<uint32_t> kSkipThresholds = {
RatioThreshold(0), RatioThreshold(1), RatioThreshold(2), RatioThreshold(4),
};
// The latency thresholds are selected based on common display frequencies.
// We often begin frames on a vsync, which results in whole vsync periods
// of latency. However, begin frames may be offset slightly from the vsync,
// which is common on Android, so the frequencies go all the way up to 240Hz.
constexpr std::initializer_list<uint32_t> kLatencyThresholds = {
LatencyThreshold(240), // 4.17 ms * 110% = 4.58 ms
LatencyThreshold(120), // 8.33 ms * 110% = 9.17 ms
LatencyThreshold(60), // 16.67 ms * 110% = 18.33 ms
LatencyThreshold(30), // 33.33 ms * 110% = 36.67 ms
};
// The latency speed thresholds are chosen to track each frame where the
// latency was constant (0) or when there was a jump of 1, 2, or 4 frame
// periods.
constexpr std::initializer_list<uint32_t> kLatencySpeedThresholds = {
RatioThreshold(0), RatioThreshold(1), RatioThreshold(2), RatioThreshold(4),
};
// The latency acceleration thresholds here are tentative.
// TODO(brianderson): Fine tune these values. http://crbug.com/837434
constexpr std::initializer_list<uint32_t> kLatencyAccelerationThresholds = {
RatioThreshold(0), RatioThreshold(1), RatioThreshold(2), RatioThreshold(4),
};
const char kTraceCategories[] = "gpu,benchmark";
// uint32_t should provide plenty of range for real-world values, but clip
// individual entries to make sure no single value dominates and also to
// avoid overflow in the accumulators and the fixed point math.
// This also makes sure overflowing values saturate instead of wrapping around
// and skewing our results.
// TODO(brianderson): Report warning if clipping occurred.
uint32_t CapValue(int64_t value) {
return static_cast<uint32_t>(std::min<int64_t>(
std::llabs(value), std::numeric_limits<uint32_t>::max()));
}
uint32_t CapDuration(const base::TimeDelta duration) {
constexpr base::TimeDelta kDurationCap = base::TimeDelta::FromMinutes(1);
return std::min(duration, kDurationCap).InMicroseconds();
}
} // namespace
namespace frame_metrics {
// Converts result to fraction of frames skipped.
// The internal skip values are (skipped:produced). This transform converts
// the result to (skipped:total), which is:
// a) Easier to interpret as a human, and
// b) In the same units as latency speed, which may help us create a unified
// smoothness metric in the future.
// The internal representation uses (skipped:produced) to:
// a) Allow RMS, SMR, StdDev, etc to be performed on values that increase
// linearly (rather than asymptotically to 1) with the amount of jank, and
// b) Give us better precision where it's important when stored as a fixed
// point number and in histogram buckets.
double SkipClient::TransformResult(double result) const {
// Avoid divide by zero.
if (result < 1e-32)
return 0;
return 1.0 / (1.0 + (kFixedPointMultiplierSkips / result));
}
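// For example, a result equal to kFixedPointMultiplierSkips represents a
// skipped:produced ratio of 1 (one frame skipped per frame produced), which
// this transform maps to 1 / (1 + 1) = 0.5 of total time skipped.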
// Converts result to seconds.
double LatencyClient::TransformResult(double result) const {
return result / (base::TimeTicks::kMicrosecondsPerSecond *
kFixedPointMultiplierLatency);
}
// Converts result to s/s, i.e. the fraction of frames traveled.
double LatencySpeedClient::TransformResult(double result) const {
return result / kFixedPointMultiplierLatencySpeed;
}
// Converts result to s/s^2,
// i.e. the change in the fraction of frames traveled per second.
double LatencyAccelerationClient::TransformResult(double result) const {
return (result * base::TimeTicks::kMicrosecondsPerSecond) /
kFixedPointMultiplierLatencyAcceleration;
}
} // namespace frame_metrics
FrameMetrics::FrameMetrics(const FrameMetricsSettings& settings,
const char* source_name)
: settings_(settings),
source_name_(source_name),
shared_skip_client_(settings_.max_window_size),
shared_latency_client_(settings_.max_window_size),
frame_skips_analyzer_(&skip_client_,
&shared_skip_client_,
kSkipThresholds,
std::make_unique<frame_metrics::RatioHistogram>()),
latency_analyzer_(&latency_client_,
&shared_latency_client_,
kLatencyThresholds,
std::make_unique<frame_metrics::VSyncHistogram>()),
latency_speed_analyzer_(
&latency_speed_client_,
&shared_latency_client_,
kLatencySpeedThresholds,
std::make_unique<frame_metrics::RatioHistogram>()),
latency_acceleration_analyzer_(
&latency_acceleration_client_,
&shared_latency_client_,
kLatencyAccelerationThresholds,
std::make_unique<frame_metrics::RatioHistogram>()) {}
FrameMetrics::~FrameMetrics() = default;
base::TimeDelta FrameMetrics::ReportPeriod() {
return kDefaultReportPeriod;
}
void FrameMetrics::AddFrameProduced(base::TimeTicks source_timestamp,
base::TimeDelta amount_produced,
base::TimeDelta amount_skipped) {
DCHECK_GE(amount_skipped, base::TimeDelta());
DCHECK_GT(amount_produced, base::TimeDelta());
base::TimeDelta source_timestamp_delta;
if (!skip_timestamp_queue_.empty()) {
source_timestamp_delta = source_timestamp - skip_timestamp_queue_.back();
DCHECK_GT(source_timestamp_delta, base::TimeDelta());
}
// Periodically report all metrics and reset the accumulators.
// Do this before adding any samples so the accumulators reset before
// overflow can occur.
time_since_start_of_report_period_ += source_timestamp_delta;
frames_produced_since_start_of_report_period_++;
if (time_since_start_of_report_period_ > ReportPeriod() ||
frames_produced_since_start_of_report_period_ >
kMaxFramesBeforeOverflowPossible) {
StartNewReportPeriod();
}
if (skip_timestamp_queue_.size() >= settings_.max_window_size) {
skip_timestamp_queue_.pop_front();
}
skip_timestamp_queue_.push_back(source_timestamp);
shared_skip_client_.window_begin = skip_timestamp_queue_.front();
shared_skip_client_.window_end = source_timestamp;
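  // For example, skipping two 10ms frames while producing one 10ms frame
  // (amount_skipped = 20ms, amount_produced = 10ms) yields a fixed-point
  // ratio of 2 * kFixedPointMultiplierSkips below.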
int64_t skipped_to_produced_ratio =
(amount_skipped * kFixedPointMultiplierSkips) / amount_produced;
DCHECK_GE(skipped_to_produced_ratio, 0);
frame_skips_analyzer_.AddSample(CapValue(skipped_to_produced_ratio),
CapDuration(amount_produced));
  bool tracing_enabled = false;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategories, &tracing_enabled);
if (tracing_enabled)
TraceProducedStats();
}
void FrameMetrics::AddFrameDisplayed(base::TimeTicks source_timestamp,
base::TimeTicks display_timestamp) {
// Frame timestamps shouldn't go back in time, but check and drop them just
// in case. Much of the code assumes a positive and non-zero delta.
if (source_timestamp <= source_timestamp_prev_) {
// TODO(brianderson): Flag a warning.
return;
}
base::TimeDelta latency = display_timestamp - source_timestamp;
if (latency_timestamp_queue_.size() >= settings_.max_window_size) {
latency_timestamp_queue_.pop_front();
}
latency_timestamp_queue_.push_back(source_timestamp);
shared_latency_client_.window_begin = latency_timestamp_queue_.front();
shared_latency_client_.window_end = source_timestamp;
// TODO(brianderson): Handle negative latency better.
// For now, reporting the magnitude of the latency will reflect
// how far off the ideal display time the frame was, but it won't indicate
// in which direction. This might be important for sources like video, where
// a frame might be displayed a little bit earlier than its ideal display
// time.
int64_t latency_value =
latency.InMicroseconds() * kFixedPointMultiplierLatency;
latency_analyzer_.AddSample(CapValue(latency_value), kLatencySampleWeight);
// Only calculate velocity if there's enough history.
if (latencies_added_ >= 1) {
base::TimeDelta latency_delta = latency - latency_prev_;
base::TimeDelta source_duration = source_timestamp - source_timestamp_prev_;
int64_t latency_velocity =
(latency_delta * kFixedPointMultiplierLatencySpeed) / source_duration;
// This should be plenty of range for real world values, but clip
// entries to avoid overflow in the accumulators just in case.
latency_speed_analyzer_.AddSample(CapValue(latency_velocity),
CapDuration(source_duration));
// Only calculate acceleration if there's enough history.
if (latencies_added_ >= 2) {
base::TimeDelta source_duration_average =
(source_duration + source_duration_prev_) / 2;
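    // The acceleration is a discrete second derivative: the difference
    // between consecutive latency velocities divided by the average frame
    // duration between them.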
int64_t latency_acceleration =
(((latency_delta * kFixedPointMultiplierLatencyAcceleration) /
source_duration) -
((latency_delta_prev_ * kFixedPointMultiplierLatencyAcceleration) /
source_duration_prev_)) /
source_duration_average.InMicroseconds();
latency_acceleration_analyzer_.AddSample(
CapValue(latency_acceleration), CapDuration(source_duration_average));
}
// Update history.
source_duration_prev_ = source_duration;
latency_delta_prev_ = latency_delta;
}
// Update history.
source_timestamp_prev_ = source_timestamp;
latency_prev_ = latency;
latencies_added_++;
  bool tracing_enabled = false;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategories, &tracing_enabled);
if (tracing_enabled)
TraceDisplayedStats();
}
void FrameMetrics::Reset() {
TRACE_EVENT0(kTraceCategories, "FrameMetrics::Reset");
skip_timestamp_queue_.clear();
latency_timestamp_queue_.clear();
time_since_start_of_report_period_ = base::TimeDelta();
latencies_added_ = 0;
source_timestamp_prev_ = base::TimeTicks();
latency_prev_ = base::TimeDelta();
source_duration_prev_ = base::TimeDelta();
latency_delta_prev_ = base::TimeDelta();
frame_skips_analyzer_.Reset();
latency_analyzer_.Reset();
latency_speed_analyzer_.Reset();
latency_acceleration_analyzer_.Reset();
}
// Reset analyzers, but don't reset recent latency history, so we can get
// latency speed and acceleration values immediately.
// TODO(brianderson): Once we support UKM reporting, store the frame skips
// result and defer its reporting until the latency numbers are also
// available. Reporting everything at this point would put some frames in
// different reporting periods, which could skew the results.
void FrameMetrics::StartNewReportPeriod() {
TRACE_EVENT0(kTraceCategories, "FrameMetrics::StartNewReportPeriod");
time_since_start_of_report_period_ = base::TimeDelta();
frames_produced_since_start_of_report_period_ = 0;
frame_skips_analyzer_.StartNewReportPeriod();
latency_analyzer_.StartNewReportPeriod();
latency_speed_analyzer_.StartNewReportPeriod();
latency_acceleration_analyzer_.StartNewReportPeriod();
}
void FrameMetrics::TraceProducedStats() {
TRACE_EVENT1(kTraceCategories, "FrameProduced", "Skips",
frame_skips_analyzer_.AsValue());
}
void FrameMetrics::TraceDisplayedStats() {
TRACE_EVENT0(kTraceCategories, "FrameDisplayed");
TRACE_EVENT_INSTANT1(kTraceCategories, "FrameDisplayed",
TRACE_EVENT_SCOPE_THREAD, "Latency",
latency_analyzer_.AsValue());
TRACE_EVENT_INSTANT1(kTraceCategories, "FrameDisplayed",
TRACE_EVENT_SCOPE_THREAD, "LatencySpeed",
latency_speed_analyzer_.AsValue());
TRACE_EVENT_INSTANT1(kTraceCategories, "FrameDisplayed",
TRACE_EVENT_SCOPE_THREAD, "LatencyAcceleration",
latency_acceleration_analyzer_.AsValue());
}
} // namespace ui
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_LATENCY_FRAME_METRICS_H_
#define UI_LATENCY_FRAME_METRICS_H_
#include "ui/latency/stream_analyzer.h"
#include <cstdint>
#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/time/time.h"
namespace ui {
namespace frame_metrics {
class SkipClient : public frame_metrics::StreamAnalyzerClient {
double TransformResult(double result) const override;
};
class LatencyClient : public frame_metrics::StreamAnalyzerClient {
double TransformResult(double result) const override;
};
class LatencySpeedClient : public frame_metrics::StreamAnalyzerClient {
double TransformResult(double result) const override;
};
class LatencyAccelerationClient : public frame_metrics::StreamAnalyzerClient {
double TransformResult(double result) const override;
};
} // namespace frame_metrics
struct FrameMetricsSettings {
// This is needed for telemetry results.
bool trace_results_every_frame = false;
// Maximum window size in number of samples.
// This is forwarded to each WindowAnalyzer.
size_t max_window_size = 60;
};
// Calculates all metrics for a frame source.
// Every frame source that we wish to instrument will own an instance of
// this class and will call AddFrameProduced and AddFrameDisplayed.
// Statistics will be reported automatically, either periodically based on
// the client interface, or on destruction if any samples were added since
// the last call to StartNewReportPeriod.
class FrameMetrics {
public:
// |source_name| must have a global lifetime for tracing and reporting
// purposes.
FrameMetrics(const FrameMetricsSettings& settings, const char* source_name);
virtual ~FrameMetrics();
// Resets all data and history as if the class were just created.
void Reset();
// AddFrameProduced should be called every time a source produces a frame.
// The information added here affects the number of frames skipped.
void AddFrameProduced(base::TimeTicks source_timestamp,
base::TimeDelta amount_produced,
base::TimeDelta amount_skipped);
// AddFrameDisplayed should be called whenever a frame causes damage and
// we know when the result became visible on the display.
// This will affect all latency derived metrics, including latency speed,
// latency acceleration, and latency itself.
// If a frame is produced but not displayed, do not call this; there was
// no change in the displayed result and thus no visual latency to track.
// Guessing a displayed time would only skew the results.
void AddFrameDisplayed(base::TimeTicks source_timestamp,
base::TimeTicks display_timestamp);
protected:
void TraceProducedStats();
void TraceDisplayedStats();
// virtual for testing.
virtual base::TimeDelta ReportPeriod();
// Starts a new reporting period that resets the various accumulators
// and memory of worst regions encountered, but does not destroy recent
// sample history in the windowed analyzers and in the derivatives
// for latency speed and latency acceleration. This avoids small gaps
// in coverage when starting a new reporting period.
void StartNewReportPeriod();
FrameMetricsSettings settings_;
const char* source_name_;
frame_metrics::SharedWindowedAnalyzerClient shared_skip_client_;
base::circular_deque<base::TimeTicks> skip_timestamp_queue_;
frame_metrics::SharedWindowedAnalyzerClient shared_latency_client_;
base::circular_deque<base::TimeTicks> latency_timestamp_queue_;
base::TimeDelta time_since_start_of_report_period_;
uint32_t frames_produced_since_start_of_report_period_ = 0;
uint64_t latencies_added_ = 0;
base::TimeTicks source_timestamp_prev_;
base::TimeDelta latency_prev_;
base::TimeDelta source_duration_prev_;
base::TimeDelta latency_delta_prev_;
frame_metrics::SkipClient skip_client_;
frame_metrics::LatencyClient latency_client_;
frame_metrics::LatencySpeedClient latency_speed_client_;
frame_metrics::LatencyAccelerationClient latency_acceleration_client_;
frame_metrics::StreamAnalyzer frame_skips_analyzer_;
frame_metrics::StreamAnalyzer latency_analyzer_;
frame_metrics::StreamAnalyzer latency_speed_analyzer_;
frame_metrics::StreamAnalyzer latency_acceleration_analyzer_;
DISALLOW_COPY_AND_ASSIGN(FrameMetrics);
};
} // namespace ui
#endif // UI_LATENCY_FRAME_METRICS_H_
@@ -157,7 +157,7 @@ class TestHistogram : public Histogram {
   // Histogram interface.
   void AddSample(uint32_t value, uint32_t weight) override;
   PercentileResults ComputePercentiles() const override;
-  void Reset() override{};
+  void Reset() override {}
   // Test interface.
   std::vector<ValueWeightPair> GetAndResetAllAddedSamples();
...
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/latency/frame_metrics.h"
#include "base/bind.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/latency/frame_metrics_test_common.h"
namespace ui {
namespace frame_metrics {
namespace {
// Converts a skipped:produced ratio into skipped:total, where
// total = skipped + produced.
// Internally we store the skipped:produced ratio since it is linear with
// the amount of time skipped, which has benefits for the fixed point
// representation as well as how it affects the RMS value.
// However, at a high level, we are more interested in the percent of total
// time skipped which is easier to interpret.
constexpr double SkipTransform(double ratio) {
return 1.0 / (1.0 + (1.0 / ratio));
}
// Returns the max value of an N-bit unsigned number.
constexpr uint64_t MaxValue(int N) {
return (1ULL << N) - 1;
}
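// For example, MaxValue(16) == 65535 and MaxValue(32) == 4294967295.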
// Define lower bounds on the saturation values of each metric.
// They are much bigger than they need to be, which ensures the range of our
// metrics will be okay.
// The constants passed to MaxValue represent the number of bits before
// the radix point in each metric's fixed-point representation.
constexpr double kSkipSaturationMin =
SkipTransform(MaxValue(16)); // skipped : frame delta = 65535
constexpr double kLatencySaturationMin =
MaxValue(32) / base::TimeTicks::kMicrosecondsPerSecond; // 4294.96 seconds
constexpr double kSpeedSaturationMin =
MaxValue(16); // latency delta : frame delta = 65535
constexpr double kAccelerationSaturationMin =
MaxValue(16) * base::TimeTicks::kMicrosecondsPerSecond /
1024; // speed delta : frame delta ~= 64M
// Define upper bounds for saturation points so we can verify the tests
// are testing what they think they are testing.
constexpr double kSkipSaturationMax = kSkipSaturationMin * 1.01;
constexpr double kLatencySaturationMax = kLatencySaturationMin * 1.01;
constexpr double kSpeedSaturationMax = kSpeedSaturationMin * 1.01;
constexpr double kAccelerationSaturationMax = kAccelerationSaturationMin * 1.01;
// TestFrameMetrics overrides some behavior of FrameMetrics for testing
// purposes.
class TestFrameMetrics : public FrameMetrics {
public:
TestFrameMetrics(const FrameMetricsSettings& settings,
const char* source_name)
: FrameMetrics(settings, source_name) {}
~TestFrameMetrics() override = default;
void OverrideReportPeriod(base::TimeDelta period) {
report_period_override_ = period;
}
void UseDefaultReportPeriodScaled(int scale) {
report_period_override_ = scale * FrameMetrics::ReportPeriod();
}
// AtStartOfNewReportPeriod works assuming it is called after every frame
// is submitted.
bool AtStartOfNewReportPeriod() {
bool at_start = time_since_start_of_report_period_ <
time_since_start_of_report_period_previous_;
time_since_start_of_report_period_previous_ =
time_since_start_of_report_period_;
return at_start;
}
// Convenience accessors for testing.
const frame_metrics::StreamAnalyzer& skips() const {
return frame_skips_analyzer_;
}
const frame_metrics::StreamAnalyzer& latency() const {
return latency_analyzer_;
}
const frame_metrics::StreamAnalyzer& speed() const {
return latency_speed_analyzer_;
}
const frame_metrics::StreamAnalyzer& acceleration() const {
return latency_acceleration_analyzer_;
}
protected:
base::TimeDelta ReportPeriod() override { return report_period_override_; }
base::TimeDelta report_period_override_ = base::TimeDelta::FromHours(1);
base::TimeDelta time_since_start_of_report_period_previous_;
bool override_report_period_ = true;
};
// TestStreamAnalysis enables copying of StreamAnalysis for testing purposes.
struct TestStreamAnalysis : public StreamAnalysis {
TestStreamAnalysis() = default;
~TestStreamAnalysis() = default;
TestStreamAnalysis(const TestStreamAnalysis& src) { *this = src; }
TestStreamAnalysis& operator=(const TestStreamAnalysis& src) {
mean = src.mean;
rms = src.rms;
smr = src.smr;
std_dev = src.std_dev;
variance_of_roots = src.variance_of_roots;
thresholds = src.thresholds;
percentiles = src.percentiles;
worst_mean = src.worst_mean;
worst_rms = src.worst_rms;
worst_smr = src.worst_smr;
return *this;
}
};
// The test fixture used by all tests in this file.
class FrameMetricsTest : public testing::Test {
public:
void SetUp() override {
// Make sure we don't get an unexpected call to StartNewReportPeriod.
frame_metrics = std::make_unique<TestFrameMetrics>(settings, "sourceA");
source_timestamp_origin =
base::TimeTicks() + base::TimeDelta::FromSeconds(1);
current_source_timestamp = source_timestamp_origin;
}
// A deep reset of all sample history.
void Reset() {
frame_metrics->Reset();
current_source_timestamp = source_timestamp_origin;
}
// Simulates frames with a repeating skip pattern, a repeating produce
// pattern, and a repeating latency pattern. Each pattern runs in parallel
// and independently of each other.
// |extra_frames| can help ensure a specific number of metric values are
// added since the speed and acceleration metrics have 1 and 2 fewer values
// than frames respectively.
void TestPattern(std::vector<base::TimeDelta> produced,
std::vector<base::TimeDelta> skipped,
std::vector<base::TimeDelta> latencies,
size_t extra_frames = 0) {
// Make sure we run each pattern a whole number of times.
size_t count = 1000 * produced.size() * skipped.size() * latencies.size() +
extra_frames;
for (size_t i = 0; i < count; i++) {
base::TimeDelta produce = produced[i % produced.size()];
base::TimeDelta skip = skipped[i % skipped.size()];
base::TimeDelta latency = latencies[i % latencies.size()];
base::TimeTicks displayed_timestamp = current_source_timestamp + latency;
frame_metrics->AddFrameProduced(current_source_timestamp, produce, skip);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produce + skip;
}
}
// The following methods return the corresponding analysis of all
// frames added since the last call to Reset().
TestStreamAnalysis SkipAnalysis() { return Analysis(frame_metrics->skips()); }
TestStreamAnalysis LatencyAnalysis() {
return Analysis(frame_metrics->latency());
}
TestStreamAnalysis SpeedAnalysis() {
return Analysis(frame_metrics->speed());
}
TestStreamAnalysis AccelerationAnalysis() {
return Analysis(frame_metrics->acceleration());
}
using AnalysisFunc = decltype(&FrameMetricsTest::SkipAnalysis);
void StartNewReportPeriodAvoidsOverflowTest(base::TimeDelta produced,
base::TimeDelta skipped,
base::TimeDelta latency0,
base::TimeDelta latency1,
double threshold,
AnalysisFunc analysis_method);
protected:
static TestStreamAnalysis Analysis(const StreamAnalyzer& analyzer) {
TestStreamAnalysis analysis;
analyzer.ComputeSummary(&analysis);
return analysis;
}
FrameMetricsSettings settings;
std::unique_ptr<TestFrameMetrics> frame_metrics;
base::TimeTicks source_timestamp_origin;
base::TimeTicks current_source_timestamp;
};
// Verify we get zeros for skips, speed, and acceleration when the values
// are constant.
TEST_F(FrameMetricsTest, PerfectSmoothnessScores) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(10);
const base::TimeDelta skip = base::TimeDelta();
const base::TimeDelta latency = base::TimeDelta::FromMilliseconds(10);
TestPattern({produced}, {skip}, {latency});
for (TestStreamAnalysis r :
{SkipAnalysis(), SpeedAnalysis(), AccelerationAnalysis()}) {
EXPECT_EQ(0, r.mean);
EXPECT_EQ(0, r.rms);
EXPECT_EQ(0, r.smr);
EXPECT_EQ(0, r.std_dev);
EXPECT_EQ(0, r.variance_of_roots);
EXPECT_EQ(0, r.worst_mean.value);
EXPECT_EQ(0, r.worst_rms.value);
EXPECT_EQ(0, r.worst_smr.value);
}
}
// Verify a constant fast latency is correctly reflected in stats.
TEST_F(FrameMetricsTest, PerfectLatencyScores) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(10);
const base::TimeDelta skip = base::TimeDelta();
const base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
TestPattern({produced}, {skip}, {latency});
TestStreamAnalysis r = LatencyAnalysis();
EXPECT_DOUBLE_EQ(latency.InSecondsF(), r.mean);
EXPECT_DOUBLE_EQ(latency.InSecondsF(), r.rms);
EXPECT_NEAR_SMR(r.smr, latency.InSecondsF(), produced.InMicroseconds());
EXPECT_EQ(0, r.std_dev);
EXPECT_NEAR_VARIANCE_OF_ROOT(0, r.variance_of_roots, 0,
produced.InMicroseconds());
EXPECT_DOUBLE_EQ(latency.InSecondsF(), r.worst_mean.value);
EXPECT_DOUBLE_EQ(latency.InSecondsF(), r.worst_rms.value);
EXPECT_NEAR_SMR(r.worst_smr.value, latency.InSecondsF(),
produced.InMicroseconds());
}
// Apply a saw tooth pattern to the frame skips with values that are easy to
// verify for SMR, RMS, etc.
TEST_F(FrameMetricsTest, SawToothShapedSkips) {
const base::TimeDelta produced = base::TimeDelta::FromSeconds(1);
const base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
const std::vector<base::TimeDelta> skips = {
base::TimeDelta::FromSeconds(0), base::TimeDelta::FromSeconds(1),
};
TestPattern({produced}, skips, {latency});
// Verify skip stats.
TestStreamAnalysis r = SkipAnalysis();
// 1 frame skipped per 3 frames of active time.
const double expected_skip_mean = (0 + 1.0) / 3;
EXPECT_EQ(expected_skip_mean, r.mean);
EXPECT_EQ(expected_skip_mean, r.worst_mean.value);
  // The expected value calculations for everything other than the mean are a
  // bit convoluted, since the internal calculations are performed in a
  // different space than the final result (skip:produce vs. skip:total).
const double expected_skip_to_produce_mean_square = (0 + 1.0) / 2;
const double expected_skip_to_produce_rms =
std::sqrt(expected_skip_to_produce_mean_square);
const double expected_skip_rms = SkipTransform(expected_skip_to_produce_rms);
EXPECT_EQ(expected_skip_rms, r.rms);
EXPECT_EQ(expected_skip_rms, r.worst_rms.value);
  const double expected_skip_to_produce_mean_root = (0 + 1.0) / 2;
  const double expected_skip_to_produce_smr =
      expected_skip_to_produce_mean_root * expected_skip_to_produce_mean_root;
  const double expected_skip_smr =
      SkipTransform(expected_skip_to_produce_smr);
EXPECT_EQ(expected_skip_smr, r.smr);
EXPECT_EQ(expected_skip_smr, r.worst_smr.value);
const double expected_skip_to_produce_std_dev = (0.5 + 0.5) / 2;
const double expected_skip_std_dev =
SkipTransform(expected_skip_to_produce_std_dev);
EXPECT_EQ(expected_skip_std_dev, r.std_dev);
const double expected_skip_to_produce_std_dev_of_roots = (0.5 + 0.5) / 2;
const double expected_skip_to_produce_variance_of_roots =
expected_skip_to_produce_std_dev_of_roots *
expected_skip_to_produce_std_dev_of_roots;
const double expected_skip_variance_of_roots =
SkipTransform(expected_skip_to_produce_variance_of_roots);
EXPECT_EQ(expected_skip_variance_of_roots, r.variance_of_roots);
}
// Apply a saw tooth pattern to the latency with values that are easy to
// verify for SMR, RMS, etc. Furthermore, since the latency speed and
// acceleration are constant, verify that the SMR, RMS, and mean values are
// equal.
TEST_F(FrameMetricsTest, SawToothShapedLatency) {
const base::TimeDelta produced = base::TimeDelta::FromSeconds(1);
const base::TimeDelta skipped = base::TimeDelta();
const std::vector<base::TimeDelta> latencies = {
base::TimeDelta::FromSeconds(36), base::TimeDelta::FromSeconds(100),
};
TestPattern({produced}, {skipped}, latencies);
// Verify latency.
TestStreamAnalysis r = LatencyAnalysis();
const double expected_latency_mean = (100.0 + 36) / 2;
EXPECT_DOUBLE_EQ(expected_latency_mean, r.mean);
EXPECT_DOUBLE_EQ(expected_latency_mean, r.worst_mean.value);
const double expected_latency_mean_square = (100.0 * 100 + 36 * 36) / 2;
const double expected_latency_rms = std::sqrt(expected_latency_mean_square);
EXPECT_DOUBLE_EQ(expected_latency_rms, r.rms);
EXPECT_DOUBLE_EQ(expected_latency_rms, r.worst_rms.value);
const double expected_latency_mean_root = (10.0 + 6) / 2;
const double expected_latency_smr =
expected_latency_mean_root * expected_latency_mean_root;
EXPECT_DOUBLE_EQ(expected_latency_smr, r.smr);
EXPECT_DOUBLE_EQ(expected_latency_smr, r.worst_smr.value);
const double expected_latency_std_dev = (100.0 - 36) / 2;
EXPECT_DOUBLE_EQ(expected_latency_std_dev, r.std_dev);
const double expected_latency_std_dev_of_roots = (10.0 - 6) / 2;
const double expected_latency_variance_of_roots =
expected_latency_std_dev_of_roots * expected_latency_std_dev_of_roots;
EXPECT_DOUBLE_EQ(expected_latency_variance_of_roots, r.variance_of_roots);
// Verify latency speed, where mean, RMS, SMR, etc. should be equal.
r = SpeedAnalysis();
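  // Latency alternates between 36s and 100s over 1s frames, and CapValue
  // keeps only magnitudes, so the speed is a constant |100 - 36| / 1 = 64.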
const double expected_speed = 64;
EXPECT_DOUBLE_EQ(expected_speed, r.mean);
EXPECT_DOUBLE_EQ(expected_speed, r.rms);
EXPECT_DOUBLE_EQ(expected_speed, r.smr);
EXPECT_DOUBLE_EQ(0, r.std_dev);
EXPECT_DOUBLE_EQ(0, r.variance_of_roots);
EXPECT_DOUBLE_EQ(expected_speed, r.worst_mean.value);
EXPECT_DOUBLE_EQ(expected_speed, r.worst_rms.value);
EXPECT_DOUBLE_EQ(expected_speed, r.worst_smr.value);
  // Verify latency acceleration, where mean, RMS, SMR, etc. should be equal.
// The slack is relatively large since the frame durations are so long, which
// ends up in the divisor twice for acceleration; however, the slack is still
// within an acceptable range.
r = AccelerationAnalysis();
const double expected_acceleration = expected_speed * 2;
const double slack = 0.1;
EXPECT_NEAR(expected_acceleration, r.mean, slack);
EXPECT_NEAR(expected_acceleration, r.rms, slack);
EXPECT_NEAR(expected_acceleration, r.smr, slack);
EXPECT_NEAR(0, r.std_dev, slack);
EXPECT_NEAR(0, r.variance_of_roots, slack);
EXPECT_NEAR(expected_acceleration, r.worst_mean.value, slack);
EXPECT_NEAR(expected_acceleration, r.worst_rms.value, slack);
EXPECT_NEAR(expected_acceleration, r.worst_smr.value, slack);
}
// Makes sure rA and rB are equal.
void VerifyStreamAnalysisValueEquality(const TestStreamAnalysis& rA,
                                       const TestStreamAnalysis& rB) {
EXPECT_EQ(rA.mean, rB.mean);
EXPECT_EQ(rA.rms, rB.rms);
EXPECT_EQ(rA.smr, rB.smr);
EXPECT_EQ(rA.std_dev, rB.std_dev);
EXPECT_EQ(rA.variance_of_roots, rB.variance_of_roots);
EXPECT_EQ(rA.worst_mean.value, rB.worst_mean.value);
EXPECT_EQ(rA.worst_rms.value, rB.worst_rms.value);
EXPECT_EQ(rA.worst_smr.value, rB.worst_smr.value);
}
// Verify that overflowing skips saturates instead of wraps,
// and that its saturation point is acceptable.
TEST_F(FrameMetricsTest, SkipSaturatesOnOverflow) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta skipA = base::TimeDelta::FromSeconds(66);
const base::TimeDelta skipB = base::TimeDelta::FromSeconds(80);
TestPattern({produced}, {skipA}, {latency});
TestStreamAnalysis rA = SkipAnalysis();
Reset();
TestPattern({produced}, {skipB}, {latency});
TestStreamAnalysis rB = SkipAnalysis();
  // Verify results are larger than a non-saturating value and smaller than
  // a number just past the expected saturation point.
EXPECT_LT(kSkipSaturationMin, rB.mean);
EXPECT_GT(kSkipSaturationMax, rB.mean);
// Verify the results are the same.
// If they wrapped around, they would be different.
  VerifyStreamAnalysisValueEquality(rA, rB);
}
// Verify that overflowing latency saturates instead of wraps,
// and that its saturation point is acceptable.
TEST_F(FrameMetricsTest, LatencySaturatesOnOverflow) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta skipped = base::TimeDelta();
const base::TimeDelta latencyA = base::TimeDelta::FromSeconds(4295);
const base::TimeDelta latencyB = base::TimeDelta::FromSeconds(5000);
TestPattern({produced}, {skipped}, {latencyA});
TestStreamAnalysis rA = LatencyAnalysis();
Reset();
TestPattern({produced}, {skipped}, {latencyB});
TestStreamAnalysis rB = LatencyAnalysis();
  // Verify results are larger than a non-saturating value and smaller than
  // a number just past the expected saturation point.
EXPECT_LT(kLatencySaturationMin, rB.mean);
EXPECT_GT(kLatencySaturationMax, rB.mean);
// Verify the results are the same.
// If they wrapped around, they would be different.
  VerifyStreamAnalysisValueEquality(rA, rB);
}
// Verify that overflowing latency speed saturates instead of wraps,
// and that its saturation point is acceptable.
TEST_F(FrameMetricsTest, LatencySpeedSaturatesOnOverflow) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta skipped = base::TimeDelta();
const base::TimeDelta latency0 = base::TimeDelta::FromSeconds(0);
const base::TimeDelta latencyA = base::TimeDelta::FromSeconds(66);
const base::TimeDelta latencyB = base::TimeDelta::FromSeconds(70);
TestPattern({produced}, {skipped}, {latency0, latencyA});
TestStreamAnalysis rA = SpeedAnalysis();
Reset();
TestPattern({produced}, {skipped}, {latency0, latencyB});
TestStreamAnalysis rB = SpeedAnalysis();
  // Verify results are larger than a non-saturating value and smaller than
  // a number just past the expected saturation point.
EXPECT_LT(kSpeedSaturationMin, rB.mean);
EXPECT_GT(kSpeedSaturationMax, rB.mean);
// Verify the results are the same.
// If they wrapped around, they would be different.
  VerifyStreamAnalysisValueEquality(rA, rB);
}
// Verify that overflowing latency acceleration saturates instead of wraps,
// and that its saturation point is acceptable.
TEST_F(FrameMetricsTest, LatencyAccelerationSaturatesOnOverflow) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta skipped = base::TimeDelta();
const base::TimeDelta latency0 = base::TimeDelta::FromSeconds(0);
const base::TimeDelta latencyA = base::TimeDelta::FromSeconds(32);
const base::TimeDelta latencyB = base::TimeDelta::FromSeconds(34);
TestPattern({produced}, {skipped}, {latency0, latencyA});
TestStreamAnalysis rA = AccelerationAnalysis();
Reset();
TestPattern({produced}, {skipped}, {latency0, latencyB});
TestStreamAnalysis rB = AccelerationAnalysis();
  // Verify results are larger than a non-saturating value and smaller than
  // a number just past the expected saturation point.
EXPECT_LT(kAccelerationSaturationMin, rB.mean);
EXPECT_GT(kAccelerationSaturationMax, rB.mean);
// Verify the results are the same.
// If they wrapped around, they would be different.
  VerifyStreamAnalysisValueEquality(rA, rB);
}
// Helps verify that:
// 1) All thresholds with index less than |i| are 1.
// 2) All thresholds with index greater than |i| are 0.
// 3) The |i|'th threshold equals |straddle_fraction|.
void VerifyThresholds(TestStreamAnalysis analysis,
size_t count,
size_t i,
double straddle_fraction) {
EXPECT_EQ(count, analysis.thresholds.size());
EXPECT_EQ(straddle_fraction, analysis.thresholds[i].ge_fraction) << i;
for (size_t j = 0; j < i; j++)
EXPECT_EQ(1.0, analysis.thresholds[j].ge_fraction) << i << "," << j;
for (size_t j = i + 1; j < count; j++)
EXPECT_EQ(0.0, analysis.thresholds[j].ge_fraction) << i << "," << j;
}
// Iterates through skip patterns that straddle each skip threshold
// and verifies the reported fractions are correct.
TEST_F(FrameMetricsTest, SkipThresholds) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency = base::TimeDelta::FromMilliseconds(10);
std::vector<base::TimeDelta> skips = {
base::TimeDelta::FromMicroseconds(0),
base::TimeDelta::FromMicroseconds(250),
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMilliseconds(2),
base::TimeDelta::FromMilliseconds(4),
base::TimeDelta::FromMilliseconds(8),
};
const size_t kThresholdCount = skips.size() - 2;
TestPattern({produced}, {skips[0], skips[1]}, {latency});
TestStreamAnalysis r = SkipAnalysis();
EXPECT_EQ(kThresholdCount, r.thresholds.size());
for (size_t j = 0; j < kThresholdCount; j++) {
EXPECT_EQ(0, r.thresholds[j].ge_fraction);
}
for (size_t i = 0; i < kThresholdCount; i++) {
Reset();
TestPattern({produced}, {skips[i + 1], skips[i + 2]}, {latency});
VerifyThresholds(SkipAnalysis(), kThresholdCount, i, 0.5);
}
}
// Iterates through latency patterns that straddle each latency threshold
// and verifies the reported fractions are correct.
// To straddle a threshold it alternates frames above and below the threshold.
TEST_F(FrameMetricsTest, LatencyThresholds) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta skipped = base::TimeDelta();
std::vector<base::TimeDelta> latencies = {
base::TimeDelta::FromMilliseconds(0),
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMilliseconds(5),
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMilliseconds(40),
};
const size_t kThresholdCount = latencies.size() - 2;
TestPattern({produced}, {skipped}, {latencies[0], latencies[1]});
TestStreamAnalysis r = LatencyAnalysis();
EXPECT_EQ(kThresholdCount, r.thresholds.size());
for (size_t j = 0; j < kThresholdCount; j++) {
EXPECT_EQ(0, r.thresholds[j].ge_fraction);
}
for (size_t i = 0; i < kThresholdCount; i++) {
Reset();
TestPattern({produced}, {skipped}, {latencies[i + 1], latencies[i + 2]});
VerifyThresholds(LatencyAnalysis(), kThresholdCount, i, 0.5);
}
}
// Iterates through frame patterns that straddle each latency speed threshold
// and verifies the reported fractions are correct.
// To straddle a threshold it alternates the produced frame durations, which
// moves the latency speed above and below the threshold.
TEST_F(FrameMetricsTest, SpeedThresholds) {
base::TimeDelta skipped = base::TimeDelta();
std::vector<base::TimeDelta> latencies = {
base::TimeDelta::FromMilliseconds(100),
base::TimeDelta::FromMilliseconds(200),
};
std::vector<base::TimeDelta> produced = {
base::TimeDelta::FromMilliseconds(1000),
base::TimeDelta::FromMilliseconds(240),
base::TimeDelta::FromMilliseconds(120),
base::TimeDelta::FromMilliseconds(60),
base::TimeDelta::FromMilliseconds(30),
base::TimeDelta::FromMilliseconds(15),
};
const size_t kThresholdCount = produced.size() - 2;
TestPattern({produced[0], produced[1]}, {skipped}, latencies, 1);
TestStreamAnalysis r = SpeedAnalysis();
EXPECT_EQ(kThresholdCount, r.thresholds.size());
for (size_t j = 0; j < kThresholdCount; j++) {
EXPECT_EQ(0, r.thresholds[j].ge_fraction);
}
for (size_t i = 0; i < kThresholdCount; i++) {
Reset();
TestPattern({produced[i + 1], produced[i + 2]}, {skipped}, latencies, 1);
// The expected "straddle fraction" is 1/3 instead of 1/3 since we
// varied the "produced" amound of each frame, which affects the weighting.
VerifyThresholds(SpeedAnalysis(), kThresholdCount, i, 1.0 / 3);
}
}
// Iterates through acceleration patterns that straddle each acceleration
// threshold and verifies the reported fractions are correct.
// To straddle a threshold it sends a set of frames under the threshold and
// then a second set of frames over the threshold.
TEST_F(FrameMetricsTest, AccelerationThresholds) {
base::TimeDelta skipped = base::TimeDelta();
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency0 = base::TimeDelta::FromMilliseconds(10);
std::vector<base::TimeDelta> latencies = {
latency0 + base::TimeDelta::FromMicroseconds(100),
latency0 + base::TimeDelta::FromMicroseconds(200),
latency0 + base::TimeDelta::FromMicroseconds(500),
latency0 + base::TimeDelta::FromMicroseconds(1000),
latency0 + base::TimeDelta::FromMicroseconds(2000),
latency0 + base::TimeDelta::FromMicroseconds(4000),
};
const size_t kThresholdCount = latencies.size() - 2;
TestPattern({produced}, {skipped}, {latency0, latencies[0]}, 2);
TestPattern({produced}, {skipped}, {latency0, latencies[1]}, 2);
TestStreamAnalysis r = AccelerationAnalysis();
EXPECT_EQ(kThresholdCount, r.thresholds.size());
for (size_t j = 0; j < kThresholdCount; j++) {
EXPECT_EQ(0, r.thresholds[j].ge_fraction);
}
for (size_t i = 0; i < kThresholdCount; i++) {
Reset();
TestPattern({produced}, {skipped}, {latency0, latencies[i + 1]}, 2);
TestPattern({produced}, {skipped}, {latency0, latencies[i + 2]}, 2);
VerifyThresholds(AccelerationAnalysis(), kThresholdCount, i, 0.5);
}
}
// The percentile calculation is an estimate, so make sure it is within an
// acceptable threshold. The offset is needed in case the expected value is 0.
void VerifyPercentiles(TestStreamAnalysis r, double expected, int source_line) {
double kPercentileSlackScale = .5;
double kPercentileSlackOffset = .02;
for (size_t i = 0; i < PercentileResults::kCount; i++) {
EXPECT_LT((1 - kPercentileSlackScale) * expected - kPercentileSlackOffset,
r.percentiles.values[i])
<< i << ", " << source_line;
EXPECT_GT(
(1 + 2 * kPercentileSlackScale) * expected + kPercentileSlackOffset,
r.percentiles.values[i])
<< i << ", " << source_line;
}
}
// This is a basic test to verify percentiles for skips are hooked up correctly.
// The histogram unit tests already test bucketing and precision in depth,
// so we don't worry about that here.
TEST_F(FrameMetricsTest, PercentilesSkipBasic) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
// Everything fast.
base::TimeDelta skipped = base::TimeDelta();
base::TimeTicks displayed_timestamp = current_source_timestamp + latency;
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
VerifyPercentiles(SkipAnalysis(), 0, __LINE__);
VerifyPercentiles(LatencyAnalysis(), latency.InSecondsF(), __LINE__);
VerifyPercentiles(SpeedAnalysis(), 0, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), 0, __LINE__);
// Bad skip.
Reset();
skipped = base::TimeDelta::FromSeconds(5);
displayed_timestamp = current_source_timestamp + latency;
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
double expected_skip_fraction =
skipped.InSecondsF() / (skipped.InSecondsF() + produced.InSecondsF());
VerifyPercentiles(SkipAnalysis(), expected_skip_fraction, __LINE__);
VerifyPercentiles(LatencyAnalysis(), latency.InSecondsF(), __LINE__);
VerifyPercentiles(SpeedAnalysis(), 0, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), 0, __LINE__);
}
// This is a basic test to verify percentiles for latency, speed, and
// acceleration are hooked up correctly. It uses the property that latency,
// speed, and acceleration results are delayed until there are at least
// 1, 2, and 3 frames respectively.
// The histogram unit tests already test bucketing and precision in depth,
// so we don't worry about that here.
TEST_F(FrameMetricsTest, PercentilesLatencyBasic) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta skipped = base::TimeDelta();
const base::TimeDelta latency0 = base::TimeDelta::FromMilliseconds(1);
const base::TimeDelta latency_delta = base::TimeDelta::FromSeconds(5);
const std::vector<base::TimeDelta> latencies = {
latency0 + latency_delta, latency0, latency0 + latency_delta,
};
// Everything fast.
base::TimeTicks displayed_timestamp = current_source_timestamp + latency0;
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
VerifyPercentiles(SkipAnalysis(), 0, __LINE__);
VerifyPercentiles(LatencyAnalysis(), latency0.InSecondsF(), __LINE__);
VerifyPercentiles(SpeedAnalysis(), 0, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), 0, __LINE__);
// Bad latency.
Reset();
displayed_timestamp = current_source_timestamp + latencies[0];
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
double expected_latency = (latencies[0]).InSecondsF();
VerifyPercentiles(SkipAnalysis(), 0, __LINE__);
VerifyPercentiles(LatencyAnalysis(), expected_latency, __LINE__);
VerifyPercentiles(SpeedAnalysis(), 0, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), 0, __LINE__);
// Bad latency speed.
displayed_timestamp = current_source_timestamp + latencies[1];
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
double expected_speed = latency_delta.InSecondsF() / produced.InSecondsF();
VerifyPercentiles(SkipAnalysis(), 0, __LINE__);
VerifyPercentiles(SpeedAnalysis(), expected_speed, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), 0, __LINE__);
// Bad latency acceleration.
double expected_acceleration = 2 * expected_speed / produced.InSecondsF();
displayed_timestamp = current_source_timestamp + latencies[2];
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
VerifyPercentiles(SkipAnalysis(), 0, __LINE__);
VerifyPercentiles(AccelerationAnalysis(), expected_acceleration, __LINE__);
}
// Applies a bunch of good frames followed by one bad frame.
// Then verifies all windows jump from the beginning (just before the bad frame)
// to the end (just after the bad frame).
TEST_F(FrameMetricsTest, WorstWindowsRangesUpdateCorrectly) {
const base::TimeDelta produced = base::TimeDelta::FromMilliseconds(10);
const base::TimeDelta skipped = base::TimeDelta();
const base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
TestPattern({produced}, {skipped}, {latency});
base::TimeTicks expected_begin, expected_end;
// Verify windows for skips and latency start at the very beginning.
expected_begin = source_timestamp_origin;
expected_end =
source_timestamp_origin + produced * (settings.max_window_size - 1);
for (TestStreamAnalysis r : {SkipAnalysis(), LatencyAnalysis()}) {
EXPECT_EQ(expected_begin, r.worst_mean.window_begin);
EXPECT_EQ(expected_end, r.worst_mean.window_end);
EXPECT_EQ(expected_begin, r.worst_rms.window_begin);
EXPECT_EQ(expected_end, r.worst_rms.window_end);
EXPECT_EQ(expected_begin, r.worst_smr.window_begin);
EXPECT_EQ(expected_end, r.worst_smr.window_end);
}
// Verify windows for speed and acceleration start near the beginning.
// We expect their windows to be delayed by 1 and 2 frames respectively
// since their first results need to compare multiple frames.
for (TestStreamAnalysis r : {SpeedAnalysis(), AccelerationAnalysis()}) {
expected_begin += produced;
expected_end += produced;
EXPECT_EQ(expected_begin, r.worst_mean.window_begin);
EXPECT_EQ(expected_end, r.worst_mean.window_end);
EXPECT_EQ(expected_begin, r.worst_rms.window_begin);
EXPECT_EQ(expected_end, r.worst_rms.window_end);
EXPECT_EQ(expected_begin, r.worst_smr.window_begin);
EXPECT_EQ(expected_end, r.worst_smr.window_end);
}
// Add a bad frame so the windows are updated for all the dimensions.
base::TimeTicks displayed_timestamp =
current_source_timestamp + (2 * latency);
const base::TimeDelta skipped2 = base::TimeDelta::FromMilliseconds(1);
frame_metrics->AddFrameProduced(current_source_timestamp, produced - skipped2,
skipped2);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
  // Verify the windows for all dimensions have updated.
expected_begin =
current_source_timestamp - produced * (settings.max_window_size - 1);
expected_end = current_source_timestamp;
for (TestStreamAnalysis r : {SkipAnalysis(), LatencyAnalysis(),
SpeedAnalysis(), AccelerationAnalysis()}) {
EXPECT_EQ(expected_begin, r.worst_mean.window_begin);
EXPECT_EQ(expected_end, r.worst_mean.window_end);
EXPECT_EQ(expected_begin, r.worst_rms.window_begin);
EXPECT_EQ(expected_end, r.worst_rms.window_end);
EXPECT_EQ(expected_begin, r.worst_smr.window_begin);
EXPECT_EQ(expected_end, r.worst_smr.window_end);
}
}
// Accumulating samples for too long can result in overflow of the accumulators.
// This can happen if the system sleeps / hibernates for a long time.
// Make sure values are reported often enough to avoid overflow.
void FrameMetricsTest::StartNewReportPeriodAvoidsOverflowTest(
base::TimeDelta produced,
base::TimeDelta skipped,
base::TimeDelta latency0,
base::TimeDelta latency1,
double threshold,
AnalysisFunc analysis_method) {
// We need one frame here so that we have 3 frames by the first time we call
// AccelerationAnalysis. Before 3 frames, acceleration is not defined.
base::TimeTicks displayed_timestamp = current_source_timestamp + latency1;
frame_metrics->AddFrameProduced(current_source_timestamp, produced, skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
do {
displayed_timestamp = current_source_timestamp + latency0;
frame_metrics->AddFrameProduced(current_source_timestamp, produced,
skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
displayed_timestamp = current_source_timestamp + latency1;
frame_metrics->AddFrameProduced(current_source_timestamp, produced,
skipped);
frame_metrics->AddFrameDisplayed(current_source_timestamp,
displayed_timestamp);
current_source_timestamp += produced + skipped;
TestStreamAnalysis r = (this->*analysis_method)();
// If there's overflow, the result will be much less than the threshold.
ASSERT_LT(threshold, r.mean);
ASSERT_LT(threshold, r.rms);
ASSERT_LT(threshold, r.smr);
} while (!frame_metrics->AtStartOfNewReportPeriod());
}
// Make sure values are reported often enough to avoid skip overflow.
TEST_F(FrameMetricsTest, StartNewReportPeriodAvoidsOverflowForSkips) {
base::TimeDelta produced = base::TimeDelta::FromMicroseconds(1);
base::TimeDelta latency = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta skipped = base::TimeDelta::FromSeconds(66);
frame_metrics->UseDefaultReportPeriodScaled(4);
StartNewReportPeriodAvoidsOverflowTest(produced, skipped, latency, latency,
kSkipSaturationMin,
&FrameMetricsTest::SkipAnalysis);
}
// Make sure values are reported often enough to avoid latency overflow.
TEST_F(FrameMetricsTest, StartNewReportPeriodAvoidsOverflowForLatency) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency = base::TimeDelta::FromSeconds(5000);
base::TimeDelta skipped = base::TimeDelta::FromSeconds(0);
frame_metrics->UseDefaultReportPeriodScaled(2);
StartNewReportPeriodAvoidsOverflowTest(produced, skipped, latency, latency,
kLatencySaturationMin,
&FrameMetricsTest::LatencyAnalysis);
}
// Make sure values are reported often enough to avoid speed overflow.
TEST_F(FrameMetricsTest, StartNewReportPeriodAvoidsOverflowForSpeed) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency0 = base::TimeDelta::FromSeconds(0);
base::TimeDelta latency1 = base::TimeDelta::FromSeconds(70);
base::TimeDelta skipped = base::TimeDelta::FromSeconds(0);
frame_metrics->UseDefaultReportPeriodScaled(2);
StartNewReportPeriodAvoidsOverflowTest(produced, skipped, latency0, latency1,
kSpeedSaturationMin,
&FrameMetricsTest::SpeedAnalysis);
}
// Make sure values are reported often enough to avoid acceleration overflow.
TEST_F(FrameMetricsTest, StartNewReportPeriodAvoidsOverflowForAcceleration) {
base::TimeDelta produced = base::TimeDelta::FromMilliseconds(1);
base::TimeDelta latency0 = base::TimeDelta::FromSeconds(0);
base::TimeDelta latency1 = base::TimeDelta::FromSeconds(33);
base::TimeDelta skipped = base::TimeDelta::FromSeconds(0);
frame_metrics->UseDefaultReportPeriodScaled(2);
StartNewReportPeriodAvoidsOverflowTest(
produced, skipped, latency0, latency1, kAccelerationSaturationMin,
&FrameMetricsTest::AccelerationAnalysis);
}
} // namespace
} // namespace frame_metrics
} // namespace ui
@@ -55,7 +55,6 @@ ui::PercentileResults PercentilesHelper(
     boundary_right = boundary_iterator->Next();
   }
-  NOTREACHED();
   return result;
 }
...
@@ -13,6 +13,10 @@
 namespace ui {
 // Used to communicate percentile results to clients.
+// If entries in |values| are zero, that means there were no samples.
+// A non-zero value implies samples were added, since even if those samples
+// were zero, they would go into the [0,N) bucket and result in a non-zero
+// estimate.
 struct PercentileResults {
   static constexpr double kPercentiles[] = {.50, .99};
   static constexpr size_t kCount = arraysize(kPercentiles);
...
@@ -41,6 +41,32 @@ TEST(FrameMetricsHistogramsTest, VSyncBoundariesDirect) {
   }
 }
+// Results should be 0 if no samples have been added yet.
+TEST(FrameMetricsHistogramsTest, ResultsAreZeroWithoutSamples) {
+  RatioHistogram ratio_histogram;
+  EXPECT_EQ(0, ratio_histogram.ComputePercentiles().values[0]);
+  EXPECT_EQ(0, ratio_histogram.ComputePercentiles().values[1]);
+  VSyncHistogram vsync_histogram;
+  EXPECT_EQ(0, vsync_histogram.ComputePercentiles().values[0]);
+  EXPECT_EQ(0, vsync_histogram.ComputePercentiles().values[1]);
+}
+// A non-zero value implies samples were added, since even if those samples
+// were zero, they would go into the [0,N) bucket and result in a non-zero
+// estimate.
+TEST(FrameMetricsHistogramsTest, ResultsAreNonZeroWithSamplesOfZero) {
+  RatioHistogram ratio_histogram;
+  ratio_histogram.AddSample(0, 1);
+  EXPECT_LT(0, ratio_histogram.ComputePercentiles().values[0]);
+  EXPECT_LT(0, ratio_histogram.ComputePercentiles().values[1]);
+  VSyncHistogram vsync_histogram;
+  vsync_histogram.AddSample(0, 1);
+  EXPECT_LT(0, vsync_histogram.ComputePercentiles().values[0]);
+  EXPECT_LT(0, vsync_histogram.ComputePercentiles().values[1]);
+}
 template <typename ReferenceBoundaryT>
 void BoundaryTestCommon(const ReferenceBoundaryT& reference_boundaries,
                         std::unique_ptr<Histogram> histogram) {
...
@@ -5,6 +5,10 @@
 #include "ui/latency/stream_analyzer.h"
 namespace ui {
+StreamAnalysis::StreamAnalysis() = default;
+StreamAnalysis::~StreamAnalysis() = default;
 namespace frame_metrics {
 StreamAnalyzer::StreamAnalyzer(
@@ -42,10 +46,10 @@ void StreamAnalyzer::StartNewReportPeriod() {
 void StreamAnalyzer::AddSample(const uint32_t value, const uint32_t weight) {
   DCHECK_GT(weight, 0u);
-  uint64_t weighted_value = static_cast<uint64_t>(weight) * value;
-  uint64_t weighted_root = weight * std::sqrt(static_cast<double>(value) *
-                                              kFixedPointRootMultiplier);
-  Accumulator96b weighted_square(value, weight);
+  const uint64_t weighted_value = static_cast<uint64_t>(weight) * value;
+  const uint64_t weighted_root = weight * std::sqrt(static_cast<double>(value) *
+                                                    kFixedPointRootMultiplier);
+  const Accumulator96b weighted_square(value, weight);
   // Verify overflow isn't an issue.
   // square_accumulator_ has DCHECKs internally, so we don't worry about
@@ -143,6 +147,19 @@ PercentileResults StreamAnalyzer::ComputePercentiles() const {
   return result;
 }
+void StreamAnalyzer::ComputeSummary(StreamAnalysis* results) const {
+  results->mean = ComputeMean();
+  results->rms = ComputeRMS();
+  results->smr = ComputeSMR();
+  results->std_dev = ComputeStdDev();
+  results->variance_of_roots = ComputeVarianceOfRoots();
+  results->thresholds = ComputeThresholds();
+  results->percentiles = ComputePercentiles();
+  results->worst_mean = windowed_analyzer_.ComputeWorstMean();
+  results->worst_rms = windowed_analyzer_.ComputeWorstRMS();
+  results->worst_smr = windowed_analyzer_.ComputeWorstSMR();
+}
 std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
 StreamAnalyzer::AsValue() const {
   auto state = std::make_unique<base::trace_event::TracedValue>();
...
@@ -24,6 +24,27 @@ struct ThresholdResult {
   double ge_fraction = 0.0;
 };
+struct StreamAnalysis {
+  StreamAnalysis();
+  ~StreamAnalysis();
+  double mean;
+  double rms;
+  double smr;
+  double std_dev;
+  double variance_of_roots;
+  std::vector<ThresholdResult> thresholds;
+  PercentileResults percentiles;
+  FrameRegionResult worst_mean;
+  FrameRegionResult worst_rms;
+  FrameRegionResult worst_smr;
+  DISALLOW_COPY_AND_ASSIGN(StreamAnalysis);
+};
 namespace frame_metrics {
 // The StreamAnalyzerClient interface is currently the same as
@@ -93,6 +114,7 @@ class StreamAnalyzer {
   // available directly.
   const WindowedAnalyzer& window() const { return windowed_analyzer_; }
+  void ComputeSummary(StreamAnalysis* results) const;
   std::unique_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
   void AsValueInto(base::trace_event::TracedValue* state) const;
...