Commit e12e3f48 authored by phoglund's avatar phoglund Committed by Commit bot

Add encoding time and fps to webrtc.stress case.

This is going to show the metrics for the first 5 peer connections;
I hope that's enough since showing all 30 makes the test really
annoying to triage.

BUG=632299
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.perf:linux_perf_cq;master.tryserver.chromium.perf:mac_retina_perf_cq;master.tryserver.chromium.perf:winx64_10_perf_cq

Review-Url: https://codereview.chromium.org/2561603003
Cr-Commit-Position: refs/heads/master@{#438131}
parent 63b24527
...@@ -60,7 +60,9 @@ class WebrtcStressTest(perf_benchmark.PerfBenchmark): ...@@ -60,7 +60,9 @@ class WebrtcStressTest(perf_benchmark.PerfBenchmark):
return 'webrtc.stress' return 'webrtc.stress'
def CreatePageTest(self, options): def CreatePageTest(self, options):
return webrtc.WebRTC(use_webrtc_stats=False) # Exclude all stats.
return webrtc.WebRTC(particular_metrics=['googAvgEncodeMs',
'googFrameRateReceived'])
# WebrtcRendering must be a PerfBenchmark, and not a _Webrtc, because it is a # WebrtcRendering must be a PerfBenchmark, and not a _Webrtc, because it is a
......
...@@ -13,12 +13,18 @@ from metrics import webrtc_stats ...@@ -13,12 +13,18 @@ from metrics import webrtc_stats
class WebRTC(legacy_page_test.LegacyPageTest): class WebRTC(legacy_page_test.LegacyPageTest):
"""Gathers WebRTC-related metrics on a page set.""" """Gathers WebRTC-related metrics on a page set."""
def __init__(self, use_webrtc_stats=True): def __init__(self, particular_metrics=None):
"""Create the measurement and include selected stats.
Args:
particular_metrics: A list of the stats to include (see webrtc_stats.py
for a list of valid names) or None to select all metrics.
"""
super(WebRTC, self).__init__() super(WebRTC, self).__init__()
self._cpu_metric = None self._cpu_metric = None
self._media_metric = None self._media_metric = None
self._power_metric = None self._power_metric = None
self._use_webrtc_stats = use_webrtc_stats self._particular_metrics = particular_metrics
self._webrtc_stats_metric = None self._webrtc_stats_metric = None
def WillStartBrowser(self, platform): def WillStartBrowser(self, platform):
...@@ -26,16 +32,15 @@ class WebRTC(legacy_page_test.LegacyPageTest): ...@@ -26,16 +32,15 @@ class WebRTC(legacy_page_test.LegacyPageTest):
def DidStartBrowser(self, browser): def DidStartBrowser(self, browser):
self._cpu_metric = cpu.CpuMetric(browser) self._cpu_metric = cpu.CpuMetric(browser)
if self._use_webrtc_stats: self._webrtc_stats_metric = webrtc_stats.WebRtcStatisticsMetric(
self._webrtc_stats_metric = webrtc_stats.WebRtcStatisticsMetric() self._particular_metrics)
def DidNavigateToPage(self, page, tab): def DidNavigateToPage(self, page, tab):
self._cpu_metric.Start(page, tab) self._cpu_metric.Start(page, tab)
self._media_metric = media.MediaMetric(tab) self._media_metric = media.MediaMetric(tab)
self._media_metric.Start(page, tab) self._media_metric.Start(page, tab)
self._power_metric.Start(page, tab) self._power_metric.Start(page, tab)
if self._use_webrtc_stats: self._webrtc_stats_metric.Start(page, tab)
self._webrtc_stats_metric.Start(page, tab)
def CustomizeBrowserOptions(self, options): def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--use-fake-device-for-media-stream') options.AppendExtraBrowserArgs('--use-fake-device-for-media-stream')
...@@ -56,9 +61,8 @@ class WebRTC(legacy_page_test.LegacyPageTest): ...@@ -56,9 +61,8 @@ class WebRTC(legacy_page_test.LegacyPageTest):
self._power_metric.Stop(page, tab) self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results) self._power_metric.AddResults(tab, results)
if self._use_webrtc_stats: self._webrtc_stats_metric.Stop(page, tab)
self._webrtc_stats_metric.Stop(page, tab) self._webrtc_stats_metric.AddResults(tab, results)
self._webrtc_stats_metric.AddResults(tab, results)
def DidRunPage(self, platform): def DidRunPage(self, platform):
del platform # unused del platform # unused
......
...@@ -70,6 +70,16 @@ INTERESTING_METRICS = { ...@@ -70,6 +70,16 @@ INTERESTING_METRICS = {
} }
def SelectMetrics(particular_metrics):
if not particular_metrics:
return INTERESTING_METRICS
# You can only select among the predefined interesting metrics.
assert set(particular_metrics).issubset(INTERESTING_METRICS.keys())
return {key: value for key, value in INTERESTING_METRICS.iteritems()
if key in particular_metrics}
def GetReportKind(report): def GetReportKind(report):
if 'audioInputLevel' in report or 'audioOutputLevel' in report: if 'audioInputLevel' in report or 'audioOutputLevel' in report:
return 'audio' return 'audio'
...@@ -94,12 +104,12 @@ def StripAudioVideoBweDistinction(stat_name): ...@@ -94,12 +104,12 @@ def StripAudioVideoBweDistinction(stat_name):
return re.sub('^(audio|video|bwe)_', '', stat_name) return re.sub('^(audio|video|bwe)_', '', stat_name)
def SortStatsIntoTimeSeries(report_batches): def SortStatsIntoTimeSeries(report_batches, selected_metrics):
time_series = {} time_series = {}
for report_batch in report_batches: for report_batch in report_batches:
for report in report_batch: for report in report_batch:
for stat_name, value in report.iteritems(): for stat_name, value in report.iteritems():
if stat_name not in INTERESTING_METRICS: if stat_name not in selected_metrics:
continue continue
if GetReportKind(report) == 'unknown': if GetReportKind(report) == 'unknown':
continue continue
...@@ -109,12 +119,21 @@ def SortStatsIntoTimeSeries(report_batches): ...@@ -109,12 +119,21 @@ def SortStatsIntoTimeSeries(report_batches):
return time_series return time_series
def PrintSpecialMarkerValue(results):
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, 'peer_connection_5_not_logging_more_conns',
'', [17], description=('This marker signifies we never log more '
'than 5 peer connections'),
important=False))
class WebRtcStatisticsMetric(Metric): class WebRtcStatisticsMetric(Metric):
"""Makes it possible to measure stats from peer connections.""" """Makes it possible to measure stats from peer connections."""
def __init__(self): def __init__(self, particular_metrics=None):
super(WebRtcStatisticsMetric, self).__init__() super(WebRtcStatisticsMetric, self).__init__()
self._all_reports = None self._all_reports = None
self._selected_metrics = SelectMetrics(particular_metrics)
def Start(self, page, tab): def Start(self, page, tab):
pass pass
...@@ -130,7 +149,13 @@ class WebRtcStatisticsMetric(Metric): ...@@ -130,7 +149,13 @@ class WebRtcStatisticsMetric(Metric):
reports = json.loads(self._all_reports) reports = json.loads(self._all_reports)
for i, report in enumerate(reports): for i, report in enumerate(reports):
time_series = SortStatsIntoTimeSeries(report) time_series = SortStatsIntoTimeSeries(report, self._selected_metrics)
# Only ever show stats for 5 peer connections, or it's going to look
# insane in the results.
if i > 5:
PrintSpecialMarkerValue(results)
return
for stat_name, values in time_series.iteritems(): for stat_name, values in time_series.iteritems():
stat_name_underscored = camel_case.ToUnderscore(stat_name) stat_name_underscored = camel_case.ToUnderscore(stat_name)
......
...@@ -107,9 +107,7 @@ class FakeResults(object): ...@@ -107,9 +107,7 @@ class FakeResults(object):
class WebRtcStatsUnittest(unittest.TestCase): class WebRtcStatsUnittest(unittest.TestCase):
def _RunMetricOnJson(self, json_to_return): def _RunMetricOnJson(self, json_to_return, stats_metric):
stats_metric = webrtc_stats.WebRtcStatisticsMetric()
tab = simple_mock.MockObject() tab = simple_mock.MockObject()
page = simple_mock.MockObject() page = simple_mock.MockObject()
...@@ -125,7 +123,8 @@ class WebRtcStatsUnittest(unittest.TestCase): ...@@ -125,7 +123,8 @@ class WebRtcStatsUnittest(unittest.TestCase):
return results return results
def testExtractsValuesAsTimeSeries(self): def testExtractsValuesAsTimeSeries(self):
results = self._RunMetricOnJson(SAMPLE_JSON) stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)
self.assertTrue(results.received_values, self.assertTrue(results.received_values,
'Expected values for googDecodeMs and others, got none.') 'Expected values for googDecodeMs and others, got none.')
...@@ -139,7 +138,8 @@ class WebRtcStatsUnittest(unittest.TestCase): ...@@ -139,7 +138,8 @@ class WebRtcStatsUnittest(unittest.TestCase):
[100.0, 101.0]) [100.0, 101.0])
def testExtractsInterestingMetricsOnly(self): def testExtractsInterestingMetricsOnly(self):
results = self._RunMetricOnJson(SAMPLE_JSON) stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)
self.assertTrue(len(results.received_values) > 0) self.assertTrue(len(results.received_values) > 0)
self.assertIn('peer_connection_0', results.received_values[0].name, self.assertIn('peer_connection_0', results.received_values[0].name,
...@@ -159,6 +159,20 @@ class WebRtcStatsUnittest(unittest.TestCase): ...@@ -159,6 +159,20 @@ class WebRtcStatsUnittest(unittest.TestCase):
'should not be reported since it is not interesting.') 'should not be reported since it is not interesting.')
self.assertNotIn('peer_connection_1_audio_audio_input_level', all_names) self.assertNotIn('peer_connection_1_audio_audio_input_level', all_names)
def testExtractsParticularMetricsOnlyIfSpecified(self):
only_goog_rtt_and_max_decode = ['googRtt', 'googMaxDecodeMs']
stats_metric = webrtc_stats.WebRtcStatisticsMetric(
particular_metrics=only_goog_rtt_and_max_decode)
results = self._RunMetricOnJson(SAMPLE_JSON, stats_metric)
received_names = [value.name for value in results.received_values]
expected_names = ['peer_connection_0_audio_goog_rtt',
'peer_connection_0_video_goog_rtt',
'peer_connection_1_video_goog_max_decode_ms',
'peer_connection_1_video_goog_rtt']
self.assertEqual(expected_names, received_names)
def testReturnsIfJsonIsEmpty(self): def testReturnsIfJsonIsEmpty(self):
results = self._RunMetricOnJson('[]') stats_metric = webrtc_stats.WebRtcStatisticsMetric()
results = self._RunMetricOnJson('[]', stats_metric)
self.assertFalse(results.received_values) self.assertFalse(results.received_values)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment