Plumb Telemetry test name through to results object creation

We want to be able to ask for the current benchmark's name when generating results. This change threads the benchmark's metadata through to results creation so that the name is available to results objects.

Review URL: https://codereview.chromium.org/386943007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@287483 0039d316-1c4b-4281-b951-d872f2087c98
parent cc0d4be2
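
The shape of the change, in one self-contained sketch (the class and function bodies below are simplified stand-ins for illustration, not the real Telemetry implementations): callers build a BenchmarkMetadata carrying the benchmark name, results creation consumes that metadata, and page_runner.Run now receives the results object instead of creating and returning one itself.

# Sketch of the new calling convention. Names mirror the diff below;
# the simplified bodies are assumptions.
class BenchmarkMetadata(object):
  """Carries the benchmark's name down to the results layer."""
  def __init__(self, name):
    self._name = name

  @property
  def name(self):
    return self._name


class FakePageTestResults(object):
  """Stand-in for the object results_options.CreateResults would return."""
  def __init__(self, metadata):
    self.metadata = metadata
    self.failures = []


def CreateResults(metadata, finder_options):
  # Assumption: the real CreateResults also wires up output formatters
  # based on finder_options; that part is omitted here.
  return FakePageTestResults(metadata)


def Run(test, page_set, expectations, finder_options, results):
  # After this change the runner records into the results object it is
  # handed and returns None, instead of building its own results.
  pass


# Caller-side pattern, as Benchmark.Run now does it:
results = CreateResults(BenchmarkMetadata('my_benchmark'), finder_options=None)
Run(test=None, page_set=None, expectations=None, finder_options=None,
    results=results)
print(results.metadata.name)  # -> my_benchmark
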
@@ -17,7 +17,7 @@ from telemetry.page import page_runner
 from telemetry.page import page_set
 from telemetry.page import page_test
 from telemetry.page import test_expectations
-from telemetry.results import page_test_results
+from telemetry.results import results_options
 from telemetry.util import cloud_storage
@@ -25,6 +25,14 @@ Disabled = decorators.Disabled
 Enabled = decorators.Enabled
 
+class BenchmarkMetadata(object):
+  def __init__(self, name):
+    self._name = name
+
+  @property
+  def name(self):
+    return self._name
+
 class Benchmark(command_line.Command):
   """Base class for a Telemetry benchmark.
@@ -62,26 +70,29 @@ class Benchmark(command_line.Command):
   def CustomizeBrowserOptions(self, options):
     """Add browser options that are required by this benchmark."""
 
-  def Run(self, args):
+  def GetMetadata(self):
+    return BenchmarkMetadata(self.Name())
+
+  def Run(self, finder_options):
     """Run this test with the given options."""
-    self.CustomizeBrowserOptions(args.browser_options)
+    self.CustomizeBrowserOptions(finder_options.browser_options)
 
-    test = self.PageTestClass()()
-    test.__name__ = self.__class__.__name__
+    pt = self.PageTestClass()()
+    pt.__name__ = self.__class__.__name__
 
     if hasattr(self, '_disabled_strings'):
-      test._disabled_strings = self._disabled_strings
+      pt._disabled_strings = self._disabled_strings
     if hasattr(self, '_enabled_strings'):
-      test._enabled_strings = self._enabled_strings
+      pt._enabled_strings = self._enabled_strings
 
-    ps = self.CreatePageSet(args)
+    ps = self.CreatePageSet(finder_options)
     expectations = self.CreateExpectations(ps)
 
-    self._DownloadGeneratedProfileArchive(args)
+    self._DownloadGeneratedProfileArchive(finder_options)
 
-    results = page_test_results.PageTestResults()
+    results = results_options.CreateResults(self.GetMetadata(), finder_options)
     try:
-      results = page_runner.Run(test, ps, expectations, args)
+      page_runner.Run(pt, ps, expectations, finder_options, results)
     except page_test.TestNotSupportedOnPlatformFailure as failure:
       logging.warning(str(failure))
......
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 import unittest
 
+from telemetry import benchmark
 from telemetry.core import exceptions
 from telemetry.core import util
 from telemetry.page import page_runner
 from telemetry.page import page as page_module
 from telemetry.page import page_set as page_set_module
 from telemetry.page import page_test
+from telemetry.results import results_options
 from telemetry.page import test_expectations
 from telemetry.unittest import options_for_unittests
@@ -25,6 +27,10 @@ class BasicTestPage(page_module.Page):
     interaction.End()
 
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
+
 class PageMeasurementUnitTestBase(unittest.TestCase):
   """unittest.TestCase-derived class to help in the construction of unit tests
   for a measurement."""
@@ -63,7 +69,9 @@ class PageMeasurementUnitTestBase(unittest.TestCase):
     options.output_trace_tag = None
     page_runner.ProcessCommandLineArgs(temp_parser, options)
     measurement.ProcessCommandLineArgs(temp_parser, options)
-    return page_runner.Run(measurement, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(measurement, ps, expectations, options, results)
+    return results
 
   def TestTracingCleanedUp(self, measurement_class, options=None):
     ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
......
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -327,10 +327,8 @@ def _UpdatePageSetArchivesIfChanged(page_set):
       cloud_storage.GetIfChanged(path, page_set.bucket)
 
-def Run(test, page_set, expectations, finder_options):
+def Run(test, page_set, expectations, finder_options, results):
   """Runs a given test against a given page_set with the given options."""
-  results = results_options.PrepareResults(test, finder_options)
-
   test.ValidatePageSet(page_set)
 
   # Create a possible_browser with the given options.
@@ -357,7 +355,7 @@ def Run(test, page_set, expectations, finder_options):
   if not should_run:
     logging.warning('You are trying to run a disabled test.')
    logging.warning('Pass --also-run-disabled-tests to squelch this message.')
-    return results
+    return
 
   # Reorder page set based on options.
   pages = _ShuffleAndFilterPageSet(page_set, finder_options)
@@ -392,7 +390,7 @@ def Run(test, page_set, expectations, finder_options):
       pages.remove(page)
 
   if not pages:
-    return results
+    return
 
   state = _RunState()
   # TODO(dtu): Move results creation and results_for_current_run into RunState.
@@ -435,7 +433,7 @@ def Run(test, page_set, expectations, finder_options):
   finally:
     state.StopBrowser()
 
-  return results
+  return
 
 def _ShuffleAndFilterPageSet(page_set, finder_options):
......
-# Copyright 2013 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -12,12 +12,14 @@ import stat
 import sys
 import tempfile
 
+from telemetry import benchmark
 from telemetry.core import browser_options
 from telemetry.core import discover
 from telemetry.core import util
 from telemetry.page import page_runner
 from telemetry.page import profile_creator
 from telemetry.page import test_expectations
+from telemetry.results import results_options
 
 def _DiscoverProfileCreatorClasses():
@@ -72,7 +74,9 @@ def GenerateProfiles(profile_creator_class, profile_creator_name, options):
   temp_output_directory = tempfile.mkdtemp()
   options.output_profile_path = temp_output_directory
 
-  results = page_runner.Run(test, test.page_set, expectations, options)
+  results = results_options.CreateResults(
+      benchmark.BenchmarkMetadata(test.__class__.__name__), options)
+  page_runner.Run(test, test.page_set, expectations, options, results)
 
   if results.failures:
     logging.warning('Some pages failed.')
......
@@ -17,6 +17,7 @@ from telemetry.page import page_test
 from telemetry.page import profile_creator
 from telemetry.page import test_expectations
 from telemetry.results import page_measurement_results
+from telemetry.results import results_options
 
 class RecorderPageTest(page_test.PageTest):  # pylint: disable=W0223
@@ -113,7 +114,6 @@ def _MaybeGetInstanceOfClass(target, base_dir, cls):
 class WprRecorder(object):
   def __init__(self, base_dir, target, args=None):
     action_names_to_run = FindAllActionNames(base_dir)
     self._record_page_test = RecorderPageTest(action_names_to_run)
@@ -139,6 +139,14 @@ class WprRecorder(object):
     options.browser_options.no_proxy_server = True
     return options
 
+  def CreateResults(self):
+    if self._benchmark is not None:
+      benchmark_metadata = self._benchmark.GetMetadata()
+    else:
+      benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')
+
+    return results_options.CreateResults(benchmark_metadata, self._options)
+
   def _AddCommandLineArgs(self):
     page_runner.AddCommandLineArgs(self._parser)
     if self._benchmark is not None:
@@ -163,11 +171,11 @@ class WprRecorder(object):
       sys.exit(1)
     return ps
 
-  def Record(self):
+  def Record(self, results):
     self._page_set.wpr_archive_info.AddNewTemporaryRecording()
     self._record_page_test.CustomizeBrowserOptions(self._options)
-    return page_runner.Run(self._record_page_test, self._page_set,
-                           test_expectations.TestExpectations(), self._options)
+    page_runner.Run(self._record_page_test, self._page_set,
+                    test_expectations.TestExpectations(), self._options, results)
 
   def HandleResults(self, results):
     if results.failures or results.skipped_values:
@@ -185,6 +193,7 @@ def Main(base_dir):
     sys.exit(1)
   target = quick_args.pop()
  wpr_recorder = WprRecorder(base_dir, target)
-  results = wpr_recorder.Record()
+  results = wpr_recorder.CreateResults()
+  wpr_recorder.Record(results)
  wpr_recorder.HandleResults(results)
  return min(255, len(results.failures))
@@ -144,7 +144,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
     mock_page_set = MockPageSet(url=self._url)
     wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
                                           mock_page_set, flags)
-    results = wpr_recorder.Record()
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
     self.assertEqual(set(mock_page_set.pages), results.pages_that_succeeded)
 
   def testWprRecorderWithBenchmark(self):
@@ -152,7 +153,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
     mock_benchmark = MockBenchmark()
     wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
                                           flags)
-    results = wpr_recorder.Record()
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
     self.assertEqual(set(mock_benchmark.mock_page_set.pages),
                      results.pages_that_succeeded)
......
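
record_wpr now separates results creation from recording: WprRecorder.CreateResults() picks the metadata from the wrapped benchmark when one is present and otherwise falls back to a generic 'record_wpr' name, and Record() fills in the results object it is given. A small sketch of that selection logic, using stand-in classes rather than the real record_wpr module:

# Illustrative sketch of the metadata fallback in WprRecorder.CreateResults();
# the stand-in classes here are assumptions, not the real Telemetry code.
class BenchmarkMetadata(object):
  def __init__(self, name):
    self.name = name


class FakeBenchmark(object):
  def GetMetadata(self):
    return BenchmarkMetadata('my_benchmark')


def ChooseMetadata(wrapped_benchmark):
  # Recording for a specific benchmark reuses its metadata, so the results
  # carry that benchmark's name; a bare recording run is labeled 'record_wpr'.
  if wrapped_benchmark is not None:
    return wrapped_benchmark.GetMetadata()
  return BenchmarkMetadata('record_wpr')


print(ChooseMetadata(FakeBenchmark()).name)  # -> my_benchmark
print(ChooseMetadata(None).name)             # -> record_wpr
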
@@ -30,12 +30,12 @@ _UNIT_JSON = ('tools', 'perf', 'unit-info.json')
 # Leaving as-is now since we are going to move HtmlOutputFormatter to be
 # based on JSON anyway.
 class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
-  def __init__(self, output_stream, test_name, reset_results, upload_results,
+  def __init__(self, output_stream, metadata, reset_results, upload_results,
               browser_type, results_label=None, trace_tag=''):
     # Pass output_stream=None so that we blow up if
     # BuildbotOutputFormatter ever use the output_stream.
     super(HtmlOutputFormatter, self).__init__(None, trace_tag)
-    self._test_name = test_name
+    self._metadata = metadata
     self._reset_results = reset_results
     self._upload_results = upload_results
     self._html_output_stream = output_stream
@@ -101,6 +101,10 @@ class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
       'important': result_type == 'default'
     }
 
+  @property
+  def _test_name(self):
+    return self._metadata.name
+
   def GetResults(self):
     return self._result
......
@@ -5,6 +5,7 @@ import os
 import StringIO
 import unittest
 
+from telemetry import benchmark
 from telemetry.page import page_set
 from telemetry.results import html_output_formatter
 from telemetry.results import page_test_results
@@ -27,6 +28,9 @@ class DeterministicHtmlOutputFormatter(
   def _GetRevision(self):
     return 'revision'
 
+class FakeMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(FakeMetadataForTest, self).__init__('test_name')
+
 # Wrap string IO with a .name property so that it behaves more like a file.
 class StringIOFile(StringIO.StringIO):
@@ -54,7 +58,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', False, False, 'browser_type')
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
     formatter.Format(results)
 
     expected = {
      "platform": "browser_type",
@@ -111,7 +115,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', False, False, 'browser_type')
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
     formatter.Format(results)
 
     expected = [
      {
@@ -207,7 +211,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', True, False, 'browser_type')
+        output_file, FakeMetadataForTest(), True, False, 'browser_type')
     formatter.Format(results)
 
     expected = [{
      "platform": "browser_type",
......
@@ -6,9 +6,10 @@ import json
 from telemetry.results import output_formatter
 
-def ResultsAsDict(res):
+def ResultsAsDict(res, metadata):
   result_dict = {
-    'format_version': '0.1',
+    'format_version': '0.2',
+    'benchmark_name': metadata.name,
    'summary_values': [v.AsDict() for v in res.all_summary_values],
    'per_page_values': [v.AsDict() for v in res.all_page_specific_values],
    'pages': dict((p.id, p.AsDict()) for p in _all_pages(res))
@@ -21,9 +22,15 @@ def _all_pages(res):
   return pages
 
 class JsonOutputFormatter(output_formatter.OutputFormatter):
-  def __init__(self, output_stream):
+  def __init__(self, output_stream, metadata):
     super(JsonOutputFormatter, self).__init__(output_stream)
+    self._metadata = metadata
+
+  @property
+  def metadata(self):
+    return self._metadata
 
   def Format(self, page_test_results):
-    json.dump(ResultsAsDict(page_test_results), self.output_stream)
+    json.dump(ResultsAsDict(page_test_results, self.metadata),
+              self.output_stream)
     self.output_stream.write('\n')
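
With metadata plumbed into JsonOutputFormatter, ResultsAsDict bumps the format version and adds the benchmark name at the top level. A rough illustration of the resulting structure (the keys and the version/name fields come from the diff above; the concrete values are hypothetical):

import json

# Hypothetical example of what ResultsAsDict(results, metadata) now produces;
# the values are made up, only the keys and the two changed fields are from the CL.
example_output = {
    'format_version': '0.2',           # bumped from '0.1' by this change
    'benchmark_name': 'my_benchmark',  # new field, taken from metadata.name
    'summary_values': [],              # [v.AsDict() for v in all_summary_values]
    'per_page_values': [],             # [v.AsDict() for v in all_page_specific_values]
    'pages': {},                       # {page.id: page.AsDict()} for pages that ran
}
print(json.dumps(example_output, indent=2))
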
@@ -6,9 +6,9 @@ import os
 import unittest
 import json
 
+from telemetry import benchmark
 from telemetry.results import json_output_formatter
 from telemetry.results import page_test_results
-from telemetry.results.json_output_formatter import ResultsAsDict
 from telemetry.page import page_set
 from telemetry.value import scalar
@@ -25,12 +25,12 @@ def _HasPage(pages, page):
 def _HasValueNamed(values, name):
   return len([x for x in values if x['name'] == name]) == 1
 
 class JsonOutputFormatterTest(unittest.TestCase):
   def setUp(self):
     self._output = StringIO.StringIO()
     self._page_set = _MakePageSet()
-    self._formatter = json_output_formatter.JsonOutputFormatter(self._output)
+    self._formatter = json_output_formatter.JsonOutputFormatter(self._output,
+        benchmark.BenchmarkMetadata('test_name'))
 
   def testOutputAndParse(self):
     results = page_test_results.PageTestResults()
@@ -52,7 +52,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     results.AddValue(v0)
     results.DidRunPage(self._page_set[0])
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
     self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo'))
@@ -69,7 +69,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     results.AddValue(v1)
     results.DidRunPage(self._page_set[1])
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
     self.assertTrue(_HasPage(d['pages'], self._page_set[1]))
@@ -81,7 +81,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     v = scalar.ScalarValue(None, 'baz', 'seconds', 5)
     results.AddSummaryValue(v)
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertFalse(d['pages'])
     self.assertTrue(_HasValueNamed(d['summary_values'], 'baz'))
@@ -43,7 +43,7 @@ def AddResultsOptions(parser):
   parser.add_option_group(group)
 
-def PrepareResults(test, options):
+def CreateResults(metadata, options):
   # TODO(chrishenry): This logic prevents us from having multiple
   # OutputFormatters. We should have an output_file per OutputFormatter.
   # Maybe we should have --output-dir instead of --output-file?
@@ -85,12 +85,12 @@ def PrepareResults(test, options):
     output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
         sys.stdout, trace_tag=options.output_trace_tag))
     output_formatters.append(html_output_formatter.HtmlOutputFormatter(
-        output_stream, test.__class__.__name__, options.reset_results,
+        output_stream, metadata, options.reset_results,
        options.upload_results, options.browser_type,
        options.results_label, trace_tag=options.output_trace_tag))
   elif options.output_format == 'json':
     output_formatters.append(json_output_formatter.JsonOutputFormatter(
-        output_stream))
+        output_stream, metadata))
   else:
     # Should never be reached. The parser enforces the choices.
     raise Exception('Invalid --output-format "%s". Valid choices are: %s'
......