Commit e3b49934 authored by ariblue, committed by Commit bot

Add support for multiple output formatters for telemetry results using the following syntax:

./run_benchmark --output-format=json --output-format=html foo
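
For context, a minimal sketch of the flag semantics this introduces, assuming an optparse-based parser like Telemetry's (the choice list and parser setup below are illustrative, not the project's actual code): with action='append', each repeated --output-format flag appends one entry to options.output_formats.

# Illustrative only: mirrors the parser change in this CL, but
# FORMAT_CHOICES and the bare OptionParser are assumptions.
import optparse

FORMAT_CHOICES = ('html', 'json', 'csv', 'buildbot', 'gtest', 'none')

parser = optparse.OptionParser()
parser.add_option('--output-format', action='append', dest='output_formats',
                  choices=FORMAT_CHOICES,
                  help='Output format; may be given multiple times.')

options, _ = parser.parse_args(['--output-format=json', '--output-format=html'])
print(options.output_formats)  # ['json', 'html']

One subtlety of this design, if I read optparse's append semantics correctly: the append action also appends to a list-valued default, so with default=[_OUTPUT_FORMAT_CHOICES[0]] an explicit flag ends up alongside the default entry rather than replacing it.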

BUG=

Review URL: https://codereview.chromium.org/574593002

Cr-Commit-Position: refs/heads/master@{#295149}
parent 68b33a1f
@@ -81,7 +81,7 @@ class PageRunnerTests(unittest.TestCase):
       pass
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -111,7 +111,7 @@ class PageRunnerTests(unittest.TestCase):
       raise ExpectedException()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     test = Test()
     SetUpPageRunnerArguments(options)
@@ -133,7 +133,7 @@ class PageRunnerTests(unittest.TestCase):
       pass
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -159,7 +159,7 @@ class PageRunnerTests(unittest.TestCase):
       raise exceptions.BrowserGoneException(tab.browser)
     options = options_for_unittests.GetCopy()
-    options.output_format = 'csv'
+    options.output_formats = ['csv']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
@@ -190,7 +190,7 @@ class PageRunnerTests(unittest.TestCase):
       results.AddValue(string.StringValue(page, 'test', 't', page.url))
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     options.reset_results = None
     options.upload_results = None
@@ -223,7 +223,7 @@ class PageRunnerTests(unittest.TestCase):
     self.assertEquals(0, len(results.failures))
     self.assertEquals(2, len(results.all_page_specific_values))
-    options.output_format = 'html'
+    options.output_formats = ['html']
     options.suppress_gtest_report = True
     options.page_repeat = 1
     options.pageset_repeat = 1
@@ -253,7 +253,7 @@ class PageRunnerTests(unittest.TestCase):
     output_file = tempfile.NamedTemporaryFile(delete=False).name
     try:
       options = options_for_unittests.GetCopy()
-      options.output_format = 'buildbot'
+      options.output_formats = ['buildbot']
       options.output_file = output_file
       options.suppress_gtest_report = True
       options.reset_results = None
@@ -323,7 +323,7 @@ class PageRunnerTests(unittest.TestCase):
     test = TestThatInstallsCredentialsBackend(credentials_backend)
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -354,7 +354,7 @@ class PageRunnerTests(unittest.TestCase):
     test = TestUserAgent()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -385,7 +385,7 @@ class PageRunnerTests(unittest.TestCase):
     test = TestOneTab()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -419,7 +419,7 @@ class PageRunnerTests(unittest.TestCase):
     test = TestBeforeLaunch()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -448,7 +448,7 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.page_repeat = 2
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     if not browser_finder.FindBrowser(options):
       return
@@ -481,7 +481,7 @@ class PageRunnerTests(unittest.TestCase):
     test = Test()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -519,7 +519,7 @@ class PageRunnerTests(unittest.TestCase):
     test = Test()
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     results = results_options.CreateResults(EmptyMetadataForTest(), options)
@@ -568,7 +568,7 @@ class PageRunnerTests(unittest.TestCase):
   def testUseLiveSitesFlagSet(self):
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     options.use_live_sites = True
     SetUpPageRunnerArguments(options)
@@ -576,7 +576,7 @@ class PageRunnerTests(unittest.TestCase):
   def testUseLiveSitesFlagUnset(self):
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     SetUpPageRunnerArguments(options)
     self.TestUseLiveSitesFlag(options, expect_from_archive=True)
@@ -602,7 +602,7 @@ class PageRunnerTests(unittest.TestCase):
         'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))
     options = options_for_unittests.GetCopy()
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     expected_max_failures = 2
     if not max_failures is None:
...
@@ -151,7 +151,7 @@ class WprRecorder(object):
     self._SetArgumentDefaults()
 
   def _SetArgumentDefaults(self):
-    self._parser.set_defaults(**{'output_format': 'none'})
+    self._parser.set_defaults(**{'output_formats': ['none']})
 
   def _ParseArgs(self, args=None):
     args_to_parse = sys.argv[1:] if args is None else args
...
@@ -25,13 +25,14 @@ def AddResultsOptions(parser):
   group = optparse.OptionGroup(parser, 'Results options')
   group.add_option('--chartjson', action='store_true',
                    help='Output Chart JSON. Ignores --output-format.')
-  group.add_option('--output-format',
-                   default=_OUTPUT_FORMAT_CHOICES[0],
+  group.add_option('--output-format', action='append', dest='output_formats',
+                   default=[_OUTPUT_FORMAT_CHOICES[0]],
                    choices=_OUTPUT_FORMAT_CHOICES,
                    help='Output format. Defaults to "%%default". '
                    'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
   group.add_option('-o', '--output',
                    dest='output_file',
+                   default=None,
                    help='Redirects output to a file. Defaults to stdout.')
   group.add_option('--output-trace-tag',
                    default='',
@@ -49,77 +50,86 @@ def AddResultsOptions(parser):
   parser.add_option_group(group)
 
 
+def _GetOutputStream(output_format, output_file):
+  assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
+  assert output_format not in ('gtest', 'none'), (
+      'Cannot set stream for \'gtest\' or \'none\' output formats.')
+  if output_file is None:
+    if output_format != 'html' and output_format != 'json':
+      return sys.stdout
+    output_file = os.path.join(util.GetBaseDir(), 'results.' + output_format)
+  output_file = os.path.expanduser(output_file)
+  open(output_file, 'a').close()  # Create file if it doesn't exist.
+  return open(output_file, 'r+')
+
+
+def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
+  if suppress_gtest_report:
+    return progress_reporter.ProgressReporter()
+  return gtest_progress_reporter.GTestProgressReporter(
+      sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
+
+
 def CreateResults(benchmark_metadata, options):
   """
   Args:
     options: Contains the options specified in AddResultsOptions.
   """
-  # TODO(chrishenry): This logic prevents us from having multiple
-  # OutputFormatters. We should have an output_file per OutputFormatter.
-  # Maybe we should have --output-dir instead of --output-file?
-  if options.output_format == 'html' and not options.output_file:
-    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')
-  elif options.output_format == 'json' and not options.output_file:
-    options.output_file = os.path.join(util.GetBaseDir(), 'results.json')
-
-  if hasattr(options, 'output_file') and options.output_file:
-    output_file = os.path.expanduser(options.output_file)
-    open(output_file, 'a').close()  # Create file if it doesn't exist.
-    output_stream = open(output_file, 'r+')
-  else:
-    output_stream = sys.stdout
-  if not hasattr(options, 'output_format'):
-    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
-  if not hasattr(options, 'output_trace_tag'):
-    options.output_trace_tag = ''
+  # TODO(chrishenry): It doesn't make sense to have a single output_file flag
+  # with multiple output formatters. We should explore other possible options:
+  # - Have an output_file per output formatter
+  # - Have --output-dir instead of --output-file
+  if len(options.output_formats) != 1 and options.output_file:
+    raise Exception('Cannot specify output_file flag with multiple output '
+                    'formats.')
 
   output_formatters = []
-  output_skipped_tests_summary = True
-  reporter = None
-  if options.output_format == 'none' or options.chartjson:
-    pass
-  elif options.output_format == 'csv':
-    output_formatters.append(csv_output_formatter.CsvOutputFormatter(
-        output_stream))
-  elif options.output_format == 'buildbot':
-    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
-        output_stream, trace_tag=options.output_trace_tag))
-  elif options.output_format == 'gtest':
-    # TODO(chrishenry): This is here to not change the output of
-    # gtest. Let's try enabling skipped tests summary for gtest test
-    # results too (in a separate patch), and see if we break anything.
-    output_skipped_tests_summary = False
-  elif options.output_format == 'html':
-    # TODO(chrishenry): We show buildbot output so that users can grep
-    # through the results easily without needing to open the html
-    # file. Another option for this is to output the results directly
-    # in gtest-style results (via some sort of progress reporter),
-    # as we plan to enable gtest-style output for all output formatters.
-    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
-        sys.stdout, trace_tag=options.output_trace_tag))
-    output_formatters.append(html_output_formatter.HtmlOutputFormatter(
-        output_stream, benchmark_metadata, options.reset_results,
-        options.upload_results, options.browser_type,
-        options.results_label, trace_tag=options.output_trace_tag))
-  elif options.output_format == 'json':
-    output_formatters.append(
-        json_output_formatter.JsonOutputFormatter(output_stream,
-                                                  benchmark_metadata))
-  elif options.output_format == 'chartjson':
-    output_formatters.append(
-        chart_json_output_formatter.ChartJsonOutputFormatter(
-            output_stream,
-            benchmark_metadata))
-  else:
-    # Should never be reached. The parser enforces the choices.
-    raise Exception('Invalid --output-format "%s". Valid choices are: %s'
-                    % (options.output_format,
-                       ', '.join(_OUTPUT_FORMAT_CHOICES)))
-
-  if options.suppress_gtest_report:
-    reporter = progress_reporter.ProgressReporter()
-  else:
-    reporter = gtest_progress_reporter.GTestProgressReporter(
-        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
+  for output_format in options.output_formats:
+    if output_format == 'none' or output_format == "gtest" or options.chartjson:
+      continue
+
+    output_stream = _GetOutputStream(output_format, options.output_file)
+    if output_format == 'csv':
+      output_formatters.append(csv_output_formatter.CsvOutputFormatter(
+          output_stream))
+    elif output_format == 'buildbot':
+      output_formatters.append(
+          buildbot_output_formatter.BuildbotOutputFormatter(
+              output_stream, trace_tag=options.output_trace_tag))
+    elif output_format == 'html':
+      # TODO(chrishenry): We show buildbot output so that users can grep
+      # through the results easily without needing to open the html
+      # file. Another option for this is to output the results directly
+      # in gtest-style results (via some sort of progress reporter),
+      # as we plan to enable gtest-style output for all output formatters.
+      output_formatters.append(
+          buildbot_output_formatter.BuildbotOutputFormatter(
+              sys.stdout, trace_tag=options.output_trace_tag))
+      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
+          output_stream, benchmark_metadata, options.reset_results,
+          options.upload_results, options.browser_type,
+          options.results_label, trace_tag=options.output_trace_tag))
+    elif output_format == 'json':
+      output_formatters.append(json_output_formatter.JsonOutputFormatter(
+          output_stream, benchmark_metadata))
+    elif output_format == 'chartjson':
+      output_formatters.append(
+          chart_json_output_formatter.ChartJsonOutputFormatter(
+              output_stream, benchmark_metadata))
+    else:
+      # Should never be reached. The parser enforces the choices.
+      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
+                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
+
+  # TODO(chrishenry): This is here to not change the output of
+  # gtest. Let's try enabling skipped tests summary for gtest test
+  # results too (in a separate patch), and see if we break anything.
+  output_skipped_tests_summary = 'gtest' in options.output_formats
+  reporter = _GetProgressReporter(output_skipped_tests_summary,
+                                  options.suppress_gtest_report)
 
   return page_test_results.PageTestResults(
       output_formatters=output_formatters, progress_reporter=reporter)
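
To make the stream-selection rule above easier to see in isolation, here is a standalone restatement of the _GetOutputStream helper added in this hunk; base_dir stands in for util.GetBaseDir() so the sketch is self-contained, and everything else follows the patch:

# Hedged sketch of the rule _GetOutputStream encodes: 'html' and 'json'
# default to a results.<format> file, other formats default to stdout,
# and an explicit output_file always wins. base_dir is an assumption.
import os
import sys

def get_output_stream(output_format, output_file, base_dir='.'):
  assert output_format not in ('gtest', 'none'), (
      'These formats never get a stream of their own.')
  if output_file is None:
    if output_format not in ('html', 'json'):
      return sys.stdout
    output_file = os.path.join(base_dir, 'results.' + output_format)
  output_file = os.path.expanduser(output_file)
  open(output_file, 'a').close()  # Create the file if it doesn't exist.
  return open(output_file, 'r+')

print(get_output_stream('csv', None))        # stdout
print(get_output_stream('html', None).name)  # ./results.html

Opening each stream inside the per-format loop is what lets several formatters coexist: each one writes to its own default file, and the single --output flag is only legal when exactly one format is requested.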
@@ -67,7 +67,7 @@ class PageTestTestCase(unittest.TestCase):
     measurement.CustomizeBrowserOptions(options.browser_options)
     options.output_file = None
-    options.output_format = 'none'
+    options.output_formats = ['none']
     options.suppress_gtest_report = True
     options.output_trace_tag = None
     page_runner.ProcessCommandLineArgs(temp_parser, options)
...