Plumb Telemetry test name through to results object creation

We want to be able to ask for the current benchmark's name when generating results. This change threads a BenchmarkMetadata object through results creation so that the name is available to results objects and output formatters.
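In short: callers that used to write

    results = page_runner.Run(test, page_set, expectations, options)

now create the results object up front and pass it in, so creation can see the benchmark's metadata. A minimal sketch assembled from the hunks below (here `metadata` stands for any BenchmarkMetadata instance):

    # 'metadata' carries the benchmark's name (see BenchmarkMetadata below).
    results = results_options.CreateResults(metadata, options)
    # page_runner.Run no longer returns results; it populates 'results' in place.
    page_runner.Run(test, page_set, expectations, options, results)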

Review URL: https://codereview.chromium.org/386943007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@287483 0039d316-1c4b-4281-b951-d872f2087c98
parent cc0d4be2
@@ -17,7 +17,7 @@ from telemetry.page import page_runner
 from telemetry.page import page_set
 from telemetry.page import page_test
 from telemetry.page import test_expectations
-from telemetry.results import page_test_results
+from telemetry.results import results_options
 from telemetry.util import cloud_storage
@@ -25,6 +25,14 @@ Disabled = decorators.Disabled
 Enabled = decorators.Enabled
 
 
+class BenchmarkMetadata(object):
+  def __init__(self, name):
+    self._name = name
+
+  @property
+  def name(self):
+    return self._name
+
+
 class Benchmark(command_line.Command):
   """Base class for a Telemetry benchmark.
@@ -62,26 +70,29 @@ class Benchmark(command_line.Command):
   def CustomizeBrowserOptions(self, options):
     """Add browser options that are required by this benchmark."""
 
-  def Run(self, args):
+  def GetMetadata(self):
+    return BenchmarkMetadata(self.Name())
+
+  def Run(self, finder_options):
     """Run this test with the given options."""
-    self.CustomizeBrowserOptions(args.browser_options)
+    self.CustomizeBrowserOptions(finder_options.browser_options)
 
-    test = self.PageTestClass()()
-    test.__name__ = self.__class__.__name__
+    pt = self.PageTestClass()()
+    pt.__name__ = self.__class__.__name__
 
     if hasattr(self, '_disabled_strings'):
-      test._disabled_strings = self._disabled_strings
+      pt._disabled_strings = self._disabled_strings
     if hasattr(self, '_enabled_strings'):
-      test._enabled_strings = self._enabled_strings
+      pt._enabled_strings = self._enabled_strings
 
-    ps = self.CreatePageSet(args)
+    ps = self.CreatePageSet(finder_options)
     expectations = self.CreateExpectations(ps)
 
-    self._DownloadGeneratedProfileArchive(args)
+    self._DownloadGeneratedProfileArchive(finder_options)
 
-    results = page_test_results.PageTestResults()
+    results = results_options.CreateResults(self.GetMetadata(), finder_options)
     try:
-      results = page_runner.Run(test, ps, expectations, args)
+      page_runner.Run(pt, ps, expectations, finder_options, results)
     except page_test.TestNotSupportedOnPlatformFailure as failure:
       logging.warning(str(failure))
...
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 import unittest
 
+from telemetry import benchmark
 from telemetry.core import exceptions
 from telemetry.core import util
 from telemetry.page import page_runner
 from telemetry.page import page as page_module
 from telemetry.page import page_set as page_set_module
 from telemetry.page import page_test
+from telemetry.results import results_options
 from telemetry.page import test_expectations
 from telemetry.unittest import options_for_unittests
@@ -25,6 +27,10 @@ class BasicTestPage(page_module.Page):
     interaction.End()
 
 
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
+
+
 class PageMeasurementUnitTestBase(unittest.TestCase):
   """unittest.TestCase-derived class to help in the construction of unit tests
   for a measurement."""
@@ -63,7 +69,9 @@ class PageMeasurementUnitTestBase(unittest.TestCase):
     options.output_trace_tag = None
     page_runner.ProcessCommandLineArgs(temp_parser, options)
     measurement.ProcessCommandLineArgs(temp_parser, options)
-    return page_runner.Run(measurement, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(measurement, ps, expectations, options, results)
+    return results
 
   def TestTracingCleanedUp(self, measurement_class, options=None):
     ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
...
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -327,10 +327,8 @@ def _UpdatePageSetArchivesIfChanged(page_set):
       cloud_storage.GetIfChanged(path, page_set.bucket)
 
 
-def Run(test, page_set, expectations, finder_options):
+def Run(test, page_set, expectations, finder_options, results):
   """Runs a given test against a given page_set with the given options."""
-  results = results_options.PrepareResults(test, finder_options)
-
   test.ValidatePageSet(page_set)
 
   # Create a possible_browser with the given options.
@@ -357,7 +355,7 @@ def Run(test, page_set, expectations, finder_options):
   if not should_run:
     logging.warning('You are trying to run a disabled test.')
    logging.warning('Pass --also-run-disabled-tests to squelch this message.')
-    return results
+    return
 
   # Reorder page set based on options.
   pages = _ShuffleAndFilterPageSet(page_set, finder_options)
@@ -392,7 +390,7 @@ def Run(test, page_set, expectations, finder_options):
       pages.remove(page)
 
   if not pages:
-    return results
+    return
 
   state = _RunState()
   # TODO(dtu): Move results creation and results_for_current_run into RunState.
@@ -435,7 +433,7 @@ def Run(test, page_set, expectations, finder_options):
   finally:
     state.StopBrowser()
 
-  return results
+  return
...
-# Copyright 2012 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -7,6 +7,7 @@ import os
 import tempfile
 import unittest
 
+from telemetry import benchmark
 from telemetry import decorators
 from telemetry.core import browser_finder
 from telemetry.core import exceptions
@@ -18,6 +19,7 @@ from telemetry.page import page_set
 from telemetry.page import page_test
 from telemetry.page import page_runner
 from telemetry.page import test_expectations
+from telemetry.results import results_options
 from telemetry.unittest import options_for_unittests
 from telemetry.value import scalar
 from telemetry.value import string
@@ -39,6 +41,9 @@ def SetUpPageRunnerArguments(options):
   options.MergeDefaultValues(parser.get_default_values())
   page_runner.ProcessCommandLineArgs(parser, options)
 
+class EmptyMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(EmptyMetadataForTest, self).__init__('')
 
 class StubCredentialsBackend(object):
   def __init__(self, login_return_value):
@@ -79,7 +84,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Test(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Test(), ps, expectations, options, results)
     self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(1, len(results.failures))
@@ -108,7 +114,8 @@ class PageRunnerTests(unittest.TestCase):
     options.output_format = 'none'
     test = Test()
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     self.assertEquals(2, test.run_count)
     self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(1, len(results.failures))
@@ -127,8 +134,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(
-        Test(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Test(), ps, expectations, options, results)
     self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
@@ -153,7 +160,8 @@ class PageRunnerTests(unittest.TestCase):
     options.output_format = 'csv'
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(CrashyMeasurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(CrashyMeasurement(), ps, expectations, options, results)
     self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
@@ -187,7 +195,8 @@ class PageRunnerTests(unittest.TestCase):
     options.page_repeat = 1
     options.pageset_repeat = 1
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Measurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Measurement(), ps, expectations, options, results)
     self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
     self.assertEquals(0, len(results.all_page_specific_values))
@@ -195,7 +204,8 @@ class PageRunnerTests(unittest.TestCase):
     options.page_repeat = 1
     options.pageset_repeat = 2
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Measurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Measurement(), ps, expectations, options, results)
     self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
     self.assertEquals(2, len(results.all_page_specific_values))
@@ -203,7 +213,8 @@ class PageRunnerTests(unittest.TestCase):
     options.page_repeat = 2
     options.pageset_repeat = 1
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Measurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Measurement(), ps, expectations, options, results)
     self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
     self.assertEquals(2, len(results.all_page_specific_values))
@@ -212,7 +223,8 @@ class PageRunnerTests(unittest.TestCase):
     options.page_repeat = 1
     options.pageset_repeat = 1
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Measurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Measurement(), ps, expectations, options, results)
     self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
     self.assertEquals(0, len(results.all_page_specific_values))
@@ -245,7 +257,8 @@ class PageRunnerTests(unittest.TestCase):
     options.page_repeat = 1
     options.pageset_repeat = 2
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Measurement(), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Measurement(), ps, expectations, options, results)
     results.PrintSummary()
     self.assertEquals(4, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
@@ -306,7 +319,8 @@ class PageRunnerTests(unittest.TestCase):
       options = options_for_unittests.GetCopy()
       options.output_format = 'none'
       SetUpPageRunnerArguments(options)
-      page_runner.Run(test, ps, expectations, options)
+      results = results_options.CreateResults(EmptyMetadataForTest(), options)
+      page_runner.Run(test, ps, expectations, options, results)
     finally:
       os.remove(f.name)
@@ -335,7 +349,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
@@ -364,7 +379,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
 
   # Ensure that page_runner allows the test to customize the browser before it
   # launches.
@@ -396,7 +412,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
 
   def testRunPageWithStartupUrl(self):
     ps = page_set.PageSet()
@@ -426,7 +443,8 @@ class PageRunnerTests(unittest.TestCase):
       return
     test = Measurement()
     SetUpPageRunnerArguments(options)
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     self.assertEquals('about:blank', options.browser_options.startup_url)
     self.assertTrue(test.browser_restarted)
@@ -454,7 +472,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     assert test.did_call_clean_up
 
   # Ensure skipping the test if page cannot be run on the browser
@@ -490,7 +509,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     self.assertFalse(test.will_navigate_to_page_called)
     self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
     self.assertEquals(0, len(results.failures))
@@ -519,7 +539,8 @@ class PageRunnerTests(unittest.TestCase):
         pass
 
     test = ArchiveTest()
-    page_runner.Run(test, ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(test, ps, expectations, options, results)
     if expect_from_archive and not test.archive_path_exist:
       logging.warning('archive path did not exist, asserting that page '
                       'is from archive is skipped.')
@@ -574,7 +595,8 @@ class PageRunnerTests(unittest.TestCase):
     options = options_for_unittests.GetCopy()
     options.output_format = 'none'
     SetUpPageRunnerArguments(options)
-    results = page_runner.Run(Test(max_failures=2), ps, expectations, options)
+    results = results_options.CreateResults(EmptyMetadataForTest(), options)
+    page_runner.Run(Test(max_failures=2), ps, expectations, options, results)
     self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
     # Runs up to max_failures+1 failing tests before stopping, since
     # every tests after max_failures failures have been encountered
...
-# Copyright 2013 The Chromium Authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -12,12 +12,14 @@ import stat
 import sys
 import tempfile
 
+from telemetry import benchmark
 from telemetry.core import browser_options
 from telemetry.core import discover
 from telemetry.core import util
 from telemetry.page import page_runner
 from telemetry.page import profile_creator
 from telemetry.page import test_expectations
+from telemetry.results import results_options
 
 
 def _DiscoverProfileCreatorClasses():
@@ -72,7 +74,9 @@ def GenerateProfiles(profile_creator_class, profile_creator_name, options):
   temp_output_directory = tempfile.mkdtemp()
   options.output_profile_path = temp_output_directory
 
-  results = page_runner.Run(test, test.page_set, expectations, options)
+  results = results_options.CreateResults(
+      benchmark.BenchmarkMetadata(test.__class__.__name__), options)
+  page_runner.Run(test, test.page_set, expectations, options, results)
 
   if results.failures:
     logging.warning('Some pages failed.')
...
@@ -17,6 +17,7 @@ from telemetry.page import page_test
 from telemetry.page import profile_creator
 from telemetry.page import test_expectations
 from telemetry.results import page_measurement_results
+from telemetry.results import results_options
 
 
 class RecorderPageTest(page_test.PageTest):  # pylint: disable=W0223
@@ -113,7 +114,6 @@ def _MaybeGetInstanceOfClass(target, base_dir, cls):
 class WprRecorder(object):
   def __init__(self, base_dir, target, args=None):
     action_names_to_run = FindAllActionNames(base_dir)
     self._record_page_test = RecorderPageTest(action_names_to_run)
@@ -139,6 +139,14 @@ class WprRecorder(object):
     options.browser_options.no_proxy_server = True
     return options
 
+  def CreateResults(self):
+    if self._benchmark is not None:
+      benchmark_metadata = self._benchmark.GetMetadata()
+    else:
+      benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')
+
+    return results_options.CreateResults(benchmark_metadata, self._options)
+
   def _AddCommandLineArgs(self):
     page_runner.AddCommandLineArgs(self._parser)
     if self._benchmark is not None:
@@ -163,11 +171,11 @@ class WprRecorder(object):
       sys.exit(1)
     return ps
 
-  def Record(self):
+  def Record(self, results):
     self._page_set.wpr_archive_info.AddNewTemporaryRecording()
     self._record_page_test.CustomizeBrowserOptions(self._options)
-    return page_runner.Run(self._record_page_test, self._page_set,
-                           test_expectations.TestExpectations(), self._options)
+    page_runner.Run(self._record_page_test, self._page_set,
+                    test_expectations.TestExpectations(), self._options,
+                    results)
 
   def HandleResults(self, results):
     if results.failures or results.skipped_values:
@@ -185,6 +193,7 @@ def Main(base_dir):
     sys.exit(1)
   target = quick_args.pop()
   wpr_recorder = WprRecorder(base_dir, target)
-  results = wpr_recorder.Record()
+  results = wpr_recorder.CreateResults()
+  wpr_recorder.Record(results)
   wpr_recorder.HandleResults(results)
   return min(255, len(results.failures))
@@ -144,7 +144,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
     mock_page_set = MockPageSet(url=self._url)
     wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
                                           mock_page_set, flags)
-    results = wpr_recorder.Record()
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
     self.assertEqual(set(mock_page_set.pages), results.pages_that_succeeded)
 
   def testWprRecorderWithBenchmark(self):
@@ -152,7 +153,8 @@ class RecordWprUnitTests(tab_test_case.TabTestCase):
     mock_benchmark = MockBenchmark()
     wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
                                           flags)
-    results = wpr_recorder.Record()
+    results = wpr_recorder.CreateResults()
+    wpr_recorder.Record(results)
     self.assertEqual(set(mock_benchmark.mock_page_set.pages),
                      results.pages_that_succeeded)
...
@@ -30,12 +30,12 @@ _UNIT_JSON = ('tools', 'perf', 'unit-info.json')
 # Leaving as-is now since we are going to move HtmlOutputFormatter to be
 # based on JSON anyway.
 class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
-  def __init__(self, output_stream, test_name, reset_results, upload_results,
+  def __init__(self, output_stream, metadata, reset_results, upload_results,
               browser_type, results_label=None, trace_tag=''):
     # Pass output_stream=None so that we blow up if
     # BuildbotOutputFormatter ever use the output_stream.
     super(HtmlOutputFormatter, self).__init__(None, trace_tag)
-    self._test_name = test_name
+    self._metadata = metadata
     self._reset_results = reset_results
     self._upload_results = upload_results
     self._html_output_stream = output_stream
@@ -101,6 +101,10 @@ class HtmlOutputFormatter(buildbot_output_formatter.BuildbotOutputFormatter):
       'important': result_type == 'default'
     }
 
+  @property
+  def _test_name(self):
+    return self._metadata.name
+
   def GetResults(self):
     return self._result
...
@@ -5,6 +5,7 @@ import os
 import StringIO
 import unittest
 
+from telemetry import benchmark
 from telemetry.page import page_set
 from telemetry.results import html_output_formatter
 from telemetry.results import page_test_results
@@ -27,6 +28,9 @@ class DeterministicHtmlOutputFormatter(
   def _GetRevision(self):
     return 'revision'
 
+class FakeMetadataForTest(benchmark.BenchmarkMetadata):
+  def __init__(self):
+    super(FakeMetadataForTest, self).__init__('test_name')
 
 # Wrap string IO with a .name property so that it behaves more like a file.
 class StringIOFile(StringIO.StringIO):
@@ -54,7 +58,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', False, False, 'browser_type')
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
     formatter.Format(results)
     expected = {
       "platform": "browser_type",
@@ -111,7 +115,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', False, False, 'browser_type')
+        output_file, FakeMetadataForTest(), False, False, 'browser_type')
     formatter.Format(results)
     expected = [
       {
@@ -207,7 +211,7 @@ class HtmlOutputFormatterTest(unittest.TestCase):
     results.DidRunPage(test_page_set.pages[1])
 
     formatter = DeterministicHtmlOutputFormatter(
-        output_file, 'test_name', True, False, 'browser_type')
+        output_file, FakeMetadataForTest(), True, False, 'browser_type')
     formatter.Format(results)
     expected = [{
       "platform": "browser_type",
...
@@ -6,9 +6,10 @@ import json
 from telemetry.results import output_formatter
 
-def ResultsAsDict(res):
+def ResultsAsDict(res, metadata):
   result_dict = {
-    'format_version': '0.1',
+    'format_version': '0.2',
+    'benchmark_name': metadata.name,
     'summary_values': [v.AsDict() for v in res.all_summary_values],
     'per_page_values': [v.AsDict() for v in res.all_page_specific_values],
     'pages': dict((p.id, p.AsDict()) for p in _all_pages(res))
@@ -21,9 +22,15 @@ def _all_pages(res):
   return pages
 
 class JsonOutputFormatter(output_formatter.OutputFormatter):
-  def __init__(self, output_stream):
+  def __init__(self, output_stream, metadata):
     super(JsonOutputFormatter, self).__init__(output_stream)
+    self._metadata = metadata
+
+  @property
+  def metadata(self):
+    return self._metadata
 
   def Format(self, page_test_results):
-    json.dump(ResultsAsDict(page_test_results), self.output_stream)
+    json.dump(ResultsAsDict(page_test_results, self.metadata),
+              self.output_stream)
     self.output_stream.write('\n')
@@ -6,9 +6,9 @@ import os
 import unittest
 import json
 
+from telemetry import benchmark
 from telemetry.results import json_output_formatter
 from telemetry.results import page_test_results
-from telemetry.results.json_output_formatter import ResultsAsDict
 from telemetry.page import page_set
 from telemetry.value import scalar
@@ -25,12 +25,12 @@ def _HasPage(pages, page):
 def _HasValueNamed(values, name):
   return len([x for x in values if x['name'] == name]) == 1
 
 class JsonOutputFormatterTest(unittest.TestCase):
   def setUp(self):
     self._output = StringIO.StringIO()
     self._page_set = _MakePageSet()
-    self._formatter = json_output_formatter.JsonOutputFormatter(self._output)
+    self._formatter = json_output_formatter.JsonOutputFormatter(self._output,
+        benchmark.BenchmarkMetadata('test_name'))
 
   def testOutputAndParse(self):
     results = page_test_results.PageTestResults()
@@ -52,7 +52,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     results.AddValue(v0)
     results.DidRunPage(self._page_set[0])
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
     self.assertTrue(_HasValueNamed(d['per_page_values'], 'foo'))
@@ -69,7 +69,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     results.AddValue(v1)
     results.DidRunPage(self._page_set[1])
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertTrue(_HasPage(d['pages'], self._page_set[0]))
     self.assertTrue(_HasPage(d['pages'], self._page_set[1]))
@@ -81,7 +81,7 @@ class JsonOutputFormatterTest(unittest.TestCase):
     v = scalar.ScalarValue(None, 'baz', 'seconds', 5)
     results.AddSummaryValue(v)
 
-    d = ResultsAsDict(results)
+    d = json_output_formatter.ResultsAsDict(results, self._formatter.metadata)
 
     self.assertFalse(d['pages'])
     self.assertTrue(_HasValueNamed(d['summary_values'], 'baz'))
@@ -43,7 +43,7 @@ def AddResultsOptions(parser):
   parser.add_option_group(group)
 
-def PrepareResults(test, options):
+def CreateResults(metadata, options):
   # TODO(chrishenry): This logic prevents us from having multiple
   # OutputFormatters. We should have an output_file per OutputFormatter.
   # Maybe we should have --output-dir instead of --output-file?
@@ -85,12 +85,12 @@ def PrepareResults(test, options):
     output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
         sys.stdout, trace_tag=options.output_trace_tag))
     output_formatters.append(html_output_formatter.HtmlOutputFormatter(
-        output_stream, test.__class__.__name__, options.reset_results,
+        output_stream, metadata, options.reset_results,
         options.upload_results, options.browser_type,
         options.results_label, trace_tag=options.output_trace_tag))
   elif options.output_format == 'json':
     output_formatters.append(json_output_formatter.JsonOutputFormatter(
-        output_stream))
+        output_stream, metadata))
   else:
     # Should never be reached. The parser enforces the choices.
     raise Exception('Invalid --output-format "%s". Valid choices are: %s'
...