Commit 93e6fa0b authored by Juan Antonio Navarro Perez, committed by Commit Bot

[tools/perf] Use GetRunOptions() to get options for unit tests

The new options_for_unittests.GetRunOptions function factors out all of
the details required to build an options object.

This removes all remaining references in tools/perf to the
Add-/ProcessCommandLineArgs functions from the telemetry.benchmark
module.

Depends on catapult CL:
https://chromium-review.googlesource.com/c/catapult/+/1713636
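
For context, the pattern the tests move to looks roughly like the sketch
below (a minimal illustration, not code from this CL; SomeBenchmark is a
hypothetical benchmark class, and the GetRunOptions keyword arguments shown
are the ones used in this change):

  from py_utils import tempfile_ext
  from telemetry.testing import options_for_unittests

  # Previously each test built a parser, called the benchmark's
  # Add-/ProcessCommandLineArgs hooks and merged the defaults by hand.
  # Now the helper does that wiring in one call:
  with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
    options = options_for_unittests.GetRunOptions(
        output_dir=temp_dir, benchmark_cls=SomeBenchmark)
    options.pageset_repeat = 1  # Smoke tests typically run each story once.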

Bug: 985712
Change-Id: I49403b437a5e5e992328199372e4d9db57740906
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1720834
Commit-Queue: Juan Antonio Navarro Pérez <perezju@chromium.org>
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#683132}
parent 1a3039bf
@@ -19,7 +19,9 @@ from telemetry import benchmark as benchmark_module
 from telemetry import decorators
 from telemetry.testing import options_for_unittests
 from telemetry.testing import progress_reporter
 from py_utils import discover
+from py_utils import tempfile_ext
 from benchmarks import jetstream
 from benchmarks import kraken
@@ -49,18 +51,26 @@ def SmokeTestGenerator(benchmark, num_pages=1):
   @decorators.Disabled('android')  # crbug.com/641934
   def BenchmarkSmokeTest(self):
     class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
-      # Only measure a single page so that this test cycles reasonably quickly.
-      options = benchmark.options.copy()
-      options['pageset_repeat'] = 1
       def CreateStorySet(self, options):
         # pylint: disable=super-on-old-class
         story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
+        # We want to prevent benchmarks from accidentally trying to upload too
+        # much data to the chrome perf dashboard. So this test tries to
+        # estimate the number of values that the benchmark _would_ create when
+        # running on the waterfall, and fails if too many values are produced.
+        # As we run a single story and not the whole benchmark, the maximum
+        # number of values allowed is scaled proportionally.
+        # TODO(crbug.com/981349): This logic is only really valid for legacy
+        # values, and does not take histograms into account. An alternative
+        # should be implemented when using the results processor.
+        type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)
         # Only smoke test the first story since smoke testing everything takes
         # too long.
         for s in story_set.stories[num_pages:]:
           story_set.RemoveStory(s)
         return story_set
     # Some benchmarks are running multiple iterations
@@ -68,35 +78,19 @@ def SmokeTestGenerator(benchmark, num_pages=1):
     if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
       SinglePageBenchmark.enable_smoke_test_mode = True
-    # Set the benchmark's default arguments.
-    options = options_for_unittests.GetCopy()
-    options.output_formats = ['none']
-    parser = options.CreateParser()
-    SinglePageBenchmark.AddCommandLineArgs(parser)
-    benchmark_module.AddCommandLineArgs(parser)
-    SinglePageBenchmark.SetArgumentDefaults(parser)
-    options.MergeDefaultValues(parser.get_default_values())
-    # Prevent benchmarks from accidentally trying to upload too much data to the
-    # chromeperf dashboard. The number of values uploaded is equal to (the
-    # average number of values produced by a single story) * (1 + (the number of
-    # stories)). The "1 + " accounts for values summarized across all stories.
-    # We can approximate "the average number of values produced by a single
-    # story" as the number of values produced by the first story.
-    # pageset_repeat doesn't matter because values are summarized across
-    # repetitions before uploading.
-    story_set = benchmark().CreateStorySet(options)
-    SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set.stories)
-    SinglePageBenchmark.ProcessCommandLineArgs(None, options)
-    benchmark_module.ProcessCommandLineArgs(None, options)
-    single_page_benchmark = SinglePageBenchmark()
-    with open(path_util.GetExpectationsPath()) as fp:
-      single_page_benchmark.AugmentExpectationsWithParser(fp.read())
-    return_code = single_page_benchmark.Run(options)
+    with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+      # Set the benchmark's default arguments.
+      options = options_for_unittests.GetRunOptions(
+          output_dir=temp_dir,
+          benchmark_cls=SinglePageBenchmark)
+      options.pageset_repeat = 1  # For smoke testing only run the page once.
+      single_page_benchmark = SinglePageBenchmark()
+      with open(path_util.GetExpectationsPath()) as fp:
+        single_page_benchmark.AugmentExpectationsWithParser(fp.read())
+      return_code = single_page_benchmark.Run(options)
     if return_code == -1:
       self.skipTest('The benchmark was not run.')
     self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
......
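
To make the proportional cap introduced above concrete, a tiny worked
example (the numbers are hypothetical; only the formula mirrors the diff):

  # Hypothetical numbers, for illustration only.
  MAX_NUM_VALUES = 50000          # value limit for a full benchmark run
  num_stories = 25                # size of the benchmark's full story set
  # The single-story smoke test scales the limit down proportionally:
  per_story_limit = MAX_NUM_VALUES / num_stories  # 2000.0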
@@ -12,7 +12,7 @@ from core import perf_benchmark
 from telemetry import benchmark as benchmark_module
 from telemetry import decorators
-from telemetry.internal.browser import browser_options
+from telemetry.testing import options_for_unittests
 from telemetry.testing import progress_reporter
 from py_utils import discover
@@ -24,13 +24,12 @@ def _GetAllPerfBenchmarks():
 def _BenchmarkOptionsTestGenerator(benchmark):
-  def testBenchmarkOptions(self):  # pylint: disable=unused-argument
-    """Invalid options will raise benchmark.InvalidOptionsError."""
-    options = browser_options.BrowserFinderOptions()
-    parser = options.CreateParser()
-    benchmark.AddCommandLineArgs(parser)
-    benchmark_module.AddCommandLineArgs(parser)
-    benchmark.SetArgumentDefaults(parser)
+  def testBenchmarkOptions(self):
+    """Tests whether benchmark options can be constructed without errors."""
+    try:
+      options_for_unittests.GetRunOptions(benchmark_cls=benchmark)
+    except benchmark_module.InvalidOptionsError as exc:
+      self.fail(str(exc))
   return testBenchmarkOptions
......
@@ -14,13 +14,13 @@ import unittest
 from core import path_util
 from core import perf_benchmark
-from telemetry import benchmark as benchmark_module
 from telemetry import decorators
 from telemetry.internal.browser import browser_finder
 from telemetry.testing import options_for_unittests
 from telemetry.testing import progress_reporter
 from py_utils import discover
+from py_utils import tempfile_ext
 from benchmarks import system_health
@@ -183,11 +183,22 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
   # failing, disable it by putting it into the _DISABLED_TESTS list above.
   @decorators.Disabled('chromeos')  # crbug.com/351114
   def RunTest(self):
     class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
       def CreateStorySet(self, options):
         # pylint: disable=super-on-old-class
         story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
+        # We want to prevent benchmarks from accidentally trying to upload too
+        # much data to the chrome perf dashboard. So this test tries to
+        # estimate the number of values that the benchmark _would_ create when
+        # running on the waterfall, and fails if too many values are produced.
+        # As we run a single story and not the whole benchmark, the maximum
+        # number of values allowed is scaled proportionally.
+        # TODO(crbug.com/981349): This logic is only really valid for legacy
+        # values, and does not take histograms into account. An alternative
+        # should be implemented when using the results processor.
+        type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)
         stories_to_remove = [s for s in story_set.stories if s !=
                              story_to_smoke_test]
         for s in stories_to_remove:
@@ -195,40 +206,32 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
         assert story_set.stories
         return story_set
-    options = GenerateBenchmarkOptions(benchmark_class)
-    # Prevent benchmarks from accidentally trying to upload too much data to the
-    # chromeperf dashboard. The number of values uploaded is equal to (the
-    # average number of values produced by a single story) * (1 + (the number of
-    # stories)). The "1 + " accounts for values summarized across all stories.
-    # We can approximate "the average number of values produced by a single
-    # story" as the number of values produced by the given story.
-    # pageset_repeat doesn't matter because values are summarized across
-    # repetitions before uploading.
-    story_set = benchmark_class().CreateStorySet(options)
-    SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set.stories)
-    possible_browser = browser_finder.FindBrowser(options)
-    if possible_browser is None:
-      self.skipTest('Cannot find the browser to run the test.')
-    simplified_test_name = self.id().replace(
-        'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
-        '')
-    # Sanity check to ensure that that substring removal was effective.
-    assert len(simplified_test_name) < len(self.id())
-    if (simplified_test_name in _DISABLED_TESTS and
-        not options.run_disabled_tests):
-      self.skipTest('Test is explicitly disabled')
-    single_page_benchmark = SinglePageBenchmark()
-    with open(path_util.GetExpectationsPath()) as fp:
-      single_page_benchmark.AugmentExpectationsWithParser(fp.read())
-    return_code = single_page_benchmark.Run(options)
+    with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
+      # Set the benchmark's default arguments.
+      options = GenerateBenchmarkOptions(
+          output_dir=temp_dir,
+          benchmark_cls=SinglePageBenchmark)
+      possible_browser = browser_finder.FindBrowser(options)
+      if possible_browser is None:
+        self.skipTest('Cannot find the browser to run the test.')
+      simplified_test_name = self.id().replace(
+          'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
+          '')
+      # Sanity check to ensure that that substring removal was effective.
+      assert len(simplified_test_name) < len(self.id())
+      if (simplified_test_name in _DISABLED_TESTS and
+          not options.run_disabled_tests):
+        self.skipTest('Test is explicitly disabled')
+      single_page_benchmark = SinglePageBenchmark()
+      with open(path_util.GetExpectationsPath()) as fp:
+        single_page_benchmark.AugmentExpectationsWithParser(fp.read())
+      return_code = single_page_benchmark.Run(options)
     if return_code == -1:
       self.skipTest('The benchmark was not run.')
     self.assertEqual(0, return_code, msg='Failed: %s' % benchmark_class)
@@ -247,31 +250,17 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
   return SystemHealthBenchmarkSmokeTest(methodName=test_method_name)
-def GenerateBenchmarkOptions(benchmark_class):
-  # Set the benchmark's default arguments.
-  options = options_for_unittests.GetCopy()
-  options.output_formats = ['none']
-  parser = options.CreateParser()
-  # TODO(nednguyen): probably this logic of setting up the benchmark options
-  # parser & processing the options should be sharable with telemetry's
-  # core.
-  benchmark_class.AddCommandLineArgs(parser)
-  benchmark_module.AddCommandLineArgs(parser)
-  benchmark_class.SetArgumentDefaults(parser)
-  options.MergeDefaultValues(parser.get_default_values())
-  benchmark_class.ProcessCommandLineArgs(None, options)
-  benchmark_module.ProcessCommandLineArgs(None, options)
-  # Only measure a single story so that this test cycles reasonably quickly.
-  options.pageset_repeat = 1
+def GenerateBenchmarkOptions(output_dir, benchmark_cls):
+  options = options_for_unittests.GetRunOptions(
+      output_dir=output_dir, benchmark_cls=benchmark_cls)
+  options.pageset_repeat = 1  # For smoke testing only run each page once.
   # Enable browser logging in the smoke test only. Hopefully, this will detect
   # all crashes and hence remove the need to enable logging in actual perf
   # benchmarks.
   options.browser_options.logging_verbosity = 'non-verbose'
-  options.target_platforms = benchmark_class.GetSupportedPlatformNames(
-      benchmark_class.SUPPORTED_PLATFORMS)
+  options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
+      benchmark_cls.SUPPORTED_PLATFORMS)
   return options
......
@@ -2,13 +2,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import logging
-import optparse
 import os
 import unittest
 from telemetry import benchmark as benchmark_module
-from telemetry.internal.browser import browser_options
 from telemetry.page import legacy_page_test
 from telemetry.testing import options_for_unittests
 from telemetry.web_perf import timeline_based_measurement
@@ -34,23 +31,12 @@ def _GetAllPossiblePageTestInstances():
       benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()
   # Get all page test instances from defined benchmarks.
-  # Note: since this depends on the command line options, there is no guaranteed
+  # Note: since this depends on the command line options, there is no guarantee
   # that this will generate all possible page test instances but it's worth
   # enough for smoke test purpose.
-  for benchmark_class in all_benchmarks_classes:
-    options = options_for_unittests.GetCopy()
-    parser = optparse.OptionParser()
-    browser_options.BrowserOptions.AddCommandLineArgs(parser)
-    try:
-      benchmark_class.AddCommandLineArgs(parser)
-      benchmark_module.AddCommandLineArgs(parser)
-      benchmark_class.SetArgumentDefaults(parser)
-    except Exception:
-      logging.error('Exception raised when processing benchmark %s',
-                    benchmark_class)
-      raise
-    options.MergeDefaultValues(parser.get_default_values())
-    pt = benchmark_class().CreatePageTest(options)
+  for benchmark_cls in all_benchmarks_classes:
+    options = options_for_unittests.GetRunOptions(benchmark_cls=benchmark_cls)
+    pt = benchmark_cls().CreatePageTest(options)
     if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
       page_test_instances.append(pt)
......