Commit 93e6fa0b authored by Juan Antonio Navarro Perez, committed by Commit Bot

[tools/perf] Use GetRunOptions() to get options for unit tests

The new options_for_unittests.GetRunOptions function now factors out all
of the details required to build an options object.

This removes all remaining references in tools/perf to the
Add-/ProcessCommandLineArgs functions from the telemetry.benchmark
module.

Depends on catapult CL:
https://chromium-review.googlesource.com/c/catapult/+/1713636

Bug: 985712
Change-Id: I49403b437a5e5e992328199372e4d9db57740906
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1720834
Commit-Queue: Juan Antonio Navarro Pérez <perezju@chromium.org>
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#683132}
parent 1a3039bf
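
In short, each test used to assemble its options object by hand: copy the unittest options, create a parser, register the benchmark and telemetry.benchmark command-line hooks, merge defaults, and then process the arguments. With this CL the same setup collapses into a single GetRunOptions() call. Below is a minimal sketch of the before/after pattern, assembled from the hunks that follow; benchmark_cls is a placeholder for whichever benchmark class a test exercises, and the GetRunOptions signature itself comes from the catapult CL referenced above.

```python
from telemetry import benchmark as benchmark_module
from telemetry.testing import options_for_unittests
from py_utils import tempfile_ext

# benchmark_cls is a placeholder for any telemetry Benchmark subclass
# discovered from tools/perf/benchmarks.

# Before: every test wired the parsers and argument hooks together itself.
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
parser = options.CreateParser()
benchmark_cls.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark_cls.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
benchmark_cls.ProcessCommandLineArgs(None, options)
benchmark_module.ProcessCommandLineArgs(None, options)

# After: one call builds a ready-to-run options object, with an output
# directory supplied when the test actually runs the benchmark.
with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
    options = options_for_unittests.GetRunOptions(
        output_dir=temp_dir, benchmark_cls=benchmark_cls)
```
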
@@ -19,7 +19,9 @@ from telemetry import benchmark as benchmark_module
from telemetry import decorators
from telemetry.testing import options_for_unittests
from telemetry.testing import progress_reporter
from py_utils import discover
from py_utils import tempfile_ext
from benchmarks import jetstream
from benchmarks import kraken
@@ -49,18 +51,26 @@ def SmokeTestGenerator(benchmark, num_pages=1):
@decorators.Disabled('android') # crbug.com/641934
def BenchmarkSmokeTest(self):
class SinglePageBenchmark(benchmark): # pylint: disable=no-init
# Only measure a single page so that this test cycles reasonably quickly.
options = benchmark.options.copy()
options['pageset_repeat'] = 1
def CreateStorySet(self, options):
# pylint: disable=super-on-old-class
story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
# We want to prevent benchmarks from accidentally trying to upload too
much data to the chrome perf dashboard. So this test tries to
estimate the number of values that the benchmark _would_ create when
# running on the waterfall, and fails if too many values are produced.
# As we run a single story and not the whole benchmark, the number of
# max values allowed is scaled proportionally.
# TODO(crbug.com/981349): This logic is only really valid for legacy
# values, and does not take histograms into account. An alternative
# should be implemented when using the results processor.
type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)
# Only smoke test the first story since smoke testing everything takes
# too long.
for s in story_set.stories[num_pages:]:
story_set.RemoveStory(s)
return story_set
# Some benchmarks are running multiple iterations
@@ -68,35 +78,19 @@ def SmokeTestGenerator(benchmark, num_pages=1):
if hasattr(SinglePageBenchmark, 'enable_smoke_test_mode'):
SinglePageBenchmark.enable_smoke_test_mode = True
# Set the benchmark's default arguments.
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
parser = options.CreateParser()
SinglePageBenchmark.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
SinglePageBenchmark.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
# Prevent benchmarks from accidentally trying to upload too much data to the
# chromeperf dashboard. The number of values uploaded is equal to (the
# average number of values produced by a single story) * (1 + (the number of
# stories)). The "1 + " accounts for values summarized across all stories.
# We can approximate "the average number of values produced by a single
# story" as the number of values produced by the first story.
# pageset_repeat doesn't matter because values are summarized across
# repetitions before uploading.
story_set = benchmark().CreateStorySet(options)
SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set.stories)
SinglePageBenchmark.ProcessCommandLineArgs(None, options)
benchmark_module.ProcessCommandLineArgs(None, options)
single_page_benchmark = SinglePageBenchmark()
with open(path_util.GetExpectationsPath()) as fp:
single_page_benchmark.AugmentExpectationsWithParser(fp.read())
return_code = single_page_benchmark.Run(options)
with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
# Set the benchmark's default arguments.
options = options_for_unittests.GetRunOptions(
output_dir=temp_dir,
benchmark_cls=SinglePageBenchmark)
options.pageset_repeat = 1 # For smoke testing only run the page once.
single_page_benchmark = SinglePageBenchmark()
with open(path_util.GetExpectationsPath()) as fp:
single_page_benchmark.AugmentExpectationsWithParser(fp.read())
return_code = single_page_benchmark.Run(options)
if return_code == -1:
self.skipTest('The benchmark was not run.')
self.assertEqual(0, return_code, msg='Failed: %s' % benchmark)
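
As a concrete (made-up) illustration of the MAX_NUM_VALUES scaling in the hunk above: because the smoke test runs only a single story, the benchmark-wide cap is divided by the size of the full story set, so the one story gets a proportionally smaller budget. The numbers below are assumptions for illustration only.

```python
# Toy numbers only; the real MAX_NUM_VALUES constant is defined in this test
# module, and the story count depends on the benchmark under test.
MAX_NUM_VALUES = 50000   # assumed benchmark-wide cap on uploaded values
num_stories = 25         # assumed size of the benchmark's full story set

per_story_budget = MAX_NUM_VALUES / num_stories
print(per_story_budget)  # 2000.0 -> values the single smoke-tested story may produce
```
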
@@ -12,7 +12,7 @@ from core import perf_benchmark
from telemetry import benchmark as benchmark_module
from telemetry import decorators
from telemetry.internal.browser import browser_options
from telemetry.testing import options_for_unittests
from telemetry.testing import progress_reporter
from py_utils import discover
@@ -24,13 +24,12 @@ def _GetAllPerfBenchmarks():
def _BenchmarkOptionsTestGenerator(benchmark):
def testBenchmarkOptions(self): # pylint: disable=unused-argument
"""Invalid options will raise benchmark.InvalidOptionsError."""
options = browser_options.BrowserFinderOptions()
parser = options.CreateParser()
benchmark.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark.SetArgumentDefaults(parser)
def testBenchmarkOptions(self):
"""Tests whether benchmark options can be constructed without errors."""
try:
options_for_unittests.GetRunOptions(benchmark_cls=benchmark)
except benchmark_module.InvalidOptionsError as exc:
self.fail(str(exc))
return testBenchmarkOptions
@@ -14,13 +14,13 @@ import unittest
from core import path_util
from core import perf_benchmark
from telemetry import benchmark as benchmark_module
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.testing import options_for_unittests
from telemetry.testing import progress_reporter
from py_utils import discover
from py_utils import tempfile_ext
from benchmarks import system_health
@@ -183,11 +183,22 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
# failing, disable it by putting it into the _DISABLED_TESTS list above.
@decorators.Disabled('chromeos') # crbug.com/351114
def RunTest(self):
class SinglePageBenchmark(benchmark_class): # pylint: disable=no-init
def CreateStorySet(self, options):
# pylint: disable=super-on-old-class
story_set = super(SinglePageBenchmark, self).CreateStorySet(options)
# We want to prevent benchmarks from accidentally trying to upload too
much data to the chrome perf dashboard. So this test tries to
estimate the number of values that the benchmark _would_ create when
# running on the waterfall, and fails if too many values are produced.
# As we run a single story and not the whole benchmark, the number of
# max values allowed is scaled proportionally.
# TODO(crbug.com/981349): This logic is only really valid for legacy
# values, and does not take histograms into account. An alternative
# should be implemented when using the results processor.
type(self).MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set)
stories_to_remove = [s for s in story_set.stories if s !=
story_to_smoke_test]
for s in stories_to_remove:
@@ -195,40 +206,32 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
assert story_set.stories
return story_set
options = GenerateBenchmarkOptions(benchmark_class)
# Prevent benchmarks from accidentally trying to upload too much data to the
# chromeperf dashboard. The number of values uploaded is equal to (the
# average number of values produced by a single story) * (1 + (the number of
# stories)). The "1 + " accounts for values summarized across all stories.
# We can approximate "the average number of values produced by a single
# story" as the number of values produced by the given story.
# pageset_repeat doesn't matter because values are summarized across
# repetitions before uploading.
story_set = benchmark_class().CreateStorySet(options)
SinglePageBenchmark.MAX_NUM_VALUES = MAX_NUM_VALUES / len(story_set.stories)
with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
# Set the benchmark's default arguments.
options = GenerateBenchmarkOptions(
output_dir=temp_dir,
benchmark_cls=SinglePageBenchmark)
possible_browser = browser_finder.FindBrowser(options)
if possible_browser is None:
self.skipTest('Cannot find the browser to run the test.')
possible_browser = browser_finder.FindBrowser(options)
if possible_browser is None:
self.skipTest('Cannot find the browser to run the test.')
simplified_test_name = self.id().replace(
'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
'')
# Sanity check to ensure that the substring removal was effective.
assert len(simplified_test_name) < len(self.id())
simplified_test_name = self.id().replace(
'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
'')
if (simplified_test_name in _DISABLED_TESTS and
not options.run_disabled_tests):
self.skipTest('Test is explicitly disabled')
# Sanity check to ensure that the substring removal was effective.
assert len(simplified_test_name) < len(self.id())
single_page_benchmark = SinglePageBenchmark()
with open(path_util.GetExpectationsPath()) as fp:
single_page_benchmark.AugmentExpectationsWithParser(fp.read())
if (simplified_test_name in _DISABLED_TESTS and
not options.run_disabled_tests):
self.skipTest('Test is explicitly disabled')
return_code = single_page_benchmark.Run(options)
single_page_benchmark = SinglePageBenchmark()
with open(path_util.GetExpectationsPath()) as fp:
single_page_benchmark.AugmentExpectationsWithParser(fp.read())
return_code = single_page_benchmark.Run(options)
if return_code == -1:
self.skipTest('The benchmark was not run.')
self.assertEqual(0, return_code, msg='Failed: %s' % benchmark_class)
@@ -247,31 +250,17 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
return SystemHealthBenchmarkSmokeTest(methodName=test_method_name)
def GenerateBenchmarkOptions(benchmark_class):
# Set the benchmark's default arguments.
options = options_for_unittests.GetCopy()
options.output_formats = ['none']
parser = options.CreateParser()
# TODO(nednguyen): probably this logic of setting up the benchmark options
# parser & processing the options should be sharable with telemetry's
# core.
benchmark_class.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark_class.SetArgumentDefaults(parser)
options.MergeDefaultValues(parser.get_default_values())
benchmark_class.ProcessCommandLineArgs(None, options)
benchmark_module.ProcessCommandLineArgs(None, options)
# Only measure a single story so that this test cycles reasonably quickly.
options.pageset_repeat = 1
def GenerateBenchmarkOptions(output_dir, benchmark_cls):
options = options_for_unittests.GetRunOptions(
output_dir=output_dir, benchmark_cls=benchmark_cls)
options.pageset_repeat = 1 # For smoke testing only run each page once.
# Enable browser logging in the smoke test only. Hopefully, this will detect
# all crashes and hence remove the need to enable logging in actual perf
# benchmarks.
options.browser_options.logging_verbosity = 'non-verbose'
options.target_platforms = benchmark_class.GetSupportedPlatformNames(
benchmark_class.SUPPORTED_PLATFORMS)
options.target_platforms = benchmark_cls.GetSupportedPlatformNames(
benchmark_cls.SUPPORTED_PLATFORMS)
return options
@@ -2,13 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import unittest
from telemetry import benchmark as benchmark_module
from telemetry.internal.browser import browser_options
from telemetry.page import legacy_page_test
from telemetry.testing import options_for_unittests
from telemetry.web_perf import timeline_based_measurement
@@ -34,23 +31,12 @@ def _GetAllPossiblePageTestInstances():
benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()
# Get all page test instances from defined benchmarks.
# Note: since this depends on the command line options, there is no guaranteed
# Note: since this depends on the command line options, there is no guarantee
# that this will generate all possible page test instances, but it's good
# enough for smoke test purposes.
for benchmark_class in all_benchmarks_classes:
options = options_for_unittests.GetCopy()
parser = optparse.OptionParser()
browser_options.BrowserOptions.AddCommandLineArgs(parser)
try:
benchmark_class.AddCommandLineArgs(parser)
benchmark_module.AddCommandLineArgs(parser)
benchmark_class.SetArgumentDefaults(parser)
except Exception:
logging.error('Exception raised when processing benchmark %s',
benchmark_class)
raise
options.MergeDefaultValues(parser.get_default_values())
pt = benchmark_class().CreatePageTest(options)
for benchmark_cls in all_benchmarks_classes:
options = options_for_unittests.GetRunOptions(benchmark_cls=benchmark_cls)
pt = benchmark_cls().CreatePageTest(options)
if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
page_test_instances.append(pt)