Commit 1d61f890 authored by Juan Antonio Navarro Perez's avatar Juan Antonio Navarro Perez Committed by Commit Bot

[tools/perf] Add --max-values-per-test-case option [fix typos]

Had written these, but forgot to upload before landing:
https://chromium-review.googlesource.com/c/chromium/src/+/1879207

TBR=crouleau@chromium.org

Bug: 1001038
Change-Id: I8794343b81129e18736277bad88c23088e3b18fe
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1883722
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Commit-Queue: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#709958}
parent a82dd6c9
...@@ -37,11 +37,11 @@ from benchmarks import v8_browsing ...@@ -37,11 +37,11 @@ from benchmarks import v8_browsing
# data to the chrome perf dashboard. So the smoke tests below cap the max # data to the chrome perf dashboard. So the smoke tests below cap the max
# number of values that each story tested would produce when running on the # number of values that each story tested would produce when running on the
# waterfall. # waterfall.
MAX_VALUES_PERT_TEST_CASE = 1000 MAX_VALUES_PER_TEST_CASE = 1000
def SmokeTestGenerator(benchmark_class, num_pages=1): def SmokeTestGenerator(benchmark_class, num_pages=1):
"""Generates a somke test for the first N pages from a benchmark. """Generates a smoke test for the first N pages from a benchmark.
Args: Args:
benchmark_class: a benchmark class to smoke test. benchmark_class: a benchmark class to smoke test.
...@@ -70,7 +70,7 @@ def SmokeTestGenerator(benchmark_class, num_pages=1): ...@@ -70,7 +70,7 @@ def SmokeTestGenerator(benchmark_class, num_pages=1):
environment=chromium_config.GetDefaultChromiumConfig()) environment=chromium_config.GetDefaultChromiumConfig())
options.pageset_repeat = 1 # For smoke testing only run the page once. options.pageset_repeat = 1 # For smoke testing only run the page once.
options.output_formats = ['histograms'] options.output_formats = ['histograms']
options.max_values_per_test_case = MAX_VALUES_PERT_TEST_CASE options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
return_code = benchmark_class().Run(options) return_code = benchmark_class().Run(options)
if return_code == -1: if return_code == -1:
self.skipTest('The benchmark was not run.') self.skipTest('The benchmark was not run.')
......
...@@ -199,7 +199,7 @@ _DISABLED_TESTS = frozenset({ ...@@ -199,7 +199,7 @@ _DISABLED_TESTS = frozenset({
# data to the chrome perf dashboard. So the smoke tests below cap the max # data to the chrome perf dashboard. So the smoke tests below cap the max
# number of values that each story tested would produce when running on the # number of values that each story tested would produce when running on the
# waterfall. # waterfall.
MAX_VALUES_PERT_TEST_CASE = 1000 MAX_VALUES_PER_TEST_CASE = 1000
def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test): def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
...@@ -274,7 +274,7 @@ def GenerateBenchmarkOptions(output_dir, benchmark_cls): ...@@ -274,7 +274,7 @@ def GenerateBenchmarkOptions(output_dir, benchmark_cls):
environment=chromium_config.GetDefaultChromiumConfig()) environment=chromium_config.GetDefaultChromiumConfig())
options.pageset_repeat = 1 # For smoke testing only run each page once. options.pageset_repeat = 1 # For smoke testing only run each page once.
options.output_formats = ['histograms'] options.output_formats = ['histograms']
options.max_values_per_test_case = MAX_VALUES_PERT_TEST_CASE options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
# Enable browser logging in the smoke test only. Hopefully, this will detect # Enable browser logging in the smoke test only. Hopefully, this will detect
# all crashes and hence remove the need to enable logging in actual perf # all crashes and hence remove the need to enable logging in actual perf
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment