Commit 88a57b64 authored by Ashley Enstad, committed by Commit Bot

Upload the system_health benchmarks to the flakiness dashboard.

This change enables perf_data_generator to upload results from an
allowlisted set of system_health benchmarks to the flakiness dashboard,
by requesting json-test-results output for those benchmarks on the
chromium.perf.fyi waterfall.

BUG=chromium:717394

Change-Id: I3952887c527c40fb5d061e9ee8aa4875794075c7
Reviewed-on: https://chromium-review.googlesource.com/574687
Commit-Queue: Ashley Enstad <ashleymarie@chromium.org>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#487310}
parent d2b3c76d
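In short, perf_data_generator now carries a small allowlist of system_health benchmarks and, when generating tests for the chromium.perf.fyi waterfall, appends --output-format=json-test-results to their Telemetry arguments so the results can feed the flakiness dashboard. A condensed sketch of that behavior follows; maybe_upload_to_flakiness_dashboard is a hypothetical helper used only for illustration, while in the actual change the check is inlined in generate_telemetry_test, as the diff below shows.

# Illustrative sketch only; names of the list and the appended flag are taken
# from the diff below, the helper function itself is hypothetical.
BENCHMARKS_TO_UPLOAD_TO_FLAKINESS_DASHBOARD = [
    'system_health.common_desktop',
    'system_health.common_mobile',
    'system_health.memory_desktop',
    'system_health.memory_mobile',
]

def maybe_upload_to_flakiness_dashboard(test_args, benchmark_name,
                                        for_fyi_waterfall):
  # Only FYI-waterfall testers opt in, and only for allowlisted benchmarks.
  if (for_fyi_waterfall and
      benchmark_name in BENCHMARKS_TO_UPLOAD_TO_FLAKINESS_DASHBOARD):
    test_args.append('--output-format=json-test-results')
  return test_args

# Example: on the FYI waterfall, the flag is appended for a listed benchmark.
args = maybe_upload_to_flakiness_dashboard(
    ['-v', '--upload-results'], 'system_health.common_desktop',
    for_fyi_waterfall=True)
# args now ends with '--output-format=json-test-results'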
@@ -4411,7 +4411,8 @@
         "-v",
         "--upload-results",
         "--output-format=chartjson",
-        "--browser=release_x64"
+        "--browser=release_x64",
+        "--output-format=json-test-results"
       ],
       "isolate_name": "telemetry_perf_tests",
       "name": "system_health.common_desktop",
@@ -4441,6 +4442,7 @@
         "--upload-results",
         "--output-format=chartjson",
         "--browser=reference",
+        "--output-format=json-test-results",
         "--output-trace-tag=_ref"
       ],
       "isolate_name": "telemetry_perf_tests",
@@ -4470,7 +4472,8 @@
         "-v",
         "--upload-results",
         "--output-format=chartjson",
-        "--browser=release_x64"
+        "--browser=release_x64",
+        "--output-format=json-test-results"
       ],
       "isolate_name": "telemetry_perf_tests",
       "name": "system_health.memory_desktop",
@@ -4500,6 +4503,7 @@
         "--upload-results",
         "--output-format=chartjson",
         "--browser=reference",
+        "--output-format=json-test-results",
         "--output-trace-tag=_ref"
       ],
       "isolate_name": "telemetry_perf_tests",
@@ -9479,7 +9483,8 @@
         "-v",
         "--upload-results",
         "--output-format=chartjson",
-        "--browser=release_x64"
+        "--browser=release_x64",
+        "--output-format=json-test-results"
       ],
       "isolate_name": "telemetry_perf_tests",
       "name": "system_health.common_desktop",
@@ -9509,6 +9514,7 @@
         "--upload-results",
         "--output-format=chartjson",
         "--browser=reference",
+        "--output-format=json-test-results",
         "--output-trace-tag=_ref"
       ],
       "isolate_name": "telemetry_perf_tests",
@@ -9538,7 +9544,8 @@
         "-v",
         "--upload-results",
         "--output-format=chartjson",
-        "--browser=release_x64"
+        "--browser=release_x64",
+        "--output-format=json-test-results"
       ],
       "isolate_name": "telemetry_perf_tests",
       "name": "system_health.memory_desktop",
@@ -9568,6 +9575,7 @@
         "--upload-results",
         "--output-format=chartjson",
         "--browser=reference",
+        "--output-format=json-test-results",
         "--output-trace-tag=_ref"
       ],
       "isolate_name": "telemetry_perf_tests",
@@ -582,7 +582,14 @@ def generate_isolate_script_entry(swarming_dimensions, test_args,
   return result
 
 
-def generate_telemetry_test(swarming_dimensions, benchmark_name, browser):
+BENCHMARKS_TO_UPLOAD_TO_FLAKINESS_DASHBOARD = ['system_health.common_desktop',
+                                               'system_health.common_mobile',
+                                               'system_health.memory_desktop',
+                                               'system_health.memory_mobile']
+
+
+def generate_telemetry_test(swarming_dimensions,
+                            benchmark_name, browser, for_fyi_waterfall=False):
   # The step name must end in 'test' or 'tests' in order for the
   # results to automatically show up on the flakiness dashboard.
   # (At least, this was true some time ago.) Continue to use this
@@ -598,6 +605,10 @@ def generate_telemetry_test(swarming_dimensions, benchmark_name, browser):
   # When this is enabled on more than just windows machines we will need
   # --device=android
+  if (for_fyi_waterfall and
+      benchmark_name in BENCHMARKS_TO_UPLOAD_TO_FLAKINESS_DASHBOARD):
+    test_args.append('--output-format=json-test-results')
+
   ignore_task_failure = False
   step_name = benchmark_name
   if browser == 'reference':
@@ -672,7 +683,8 @@ def ShouldBenchmarkBeScheduled(benchmark, platform):
 def generate_telemetry_tests(name, tester_config, benchmarks,
                              benchmark_sharding_map,
-                             benchmark_ref_build_blacklist):
+                             benchmark_ref_build_blacklist,
+                             for_fyi_waterfall=False):
   isolated_scripts = []
   # First determine the browser that you need based on the tester
   browser_name = ''
@@ -709,7 +721,7 @@ def generate_telemetry_tests(name, tester_config, benchmarks,
           dimension, device))
       test = generate_telemetry_test(
-        swarming_dimensions, benchmark.Name(), browser_name)
+        swarming_dimensions, benchmark.Name(), browser_name, for_fyi_waterfall)
       isolated_scripts.append(test)
       # Now create another executable for this benchmark on the reference browser
       # if it is not blacklisted from running on the reference browser.
@@ -717,7 +729,7 @@
       if not tester_config.get('replace_system_webview', False) and (
           benchmark.Name() not in benchmark_ref_build_blacklist):
         reference_test = generate_telemetry_test(
-          swarming_dimensions, benchmark.Name(),'reference')
+          swarming_dimensions, benchmark.Name(),'reference', for_fyi_waterfall)
         isolated_scripts.append(reference_test)
   return isolated_scripts
@@ -792,7 +804,7 @@ def generate_all_tests(waterfall):
     # Generate benchmarks
     isolated_scripts = generate_telemetry_tests(
       name, config, all_benchmarks, benchmark_sharding_map,
-      BENCHMARK_REF_BUILD_BLACKLIST)
+      BENCHMARK_REF_BUILD_BLACKLIST, waterfall['name']=='chromium.perf.fyi')
     # Generate swarmed non-telemetry tests if present
     if config['swarming_dimensions'][0].get('perf_tests', False):
       isolated_scripts += generate_cplusplus_isolate_script_test(
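To see the new parameter end to end, here is a minimal usage sketch that mirrors the unit test added below; the bare import assumes perf_data_generator is importable from the current directory, which may not match the real checkout layout.

# Usage sketch, under the import assumption noted above.
import perf_data_generator

dimensions = [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}]
entry = perf_data_generator.generate_telemetry_test(
    dimensions, 'system_health.common_desktop', 'release',
    for_fyi_waterfall=True)
# The FYI-only flag should now be present in the generated args.
assert '--output-format=json-test-results' in entry['args']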
@@ -148,6 +148,28 @@ class PerfDataGeneratorTest(unittest.TestCase):
         '--webview-embedder-apk=../../out/Release/apks/SystemWebViewShell.apk'])
     self.assertEquals(test['isolate_name'], 'telemetry_perf_webview_tests')
 
+  def testGenerateTelemetryTestsWithUploadToFlakinessDashboard(self):
+    swarming_dimensions = [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}]
+    test = perf_data_generator.generate_telemetry_test(
+        swarming_dimensions, 'system_health.common_desktop', 'release', True)
+    expected_generated_test = {
+        'override_compile_targets': ['telemetry_perf_tests'],
+        'args': ['system_health.common_desktop', '-v', '--upload-results',
+                 '--output-format=chartjson', '--browser=release',
+                 '--output-format=json-test-results'],
+        'swarming': {
+            'ignore_task_failure': False,
+            'dimension_sets': [{'os': 'SkyNet', 'id': 'T-850', 'pool': 'T-RIP'}],
+            'hard_timeout': 10800,
+            'can_use_on_swarming_builders': True,
+            'expiration': 36000,
+            'io_timeout': 3600,
+        },
+        'name': 'system_health.common_desktop',
+        'isolate_name': 'telemetry_perf_tests',
+    }
+    self.assertEquals(test, expected_generated_test)
+
   def testGenerateTelemetryTestsBlacklistedReferenceBuildTest(self):
     class BlacklistedBenchmark(benchmark.Benchmark):
       @classmethod