Commit 1ce60a43 authored by Caleb Rouleau, committed by Commit Bot

[Benchmarking] Exit code 111 indicates no stories were run.

See crbug.com/1019139#c8 for this change's motivation.

See https://chromium-review.googlesource.com/c/catapult/+/1894511
for catapult-side change.

This change will be followed up with another change that
restricts the exit code to 111 completely. (That can't
be done until the catapult-side change lands, and that
change can't land until this one does.)

Bug: 1019139
Change-Id: If0243e792282cfcbcbeb2a943648efd7916e1d57
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1893823
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Auto-Submit: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#711891}
parent 79037111
......@@ -377,6 +377,15 @@ def execute_telemetry_benchmark(
print_duration('executing benchmark %s' % command_generator.benchmark, start)
# Telemetry sets exit code to -1 to indicate that no stories were run. This
# becomes 255 on linux because linux doesn't support -1 so it does modulo:
# -1 % 256 == 255.
# TODO(crbug.com/1019139): Make 111 be the exit code that means
# "no stories were run.".
if return_code in (111, -1, 255):
print ('Exit code %s indicates that no stories were run, so we are marking '
'this as a success.' % return_code)
return 0
if return_code:
return return_code
return 0
......
......@@ -72,7 +72,9 @@ def SmokeTestGenerator(benchmark_class, num_pages=1):
options.output_formats = ['histograms']
options.max_values_per_test_case = MAX_VALUES_PER_TEST_CASE
return_code = benchmark_class().Run(options)
if return_code == -1:
# TODO(crbug.com/1019139): Make 111 be the exit code that means
# "no stories were run.".
if return_code in (-1, 111):
self.skipTest('The benchmark was not run.')
self.assertEqual(
return_code, 0,
......
......@@ -238,7 +238,9 @@ def _GenerateSmokeTestCase(benchmark_class, story_to_smoke_test):
self.skipTest('Test is explicitly disabled')
single_page_benchmark = SinglePageBenchmark()
return_code = single_page_benchmark.Run(options)
if return_code == -1:
# TODO(crbug.com/1019139): Make 111 be the exit code that means
# "no stories were run.".
if return_code in (-1, 111):
self.skipTest('The benchmark was not run.')
self.assertEqual(
return_code, 0,
......
......@@ -127,13 +127,13 @@ def GenerateExitCode(test_results):
Returns:
1 if there were failed tests.
-1 if all tests were skipped.
111 if all tests were skipped. (See crbug.com/1019139#c8 for details).
0 otherwise.
"""
if any(r['status'] == 'FAIL' for r in test_results):
return 1
if all(r['status'] == 'SKIP' for r in test_results):
return -1
return 111
return 0
......
......@@ -664,7 +664,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
'--output-dir', self.output_dir,
'--intermediate-dir', self.intermediate_dir])
self.assertEqual(exit_code, -1)
self.assertEqual(exit_code, 111)
def testExitCodeSomeSkipped(self):
self.SerializeIntermediateResults(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment