Commit 427101e9 authored by Caleb Rouleau, committed by Commit Bot

[performance test runner] Format test results properly for total failures.

I'm hoping this will make Milo's colors make sense again.

Currently, if a benchmark fails completely and provides no
test_results JSON, we ignore that output and do not surface it in
the merged output.json. This change fixes that by writing a minimal
"interrupted" results file instead (sketched below).

Bug: 947100
Change-Id: I42259ae6c3597586da4c35b1f2bea76f1972529f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1637037
Reviewed-by: John Chen <johnchen@chromium.org>
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#664852}
parent a93e99ea
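For reference, this is the shape of the placeholder test-results JSON that the runner now writes when a benchmark dies before producing one (JSON Test Results format version 3, per src/docs/testing/json_test_results_format.md; the timestamp value here is illustrative):

{
  "interrupted": true,
  "num_failures_by_type": {},
  "seconds_since_epoch": 1559581200.0,
  "tests": {},
  "version": 3
}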
@@ -164,6 +164,29 @@ def parse_common_test_results(json_results, test_separator='/'):
  return results


def write_interrupted_test_results_to(filepath, test_start_time):
  """Writes a test results JSON file* to filepath.

  This JSON file is formatted to explain that something went wrong.

  *src/docs/testing/json_test_results_format.md

  Args:
    filepath: A path to a file to write the output to.
    test_start_time: The start time of the test run expressed as a
      floating-point offset in seconds from the UNIX epoch.
  """
  with open(filepath, 'w') as fh:
    output = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    json.dump(output, fh)


def get_gtest_summary_passes(output):
  """Returns a mapping of test to boolean indicating if the test passed.
......
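A minimal usage sketch of the new helper, assuming testing/scripts/common.py is importable as common and using an illustrative output path:

import json
import time

import common  # testing/scripts/common.py, assumed to be on sys.path

# Write the version-3 "interrupted" placeholder for a run that produced
# no test-results.json of its own.
common.write_interrupted_test_results_to('/tmp/test_results.json', time.time())

# The file now parses as an interrupted run with an empty test set, so
# downstream merge scripts surface the failure instead of ignoring it.
with open('/tmp/test_results.json') as fh:
  results = json.load(fh)
assert results['interrupted'] is True
assert results['tests'] == {}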
@@ -341,10 +341,13 @@ def execute_telemetry_benchmark(
     else:
       return_code = test_env.run_command_with_output(
           command, env=env, stdoutfile=output_paths.logs)
-    expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
-    shutil.move(expected_perf_filename, output_paths.perf_results)
     expected_results_filename = os.path.join(temp_dir, 'test-results.json')
     if os.path.exists(expected_results_filename):
       shutil.move(expected_results_filename, output_paths.test_results)
+    else:
+      common.write_interrupted_test_results_to(output_paths.test_results, start)
+    expected_perf_filename = os.path.join(temp_dir, 'histograms.json')
+    shutil.move(expected_perf_filename, output_paths.perf_results)
     csv_file_path = os.path.join(temp_dir, 'results.csv')
     if os.path.isfile(csv_file_path):
......
@@ -108,6 +108,47 @@ class ScriptsSmokeTest(unittest.TestCase):
    finally:
      shutil.rmtree(tempdir)

  @decorators.Enabled('linux')  # Testing platform-independent code.
  def testRunPerformanceTestsTelemetry_NoTestResults(self):
    """Test that test results output gets returned for complete failures."""
    options = options_for_unittests.GetCopy()
    browser_type = options.browser_type
    tempdir = tempfile.mkdtemp()
    benchmarks = ['benchmark1', 'benchmark2']
    return_code, stdout = self.RunPerfScript(
        '../../testing/scripts/run_performance_tests.py '
        '../../tools/perf/testdata/fail_and_do_nothing '
        '--benchmarks=%s '
        '--browser=%s '
        '--isolated-script-test-output=%s' % (
            ','.join(benchmarks),
            browser_type,
            os.path.join(tempdir, 'output.json')
        ))
    self.assertNotEqual(return_code, 0)
    try:
      with open(os.path.join(tempdir, 'output.json')) as f:
        test_results = json.load(f)
      self.assertIsNotNone(
          test_results, 'json_test_results should be populated: ' + stdout)
      self.assertTrue(
          test_results['interrupted'],
          'if the benchmark does not populate test results, then we should '
          'populate it with a failure.')
      for benchmark in benchmarks:
        with open(os.path.join(tempdir, benchmark, 'test_results.json')) as f:
          test_results = json.load(f)
        self.assertIsNotNone(
            test_results, 'json_test_results should be populated: ' + stdout)
        self.assertTrue(
            test_results['interrupted'],
            'if the benchmark does not populate test results, then we should '
            'populate it with a failure.')
    except IOError as e:
      self.fail('json_test_results should be populated: ' + stdout + str(e))
    finally:
      shutil.rmtree(tempdir)

  # Android: crbug.com/932301
  # ChromeOS: crbug.com/754913
  @decorators.Disabled('chromeos', 'android')
......
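For reference, the smoke test above expects the runner to leave the following layout under its temporary directory (the per-benchmark directory names come from --benchmarks; the tempdir path itself is illustrative):

<tempdir>/
  output.json                  # merged results; 'interrupted' is true
  benchmark1/
    test_results.json          # per-benchmark placeholder; 'interrupted' is true
  benchmark2/
    test_results.json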
New file: tools/perf/testdata/fail_and_do_nothing

#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a script used for testing as a convenient failing binary."""

import sys

sys.exit(1)