Commit dcca9cce authored by Caleb Rouleau

emergency hardening of process_perf_results

Bug: 936602, 936500
Change-Id: I9265a2950c185c90da7ee07210ab7ff63f4eb5f6
Reviewed-on: https://chromium-review.googlesource.com/c/1493081
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#636201}
parent 08999f29
@@ -188,6 +188,7 @@ def _handle_perf_json_test_results(
       # Obtain the test name we are running
       is_ref = '.reference' in benchmark_name
       enabled = True
+      try:
         with open(join(directory, 'test_results.json')) as json_data:
           json_results = json.load(json_data)
           if not json_results:
@@ -206,6 +207,11 @@
           if not is_ref:
             # We don't need to upload reference build data to the
             # flakiness dashboard since we don't monitor the ref build
             test_results_list.append(json_results)
+      except IOError as e:
+        # TODO(crbug.com/936602): Figure out how to surface these errors. Should
+        # we have a non-zero exit code if we error out?
+        logging.error('Failed to obtain test results for %s: %s',
+                      benchmark_name, e)
       if not enabled:
         # We don't upload disabled benchmarks or tests that are run
         # as a smoke test
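
[Note] The hardening above is a plain log-and-continue read: a missing or unreadable test_results.json on one shard is logged instead of crashing the whole merge step. A minimal standalone sketch of the same pattern (hypothetical load_json_results helper, not the actual Chromium code):

import json
import logging
import os


def load_json_results(directory, filename='test_results.json'):
  # Best-effort read: return the parsed JSON, or None when the file is
  # missing or unreadable, mirroring the "except IOError" added above.
  path = os.path.join(directory, filename)
  try:
    with open(path) as json_data:
      return json.load(json_data)
  except IOError as e:
    # One bad shard should not abort processing of the others.
    logging.error('Failed to obtain test results for %s: %s', path, e)
    return None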
@@ -375,8 +381,18 @@ def _merge_perf_results(benchmark_name, results_filename, directories):
   collected_results = []
   for directory in directories:
     filename = join(directory, 'perf_results.json')
+    try:
       with open(filename) as pf:
         collected_results.append(json.load(pf))
+    except IOError as e:
+      # TODO(crbug.com/936602): Figure out how to surface these errors. Should
+      # we have a non-zero exit code if we error out?
+      logging.error('Failed to obtain perf results from %s: %s',
+                    directory, e)
+  if not collected_results:
+    logging.error('Failed to obtain any perf results from %s.',
+                  benchmark_name)
+    return
   # Assuming that multiple shards will only be chartjson or histogram set
   # Non-telemetry benchmarks only ever run on one shard
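
[Note] One nuance of this guard: "except IOError" catches a file that is missing or unreadable (the open() call), but a perf_results.json that exists with malformed contents makes json.load raise ValueError, which this hunk still lets propagate. A small standalone illustration of the distinction (not Chromium code):

import json

try:
  with open('directory_that_does_not_exist/perf_results.json') as pf:
    json.load(pf)
except IOError as e:
  print('missing file is caught: %s' % e)

try:
  json.loads('not valid json')
except ValueError as e:  # json.JSONDecodeError subclasses ValueError in Python 3
  print('malformed JSON is a ValueError, not an IOError: %s' % e)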
@@ -160,5 +160,21 @@ class ProcessPerfResultsIntegrationTest(unittest.TestCase):
           "speedometer.reference": True
       })
+
+
+class ProcessPerfResults_HardenedUnittest(unittest.TestCase):
+  def test_handle_perf_json_test_results_IOError(self):
+    directory_map = {
+        'benchmark.example': ['directory_that_does_not_exist']}
+    test_results_list = []
+    ppr_module._handle_perf_json_test_results(directory_map, test_results_list)
+    self.assertEqual(test_results_list, [])
+
+  def test_merge_perf_results_IOError(self):
+    results_filename = None
+    directories = ['directory_that_does_not_exist']
+    ppr_module._merge_perf_results('benchmark.example', results_filename,
+                                   directories)
+
+
 if __name__ == '__main__':
   unittest.main()
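
[Note] The new tests drive the hardened code paths directly: a shard directory that does not exist must neither raise nor append results. Assuming the test module's existing ppr_module import alias, the same check can be reproduced in isolation:

import process_perf_results as ppr_module  # alias taken from the test file

test_results_list = []
ppr_module._handle_perf_json_test_results(
    {'benchmark.example': ['directory_that_does_not_exist']},
    test_results_list)
assert test_results_list == []  # errors were logged, nothing was appended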