Commit dcca9cce authored by Caleb Rouleau

emergency hardening of process_perf_results

Bug: 936602, 936500
Change-Id: I9265a2950c185c90da7ee07210ab7ff63f4eb5f6
Reviewed-on: https://chromium-review.googlesource.com/c/1493081
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#636201}
parent 08999f29
@@ -188,24 +188,30 @@ def _handle_perf_json_test_results(
       # Obtain the test name we are running
       is_ref = '.reference' in benchmark_name
       enabled = True
-      with open(join(directory, 'test_results.json')) as json_data:
-        json_results = json.load(json_data)
-      if not json_results:
-        # Output is null meaning the test didn't produce any results.
-        # Want to output an error and continue loading the rest of the
-        # test results.
-        print 'No results produced for %s, skipping upload' % directory
-        continue
-      if json_results.get('version') == 3:
-        # Non-telemetry tests don't have written json results but
-        # if they are executing then they are enabled and will generate
-        # chartjson results.
-        if not bool(json_results.get('tests')):
-          enabled = False
-      if not is_ref:
-        # We don't need to upload reference build data to the
-        # flakiness dashboard since we don't monitor the ref build
-        test_results_list.append(json_results)
+      try:
+        with open(join(directory, 'test_results.json')) as json_data:
+          json_results = json.load(json_data)
+          if not json_results:
+            # Output is null meaning the test didn't produce any results.
+            # Want to output an error and continue loading the rest of the
+            # test results.
+            print 'No results produced for %s, skipping upload' % directory
+            continue
+          if json_results.get('version') == 3:
+            # Non-telemetry tests don't have written json results but
+            # if they are executing then they are enabled and will generate
+            # chartjson results.
+            if not bool(json_results.get('tests')):
+              enabled = False
+          if not is_ref:
+            # We don't need to upload reference build data to the
+            # flakiness dashboard since we don't monitor the ref build
+            test_results_list.append(json_results)
+      except IOError as e:
+        # TODO(crbug.com/936602): Figure out how to surface these errors. Should
+        # we have a non-zero exit code if we error out?
+        logging.error('Failed to obtain test results for %s: %s',
+                      benchmark_name, e)
       if not enabled:
         # We don't upload disabled benchmarks or tests that are run
         # as a smoke test
@@ -375,8 +381,18 @@ def _merge_perf_results(benchmark_name, results_filename, directories):
   collected_results = []
   for directory in directories:
     filename = join(directory, 'perf_results.json')
-    with open(filename) as pf:
-      collected_results.append(json.load(pf))
+    try:
+      with open(filename) as pf:
+        collected_results.append(json.load(pf))
+    except IOError as e:
+      # TODO(crbug.com/936602): Figure out how to surface these errors. Should
+      # we have a non-zero exit code if we error out?
+      logging.error('Failed to obtain perf results from %s: %s',
+                    directory, e)
+  if not collected_results:
+    logging.error('Failed to obtain any perf results from %s.',
+                  benchmark_name)
+    return
 
   # Assuming that multiple shards will only be chartjson or histogram set
   # Non-telemetry benchmarks only ever run on one shard
...
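Both hunks above apply the same hardening pattern: the per-shard file open and JSON parse are wrapped in a try/except IOError block, the failure is logged, and processing continues with the remaining shards instead of one missing results file crashing the whole merge step. A minimal standalone sketch of that pattern is below; it is not the actual Chromium module, and the function and variable names are illustrative only.

import json
import logging
from os.path import join


def collect_shard_results(benchmark_name, directories, filename):
  """Loads <directory>/<filename> from each shard, skipping unreadable ones."""
  collected = []
  for directory in directories:
    try:
      with open(join(directory, filename)) as f:
        collected.append(json.load(f))
    except IOError as e:
      # Log and keep going, mirroring the TODO(crbug.com/936602) behaviour:
      # one bad shard should not abort the merge of the others.
      logging.error('Failed to obtain results from %s: %s', directory, e)
  if not collected:
    # Nothing could be read at all; as in the patch above, the failure is
    # surfaced only through the error log.
    logging.error('Failed to obtain any results for %s.', benchmark_name)
  return collected

Note that only IOError is caught, so a results file that exists but contains malformed JSON would still raise; that matches the behaviour of the patch above.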
@@ -160,5 +160,21 @@ class ProcessPerfResultsIntegrationTest(unittest.TestCase):
         "speedometer.reference": True
     })
 
+
+class ProcessPerfResults_HardenedUnittest(unittest.TestCase):
+  def test_handle_perf_json_test_results_IOError(self):
+    directory_map = {
+        'benchmark.example': ['directory_that_does_not_exist']}
+    test_results_list = []
+    ppr_module._handle_perf_json_test_results(directory_map, test_results_list)
+    self.assertEqual(test_results_list, [])
+
+  def test_merge_perf_results_IOError(self):
+    results_filename = None
+    directories = ['directory_that_does_not_exist']
+    ppr_module._merge_perf_results('benchmark.example', results_filename,
+                                   directories)
+
+
 if __name__ == '__main__':
   unittest.main()
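The two new tests only exercise the IOError paths: the first checks that test_results_list stays empty when every shard directory is missing, and the second checks that _merge_perf_results returns without raising. Assuming this hunk belongs to the script's standard unittest module (the file name is not shown in this diff; 'process_perf_results_unittest' is an assumed module name), the hardened tests could be run on their own with the stock unittest loader:

# Hypothetical runner; 'process_perf_results_unittest' is an assumed module
# name, since the diff above does not show which file it belongs to.
import unittest

if __name__ == '__main__':
  suite = unittest.defaultTestLoader.loadTestsFromName(
      'process_perf_results_unittest.ProcessPerfResults_HardenedUnittest')
  unittest.TextTestRunner(verbosity=2).run(suite)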