Commit 9e560b59 authored by dpranke, committed by Commit bot

Fix handling of skipped tests in telemetry_unittests when uploading results.

This fixes a crash when a test is skipped only on ChromeOS. It turns out
that telemetry uses two mechanisms for disabling tests, and only one of
them was provoking the crash; that mechanism appears to be used only by
this one ChromeOS test (everything else uses the other mechanism).
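
For context, a minimal sketch of the two mechanisms follows. The test class
and method names are invented for illustration; telemetry's decorators
package is the one referenced in the TODO in the diff below, but treat the
exact decorator signature as an assumption:

import unittest

from telemetry import decorators


class ExampleTest(unittest.TestCase):

  # Mechanism 1 (assumed API): telemetry's decorators package. Tests
  # disabled this way are filtered out before the suite is built, so they
  # never appear in AllTestNames(suite) at all.
  @decorators.Disabled('chromeos')
  def testDisabledViaTelemetryDecorators(self):
    pass

  # Mechanism 2: the stdlib unittest skip decorators. These tests stay in
  # the suite but neither pass nor fail, which is the case that crashed
  # result uploading and that this CL now reports as SKIP.
  @unittest.skipIf(True, 'always skipped in this sketch')
  def testDisabledViaUnittestSkip(self):
    pass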

R=dtu@chromium.org, nduca@chromium.org
BUG=397279

Review URL: https://codereview.chromium.org/484333002

Cr-Commit-Position: refs/heads/master@{#291980}
parent 0ddb7f78
...
@@ -85,28 +85,44 @@ def FullResults(args, suite, results):
     key, val = md.split('=', 1)
     full_results[key] = val
 
-  # TODO(dpranke): Handle skipped tests as well.
   all_test_names = AllTestNames(suite)
+  sets_of_passing_test_names = map(PassingTestNames, results)
+  sets_of_failing_test_names = map(FailedTestNames, results)
+
+  # TODO(crbug.com/405379): This handles tests that are skipped via the
+  # unittest skip decorators (like skipUnless). The tests that are skipped via
+  # telemetry's decorators package are not included in the test suite at all so
+  # we need those to be passed in in order to include them.
+  skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0]
+                   - sets_of_failing_test_names[0])
+
+  num_tests = len(all_test_names)
   num_failures = NumFailuresAfterRetries(results)
+  num_skips = len(skipped_tests)
+  num_passes = num_tests - num_failures - num_skips
   full_results['num_failures_by_type'] = {
       'FAIL': num_failures,
-      'PASS': len(all_test_names) - num_failures,
+      'PASS': num_passes,
+      'SKIP': num_skips,
   }
 
-  sets_of_passing_test_names = map(PassingTestNames, results)
-  sets_of_failing_test_names = map(FailedTestNames, results)
-
   full_results['tests'] = {}
 
   for test_name in all_test_names:
-    value = {
-        'expected': 'PASS',
-        'actual': ActualResultsForTest(test_name, sets_of_failing_test_names,
-                                       sets_of_passing_test_names)
-    }
-    if value['actual'].endswith('FAIL'):
-      value['is_unexpected'] = True
+    if test_name in skipped_tests:
+      value = {
+          'expected': 'SKIP',
+          'actual': 'SKIP',
+      }
+    else:
+      value = {
+          'expected': 'PASS',
+          'actual': ActualResultsForTest(test_name,
+                                         sets_of_failing_test_names,
+                                         sets_of_passing_test_names),
+      }
+      if value['actual'].endswith('FAIL'):
+        value['is_unexpected'] = True
     _AddPathToTrie(full_results['tests'], test_name, value)
 
   return full_results
...
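
For illustration only, a run over two hypothetical tests, one passing and one
skipped via a unittest skip decorator, would now produce a results dictionary
shaped roughly like this. The test names are invented, the nesting assumes
_AddPathToTrie splits test names on '.', and the metadata keys copied from the
command line are omitted:

{
  'num_failures_by_type': {
    'FAIL': 0,
    'PASS': 1,
    'SKIP': 1,
  },
  'tests': {
    'exampleSuite': {
      'testThatPasses': {'expected': 'PASS', 'actual': 'PASS'},
      'testSkippedOnCros': {'expected': 'SKIP', 'actual': 'SKIP'},
    },
  },
}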