Commit 3b76cc3e authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Mark test failures as unexpected

Failures that occur while processing test results should be marked
as unexpected (a minimal sketch of what this means follows the
commit header below).

Bug: 1019137
Change-Id: Ib3b880a462a038ea98fc8c56e3ec815ba613ec93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1886894
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#710330}
parent f5d11ef1
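
For context (not part of the commit): in the intermediate results format
each test result is a plain dict, and "marking a failure as unexpected"
means setting two fields rather than one. A minimal sketch, with
hypothetical sample values; only the 'status' and 'expected' keys are
taken from the diff below:

    # Hypothetical test result dict; values are illustrative.
    test_result = {
        'testPath': 'benchmark/story',
        'status': 'PASS',
        'expected': True,
    }

    # A processing failure must flip both fields; updating 'status'
    # alone (as the old code did) can leave the failure still marked
    # as expected.
    test_result['status'] = 'FAIL'
    test_result['expected'] = False

    assert test_result['status'] == 'FAIL'
    assert not test_result['expected']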
@@ -7,6 +7,8 @@ import logging
 import os
 import time
 
+from core.results_processor import util
+
 from tracing.metrics import metric_runner
@@ -46,7 +48,7 @@ def _RunMetric(test_result):
       test_result['testPath'], time.time() - start))
   if mre_result.failures:
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     for f in mre_result.failures:
       logging.error('Failure recorded for test %s: %s',
                     test_result['testPath'], f)
@@ -85,7 +87,7 @@ def ComputeTBMv2Metrics(test_result):
   # details.
   # TODO(crbug.com/1010041): Return a non-zero exit code in this case.
   if trace_size_in_mib > 400:
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     logging.error('%s: Trace size is too big: %s MiB',
                   test_result['testPath'], trace_size_in_mib)
     return
...
@@ -64,6 +64,7 @@ class ComputeMetricsTest(unittest.TestCase):
     histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
     self.assertEqual(test_result['status'], 'FAIL')
+    self.assertFalse(test_result['expected'])
 
   def testComputeTBMv2MetricsFailure(self):
     test_result = testing.TestResult(
@@ -87,6 +88,7 @@ class ComputeMetricsTest(unittest.TestCase):
     histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
     self.assertEqual(test_result['status'], 'FAIL')
+    self.assertFalse(test_result['expected'])
 
   def testComputeTBMv2MetricsSkipped(self):
     test_result = testing.TestResult(
...
@@ -77,7 +77,7 @@ def ProcessResults(options):
           result, upload_bucket, results_label, run_identifier,
           test_suite_start, should_compute_metrics, max_num_values),
       test_results,
-      on_failure=lambda result: result.update(status='FAIL'),
+      on_failure=util.SetUnexpectedFailure,
   )
 
   if should_compute_metrics:
@@ -110,7 +110,7 @@ def ProcessTestResult(test_result, upload_bucket, results_label,
   if max_num_values is not None and num_values > max_num_values:
     logging.error('%s produced %d values, but only %d are allowed.',
                   test_result['testPath'], num_values, max_num_values)
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     del test_result['_histograms']
   else:
     AddDiagnosticsToHistograms(test_result, test_suite_start, results_label)
...
@@ -162,6 +162,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
     self.assertEqual(results['tests']['benchmark']['story1']['actual'], 'PASS')
     self.assertEqual(results['tests']['benchmark']['story2']['actual'], 'FAIL')
+    self.assertTrue(results['tests']['benchmark']['story2']['is_unexpected'])
 
   def testHistogramsOutput(self):
     self.SerializeIntermediateResults(
...
@@ -56,3 +56,9 @@ def IsoTimestampToEpoch(timestamp):
   except ValueError:
     dt = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
   return calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6
+
+
+def SetUnexpectedFailure(test_result):
+  """Update fields of a test result in case of processing failure."""
+  test_result['status'] = 'FAIL'
+  test_result['expected'] = False
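
For illustration, a sketch of how the new helper propagates to the JSON
Test Results output that the integration test above inspects; the
mapping from 'expected' to 'is_unexpected' is an assumption based on
that test, not code from this commit:

    # Copy of the helper added above, so the sketch is self-contained.
    def SetUnexpectedFailure(test_result):
      test_result['status'] = 'FAIL'
      test_result['expected'] = False

    # Hypothetical intermediate result for the 'story2' case above.
    test_result = {'testPath': 'benchmark/story2',
                   'status': 'PASS', 'expected': True}
    SetUnexpectedFailure(test_result)

    # The JSON output then reports the failure as unexpected
    # (assumed mapping, matching the integration test's assertions).
    json_entry = {'actual': test_result['status'],
                  'is_unexpected': not test_result['expected']}
    assert json_entry == {'actual': 'FAIL', 'is_unexpected': True}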