Commit 3b76cc3e authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Mark test failures as unexpected

Failures during the processing of test results should be marked
unexpected.

Bug: 1019137
Change-Id: Ib3b880a462a038ea98fc8c56e3ec815ba613ec93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1886894
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#710330}
parent f5d11ef1
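
The change routes every processing failure through a single helper so that the result's status and its 'expected' flag are updated together; previously only the status was set to 'FAIL', which did not flag the failure as unexpected in the output. A minimal sketch of the intended behaviour, assuming a test result is a plain dict with 'status' and 'expected' keys as in the diff below (the sample dict itself is illustrative):

    # Illustrative sketch (not part of this commit): the effect of the new
    # helper on a test result dict with 'status' and 'expected' keys.
    def SetUnexpectedFailure(test_result):
      """Update fields of a test result in a case of processing failure."""
      test_result['status'] = 'FAIL'
      test_result['expected'] = False

    test_result = {'testPath': 'benchmark/story', 'status': 'PASS', 'expected': True}
    SetUnexpectedFailure(test_result)
    assert test_result['status'] == 'FAIL'
    assert test_result['expected'] is False
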
@@ -7,6 +7,8 @@ import logging
 import os
 import time
 
+from core.results_processor import util
+
 from tracing.metrics import metric_runner
@@ -46,7 +48,7 @@ def _RunMetric(test_result):
       test_result['testPath'], time.time() - start))
 
   if mre_result.failures:
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     for f in mre_result.failures:
       logging.error('Failure recorded for test %s: %s',
                     test_result['testPath'], f)
@@ -85,7 +87,7 @@ def ComputeTBMv2Metrics(test_result):
   # details.
   # TODO(crbug.com/1010041): Return a non-zero exit code in this case.
   if trace_size_in_mib > 400:
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     logging.error('%s: Trace size is too big: %s MiB',
                   test_result['testPath'], trace_size_in_mib)
     return
@@ -64,6 +64,7 @@ class ComputeMetricsTest(unittest.TestCase):
     histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
     self.assertEqual(test_result['status'], 'FAIL')
+    self.assertFalse(test_result['expected'])
 
   def testComputeTBMv2MetricsFailure(self):
     test_result = testing.TestResult(
@@ -87,6 +88,7 @@ class ComputeMetricsTest(unittest.TestCase):
     histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
     self.assertEqual(test_result['status'], 'FAIL')
+    self.assertFalse(test_result['expected'])
 
   def testComputeTBMv2MetricsSkipped(self):
     test_result = testing.TestResult(
@@ -77,7 +77,7 @@ def ProcessResults(options):
           result, upload_bucket, results_label, run_identifier,
           test_suite_start, should_compute_metrics, max_num_values),
       test_results,
-      on_failure=lambda result: result.update(status='FAIL'),
+      on_failure=util.SetUnexpectedFailure,
   )
 
   if should_compute_metrics:
@@ -110,7 +110,7 @@ def ProcessTestResult(test_result, upload_bucket, results_label,
   if max_num_values is not None and num_values > max_num_values:
     logging.error('%s produced %d values, but only %d are allowed.',
                   test_result['testPath'], num_values, max_num_values)
-    test_result['status'] = 'FAIL'
+    util.SetUnexpectedFailure(test_result)
     del test_result['_histograms']
   else:
     AddDiagnosticsToHistograms(test_result, test_suite_start, results_label)
@@ -162,6 +162,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
     self.assertEqual(results['tests']['benchmark']['story1']['actual'], 'PASS')
     self.assertEqual(results['tests']['benchmark']['story2']['actual'], 'FAIL')
+    self.assertTrue(results['tests']['benchmark']['story2']['is_unexpected'])
 
   def testHistogramsOutput(self):
     self.SerializeIntermediateResults(
@@ -56,3 +56,9 @@ def IsoTimestampToEpoch(timestamp):
   except ValueError:
     dt = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')
   return calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6
+
+
+def SetUnexpectedFailure(test_result):
+  """Update fields of a test result in a case of processing failure."""
+  test_result['status'] = 'FAIL'
+  test_result['expected'] = False
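
As the ProcessResults hunk above shows, the same helper also serves as the on_failure callback when test results are processed in parallel. A hypothetical, serial stand-in for that driver (RunInParallel, ComputeMetricsStub, and the sample results are illustrative names, not Chromium code; only SetUnexpectedFailure mirrors the new util helper):

    # Hypothetical sketch of the on_failure pattern from the diff above.
    def SetUnexpectedFailure(test_result):
      test_result['status'] = 'FAIL'
      test_result['expected'] = False

    def RunInParallel(work_fn, test_results, on_failure):
      # Serial stand-in for the parallel map used by the results processor.
      for result in test_results:
        try:
          work_fn(result)
        except Exception:
          on_failure(result)

    def ComputeMetricsStub(test_result):
      # Simulates a metric-computation failure for one story.
      if test_result['testPath'].endswith('story2'):
        raise RuntimeError('metric computation failed')

    test_results = [
        {'testPath': 'benchmark/story1', 'status': 'PASS', 'expected': True},
        {'testPath': 'benchmark/story2', 'status': 'PASS', 'expected': True},
    ]
    RunInParallel(ComputeMetricsStub, test_results,
                  on_failure=SetUnexpectedFailure)
    assert test_results[1] == {
        'testPath': 'benchmark/story2', 'status': 'FAIL', 'expected': False}
    assert test_results[0]['status'] == 'PASS'

Marking the failing result with both fields is what the integration test above checks via the 'is_unexpected' key in the output.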