Commit 86308fbe authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Refactoring: parallel test result processing

Before this CL, Results Processor did its processing in several stages:
aggregating traces for all tests in parallel, then computing metrics for
all tests in parallel, and so on. Now we move the parallelization to
the upper level, so that all processing for a particular test is done
inside one thread. This allows us to:
1) Make processing of tests independent, so that errors in one of them
do not affect others.
2) Mark tests that have failures as 'FAIL' in the final results.
3) Add test-specific diagnostics to histograms.

We also add support for the new intermediate results format, in which there
are no benchmarkRun messages and all metadata is contained in testResults.

Bug: 981349, 1015192
Change-Id: I75d036a3ded439e092ee7b892a26bc26f3600520
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1869212
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#708235}
parent 6f2e929e
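The diff below is easier to follow with the new top-level flow in mind. The following is an illustrative sketch only, not code from this CL: the module paths and the helper names _ProcessTestResult and ProcessAllResults are assumptions; only ComputeTBMv2Metrics's new per-test signature and ApplyInParallel's on_failure hook appear in the diff itself.

# Illustrative sketch; module paths and helper names are assumptions, not
# code from this CL.
from core.results_processor import compute_metrics
from core.results_processor import util

from tracing.value import histogram_set


def _ProcessTestResult(test_result):
  # Hypothetical per-test pipeline: everything for one test runs in a single
  # worker, so an error here cannot affect the results of other tests.
  test_result['_histograms'] = histogram_set.HistogramSet()
  compute_metrics.ComputeTBMv2Metrics(test_result)


def ProcessAllResults(test_results):
  # The parallelism now lives at the top level: one work item per test.
  # on_failure marks only the offending test as failed in the final output.
  util.ApplyInParallel(
      _ProcessTestResult, test_results,
      on_failure=lambda test_result: test_result.update(status='FAIL'))

The sketch mirrors the changes below: ComputeTBMv2Metrics now takes a single test result and fills test_result['_histograms'], and ApplyInParallel gains an on_failure callback instead of returning a generator of results, so a failing test can be marked 'FAIL' without disturbing the others.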
@@ -7,8 +7,6 @@ import logging
 import os
 import time
 
-from core.results_processor import util
-
 from tracing.metrics import metric_runner

@@ -24,7 +22,7 @@ HISTOGRAM_DICTS_KEY = 'histogram_dicts'
 HISTOGRAM_DICTS_FILE = 'histogram_dicts.json'
 
 
-def _PoolWorker(test_result):
+def _RunMetric(test_result):
   metrics = [tag['value'] for tag in test_result['tags']
              if tag['key'] == 'tbmv2']
   html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]

@@ -56,7 +54,7 @@ def _PoolWorker(test_result):
   return mre_result.pairs.get('histograms', [])
 
 
-def ComputeTBMv2Metrics(intermediate_results):
+def ComputeTBMv2Metrics(test_result):
   """Compute metrics on aggregated traces in parallel.
 
   For each test run that has an aggregate trace and some TBMv2 metrics listed

@@ -64,40 +62,32 @@ def ComputeTBMv2Metrics(intermediate_results):
   histograms. Note: the order of histograms in the results may be different
   from the order of tests in intermediate_results.
   """
-  histogram_dicts = []
-  work_list = []
-  for test_result in intermediate_results['testResults']:
-    artifacts = test_result.get('outputArtifacts', {})
-    # TODO(crbug.com/981349): If metrics have already been computed in
-    # Telemetry, we read it from the file. Remove this branch after Telemetry
-    # does not compute metrics anymore.
-    if HISTOGRAM_DICTS_FILE in artifacts:
-      with open(artifacts[HISTOGRAM_DICTS_FILE]['filePath']) as f:
-        histogram_dicts += json.load(f)
-      del artifacts[HISTOGRAM_DICTS_FILE]
-      continue
-
-    if test_result['status'] == 'SKIP':
-      continue
-
-    if (HTML_TRACE_NAME not in artifacts or
-        not any(tag['key'] == 'tbmv2' for tag in test_result.get('tags', []))):
-      continue
-
-    trace_size_in_mib = (os.path.getsize(artifacts[HTML_TRACE_NAME]['filePath'])
-                         / (2 ** 20))
-    # Bails out on traces that are too big. See crbug.com/812631 for more
-    # details.
-    # TODO(crbug.com/1010041): Return a non-zero exit code in this case.
-    if trace_size_in_mib > 400:
-      test_result['status'] = 'FAIL'
-      logging.error('%s: Trace size is too big: %s MiB',
-                    test_result['testPath'], trace_size_in_mib)
-      continue
-
-    work_list.append(test_result)
-
-  for dicts in util.ApplyInParallel(_PoolWorker, work_list):
-    histogram_dicts += dicts
-  return histogram_dicts
+  artifacts = test_result.get('outputArtifacts', {})
+  # TODO(crbug.com/981349): If metrics have already been computed in
+  # Telemetry, we read it from the file. Remove this branch after Telemetry
+  # does not compute metrics anymore.
+  if HISTOGRAM_DICTS_FILE in artifacts:
+    with open(artifacts[HISTOGRAM_DICTS_FILE]['filePath']) as f:
+      test_result['_histograms'].ImportDicts(json.load(f))
+    del artifacts[HISTOGRAM_DICTS_FILE]
+    return
+
+  if test_result['status'] == 'SKIP':
+    return
+
+  if (HTML_TRACE_NAME not in artifacts or
+      not any(tag['key'] == 'tbmv2' for tag in test_result.get('tags', []))):
+    return
+
+  trace_size_in_mib = (os.path.getsize(artifacts[HTML_TRACE_NAME]['filePath'])
+                       / (2 ** 20))
+  # Bails out on traces that are too big. See crbug.com/812631 for more
+  # details.
+  # TODO(crbug.com/1010041): Return a non-zero exit code in this case.
+  if trace_size_in_mib > 400:
+    test_result['status'] = 'FAIL'
+    logging.error('%s: Trace size is too big: %s MiB',
+                  test_result['testPath'], trace_size_in_mib)
+    return
+
+  test_result['_histograms'].ImportDicts(_RunMetric(test_result))

@@ -11,6 +11,7 @@ from tracing.mre import failure
 from tracing.mre import job
 from tracing.mre import mre_result
 from tracing.value import histogram
+from tracing.value import histogram_set
 
 import mock

@@ -21,22 +22,14 @@ GETSIZE_METHOD = 'os.path.getsize'
 class ComputeMetricsTest(unittest.TestCase):
 
   def testComputeTBMv2Metrics(self):
-    in_results = testing.IntermediateResults([
-        testing.TestResult(
-            'benchmark/story1',
-            output_artifacts={
-                compute_metrics.HTML_TRACE_NAME:
-                    testing.Artifact('/trace1.html', 'gs://trace1.html')},
-            tags=['tbmv2:metric1'],
-        ),
-        testing.TestResult(
-            'benchmark/story2',
-            output_artifacts={
-                compute_metrics.HTML_TRACE_NAME:
-                    testing.Artifact('/trace2.html', 'gs://trace2.html')},
-            tags=['tbmv2:metric2'],
-        ),
-    ])
+    test_result = testing.TestResult(
+        'benchmark/story1',
+        output_artifacts={
+            compute_metrics.HTML_TRACE_NAME:
+                testing.Artifact('/trace1.html', 'gs://trace1.html')},
+        tags=['tbmv2:metric1'],
+    )
+    test_result['_histograms'] = histogram_set.HistogramSet()
 
     test_dict = histogram.Histogram('a', 'unitless').AsDict()
     metrics_result = mre_result.MreResult()

@@ -46,42 +39,41 @@ class ComputeMetricsTest(unittest.TestCase):
       with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
         getsize_mock.return_value = 1000
         run_metrics_mock.return_value = metrics_result
-        histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)
+        compute_metrics.ComputeTBMv2Metrics(test_result)
 
-    self.assertEqual(histogram_dicts, [test_dict, test_dict])
-    self.assertEqual(in_results['testResults'][0]['status'], 'PASS')
-    self.assertEqual(in_results['testResults'][1]['status'], 'PASS')
+    histogram_dicts = test_result['_histograms'].AsDicts()
+    self.assertEqual(histogram_dicts, [test_dict])
+    self.assertEqual(test_result['status'], 'PASS')
 
   def testComputeTBMv2MetricsTraceTooBig(self):
-    in_results = testing.IntermediateResults([
-        testing.TestResult(
-            'benchmark/story1',
-            output_artifacts={
-                compute_metrics.HTML_TRACE_NAME:
-                    testing.Artifact('/trace1.html', 'gs://trace1.html')},
-            tags=['tbmv2:metric1'],
-        ),
-    ])
+    test_result = testing.TestResult(
+        'benchmark/story1',
+        output_artifacts={
+            compute_metrics.HTML_TRACE_NAME:
+                testing.Artifact('/trace1.html', 'gs://trace1.html')},
+        tags=['tbmv2:metric1'],
+    )
+    test_result['_histograms'] = histogram_set.HistogramSet()
 
     with mock.patch(GETSIZE_METHOD) as getsize_mock:
       with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
        getsize_mock.return_value = 1e9
-        histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)
+        compute_metrics.ComputeTBMv2Metrics(test_result)
 
     self.assertEqual(run_metrics_mock.call_count, 0)
+    histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
-    self.assertEqual(in_results['testResults'][0]['status'], 'FAIL')
+    self.assertEqual(test_result['status'], 'FAIL')
 
   def testComputeTBMv2MetricsFailure(self):
-    in_results = testing.IntermediateResults([
-        testing.TestResult(
-            'benchmark/story1',
-            output_artifacts={
-                compute_metrics.HTML_TRACE_NAME:
-                    testing.Artifact('/trace1.html', 'gs://trace1.html')},
-            tags=['tbmv2:metric1'],
-        ),
-    ])
+    test_result = testing.TestResult(
+        'benchmark/story1',
+        output_artifacts={
+            compute_metrics.HTML_TRACE_NAME:
+                testing.Artifact('/trace1.html', 'gs://trace1.html')},
+        tags=['tbmv2:metric1'],
+    )
+    test_result['_histograms'] = histogram_set.HistogramSet()
 
     metrics_result = mre_result.MreResult()
     metrics_result.AddFailure(failure.Failure(job.Job(0), 0, 0, 0, 0, 0))

@@ -90,26 +82,27 @@ class ComputeMetricsTest(unittest.TestCase):
       with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
         getsize_mock.return_value = 100
         run_metrics_mock.return_value = metrics_result
-        histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)
+        compute_metrics.ComputeTBMv2Metrics(test_result)
 
+    histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
-    self.assertEqual(in_results['testResults'][0]['status'], 'FAIL')
+    self.assertEqual(test_result['status'], 'FAIL')
 
   def testComputeTBMv2MetricsSkipped(self):
-    in_results = testing.IntermediateResults([
-        testing.TestResult(
-            'benchmark/story1',
-            output_artifacts={
-                compute_metrics.HTML_TRACE_NAME:
-                    testing.Artifact('/trace1.html', 'gs://trace1.html')},
-            tags=['tbmv2:metric1'],
-            status='SKIP',
-        ),
-    ])
+    test_result = testing.TestResult(
+        'benchmark/story1',
+        output_artifacts={
+            compute_metrics.HTML_TRACE_NAME:
+                testing.Artifact('/trace1.html', 'gs://trace1.html')},
+        tags=['tbmv2:metric1'],
+        status='SKIP',
+    )
+    test_result['_histograms'] = histogram_set.HistogramSet()
 
     with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
-      histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)
+      compute_metrics.ComputeTBMv2Metrics(test_result)
 
     self.assertEqual(run_metrics_mock.call_count, 0)
+    histogram_dicts = test_result['_histograms'].AsDicts()
     self.assertEqual(histogram_dicts, [])
-    self.assertEqual(in_results['testResults'][0]['status'], 'SKIP')
+    self.assertEqual(test_result['status'], 'SKIP')

@@ -9,6 +9,7 @@ https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_r
 """
 
 import collections
+import datetime
 import json
 import os
 import urllib

@@ -19,18 +20,18 @@ from core.results_processor import util
 OUTPUT_FILENAME = 'test-results.json'
 
 
-def ProcessIntermediateResults(intermediate_results, options):
+def ProcessIntermediateResults(test_results, options):
   """Process intermediate results and write output in output_dir."""
-  results = Convert(intermediate_results, options.output_dir)
+  results = Convert(test_results, options.output_dir)
   with open(os.path.join(options.output_dir, OUTPUT_FILENAME), 'w') as f:
     json.dump(results, f, sort_keys=True, indent=4, separators=(',', ': '))
 
 
-def Convert(in_results, base_dir):
+def Convert(test_results, base_dir):
   """Convert intermediate results to the JSON Test Results Format.
 
   Args:
-    in_results: The parsed intermediate results.
+    test_results: The parsed intermediate results.
     base_dir: A string with the path to a base directory; artifact file paths
       will be written relative to this.

@@ -40,7 +41,7 @@ def Convert(in_results, base_dir):
   results = {'tests': {}}
   status_counter = collections.Counter()
 
-  for result in in_results['testResults']:
+  for result in test_results:
     benchmark_name, story_name = result['testPath'].split('/')
     story_name = urllib.unquote(story_name)
     actual_status = result['status']

@@ -77,10 +78,17 @@ def Convert(in_results, base_dir):
     if test['shard'] is None:
       del test['shard']
 
-  benchmark_run = in_results['benchmarkRun']
+  # Test results are written in order of execution, so the first test start
+  # time is approximately the start time of the whole suite.
+  test_suite_start_time = (test_results[0]['startTime'] if test_results
+                           else datetime.datetime.utcnow().isoformat() + 'Z')
+  # If Telemetry stops with a unhandleable error, then remaining stories
+  # are marked as unexpectedly skipped.
+  interrupted = any(t['status'] == 'SKIP' and not t['isExpected']
+                    for t in test_results)
   results.update(
-      seconds_since_epoch=util.IsoTimestampToEpoch(benchmark_run['startTime']),
-      interrupted=benchmark_run['interrupted'],
+      seconds_since_epoch=util.IsoTimestampToEpoch(test_suite_start_time),
+      interrupted=interrupted,
       num_failures_by_type=dict(status_counter),
       path_delimiter='/',
       version=3,

@@ -13,13 +13,11 @@ class Json3OutputTest(unittest.TestCase):
   def setUp(self):
     self.base_dir = 'base_dir'
 
-  def Convert(self, test_results, **kwargs):
-    base_dir = kwargs.pop('base_dir', self.base_dir)
-    original_results = testing.IntermediateResults(test_results, **kwargs)
-    intermediate_results = copy.deepcopy(original_results)
-    results = json3_output.Convert(intermediate_results, base_dir)
+  def Convert(self, test_results):
+    test_results_copy = copy.deepcopy(test_results)
+    results = json3_output.Convert(test_results_copy, self.base_dir)
     # Convert should not modify the original intermediate results.
-    self.assertEqual(intermediate_results, original_results)
+    self.assertEqual(test_results_copy, test_results)
     return results
 
   def FindTestResult(self, results, benchmark, story):

@@ -29,15 +27,15 @@ class Json3OutputTest(unittest.TestCase):
       node = node[key]
     return node
 
-  def testEmptyResults(self):
-    results = self.Convert(
-        [], start_time='2009-02-13T23:31:30.987000Z', interrupted=False)
+  def testStartTime(self):
+    results = self.Convert([
+        testing.TestResult('benchmark/story',
+                           start_time='2009-02-13T23:31:30.987000Z')
+    ])
 
     self.assertFalse(results['interrupted'])
-    self.assertEqual(results['num_failures_by_type'], {})
     self.assertEqual(results['path_delimiter'], '/')
     self.assertEqual(results['seconds_since_epoch'], 1234567890.987)
-    self.assertEqual(results['tests'], {})
     self.assertEqual(results['version'], 3)
 
   def testSingleTestCase(self):

@@ -4,6 +4,7 @@
 
 """Unit tests for results_processor methods."""
 
+import datetime
 import os
 import unittest

@@ -12,141 +13,95 @@ import mock
 from core.results_processor import processor
 from core.results_processor import testing
-from tracing.value import histogram
+from tracing.value.diagnostics import generic_set
+from tracing.value.diagnostics import date_range
 from tracing.value import histogram_set
 
 
 class ResultsProcessorUnitTests(unittest.TestCase):
 
   def testAddDiagnosticsToHistograms(self):
-    histogram_dicts = [histogram.Histogram('a', 'unitless').AsDict()]
-
-    in_results = testing.IntermediateResults(
-        test_results=[],
-        diagnostics={
-            'benchmarks': ['benchmark'],
-            'osNames': ['linux'],
-            'documentationUrls': [['documentation', 'url']],
-        },
-    )
-
-    histograms_with_diagnostics = processor.AddDiagnosticsToHistograms(
-        histogram_dicts, in_results, results_label='label')
-
-    out_histograms = histogram_set.HistogramSet()
-    out_histograms.ImportDicts(histograms_with_diagnostics)
-    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
-    self.assertEqual(len(diag_values), 4)
-    self.assertIn(['benchmark'], diag_values)
-    self.assertIn(['linux'], diag_values)
-    self.assertIn([['documentation', 'url']], diag_values)
-    self.assertIn(['label'], diag_values)
+    test_result = testing.TestResult('benchmark/story')
+    test_result['_histograms'] = histogram_set.HistogramSet()
+    test_result['_histograms'].CreateHistogram('a', 'unitless', [0])
+
+    start_ts = 1500000000
+    start_iso = datetime.datetime.utcfromtimestamp(start_ts).isoformat() + 'Z'
+
+    processor.AddDiagnosticsToHistograms(
+        test_result, test_suite_start=start_iso, results_label='label')
+
+    hist = test_result['_histograms'].GetFirstHistogram()
+    self.assertEqual(hist.diagnostics['labels'],
+                     generic_set.GenericSet(['label']))
+    self.assertEqual(hist.diagnostics['benchmarkStart'],
+                     date_range.DateRange(start_ts * 1e3))
 
   def testUploadArtifacts(self):
-    in_results = testing.IntermediateResults(
-        test_results=[
-            testing.TestResult(
-                'benchmark/story',
-                output_artifacts={'log': testing.Artifact('/log.log')},
-            ),
-            testing.TestResult(
-                'benchmark/story',
-                output_artifacts={
-                    'trace.html': testing.Artifact('/trace.html'),
-                    'screenshot': testing.Artifact('/screenshot.png'),
-                },
-            ),
-        ],
+    test_result = testing.TestResult(
+        'benchmark/story',
+        output_artifacts={
+            'logs': testing.Artifact('/log.log'),
+            'trace.html': testing.Artifact('/trace.html'),
+            'screenshot': testing.Artifact('/screenshot.png'),
+        },
     )
 
     with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
       cloud_patch.return_value = 'gs://url'
-      processor.UploadArtifacts(in_results, 'bucket', None)
+      processor.UploadArtifacts(test_result, 'bucket', 'run1')
       cloud_patch.assert_has_calls([
-          mock.call('bucket', mock.ANY, '/log.log'),
-          mock.call('bucket', mock.ANY, '/trace.html'),
-          mock.call('bucket', mock.ANY, '/screenshot.png'),
+          mock.call('bucket', 'run1/benchmark/story/logs', '/log.log'),
+          mock.call('bucket', 'run1/benchmark/story/trace.html', '/trace.html'),
+          mock.call('bucket', 'run1/benchmark/story/screenshot',
+                    '/screenshot.png'),
          ],
          any_order=True,
       )
 
-    for result in in_results['testResults']:
-      for artifact in result['outputArtifacts'].itervalues():
-        self.assertEqual(artifact['remoteUrl'], 'gs://url')
-
-  def testUploadArtifacts_CheckRemoteUrl(self):
-    in_results = testing.IntermediateResults(
-        test_results=[
-            testing.TestResult(
-                'benchmark/story',
-                output_artifacts={
-                    'trace.html': testing.Artifact('/trace.html')
-                },
-            ),
-        ],
-        start_time='2019-10-01T12:00:00.123456Z',
-    )
+    for artifact in test_result['outputArtifacts'].itervalues():
+      self.assertEqual(artifact['remoteUrl'], 'gs://url')
 
-    with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
-      with mock.patch('random.randint') as randint_patch:
-        randint_patch.return_value = 54321
-        processor.UploadArtifacts(in_results, 'bucket', 'src@abc + 123')
-        cloud_patch.assert_called_once_with(
-            'bucket',
-            'src_abc_123_20191001T120000_54321/benchmark/story/trace.html',
-            '/trace.html'
-        )
+  def testRunIdentifier(self):
+    with mock.patch('random.randint') as randint_patch:
+      randint_patch.return_value = 54321
+      run_identifier = processor.RunIdentifier(
+          results_label='src@abc + 123',
+          test_suite_start='2019-10-01T12:00:00.123456Z')
+    self.assertEqual(run_identifier, 'src_abc_123_20191001T120000_54321')
 
   def testAggregateTraces(self):
-    in_results = testing.IntermediateResults(
-        test_results=[
-            testing.TestResult(
-                'benchmark/story1',
-                output_artifacts={
-                    'trace/1.json': testing.Artifact(
-                        os.path.join('test_run', 'story1', 'trace', '1.json')),
-                },
-            ),
-            testing.TestResult(
-                'benchmark/story2',
-                output_artifacts={
-                    'trace/1.json': testing.Artifact(
-                        os.path.join('test_run', 'story2', 'trace', '1.json')),
-                    'trace/2.json': testing.Artifact(
-                        os.path.join('test_run', 'story2', 'trace', '2.json')),
-                },
-            ),
-        ],
+    test_result = testing.TestResult(
+        'benchmark/story2',
+        output_artifacts={
+            'trace/1.json': testing.Artifact(
+                os.path.join('test_run', 'story2', 'trace', '1.json')),
+            'trace/2.json': testing.Artifact(
+                os.path.join('test_run', 'story2', 'trace', '2.json')),
+        },
     )
 
-    with mock.patch('tracing.trace_data.trace_data.SerializeAsHtml') as patch:
-      processor.AggregateTraces(in_results)
+    serialize_method = 'tracing.trace_data.trace_data.SerializeAsHtml'
+    with mock.patch(serialize_method) as mock_serialize:
+      processor.AggregateTraces(test_result)
 
-    call_list = [list(call[0]) for call in patch.call_args_list]
-    self.assertEqual(len(call_list), 2)
-    for call in call_list:
-      call[0] = set(call[0])
-    self.assertIn(
-        [
-            set([os.path.join('test_run', 'story1', 'trace', '1.json')]),
-            os.path.join('test_run', 'story1', 'trace', 'trace.html'),
-        ],
-        call_list
+    self.assertEqual(mock_serialize.call_count, 1)
+    trace_files, file_path = mock_serialize.call_args[0][:2]
+    self.assertEqual(
+        set(trace_files),
+        set([
+            os.path.join('test_run', 'story2', 'trace', '1.json'),
+            os.path.join('test_run', 'story2', 'trace', '2.json'),
+        ]),
     )
-    self.assertIn(
-        [
-            set([
-                os.path.join('test_run', 'story2', 'trace', '1.json'),
-                os.path.join('test_run', 'story2', 'trace', '2.json'),
-            ]),
-            os.path.join('test_run', 'story2', 'trace', 'trace.html'),
-        ],
-        call_list
+    self.assertEqual(
+        file_path,
+        os.path.join('test_run', 'story2', 'trace', 'trace.html'),
     )
 
-    for result in in_results['testResults']:
-      artifacts = result['outputArtifacts']
-      self.assertEqual(len(artifacts), 1)
-      self.assertEqual(artifacts.keys()[0], 'trace.html')
+    artifacts = test_result['outputArtifacts']
+    self.assertEqual(len(artifacts), 1)
+    self.assertEqual(artifacts.keys()[0], 'trace.html')
 
   def testMeasurementToHistogram(self):
     hist = processor.MeasurementToHistogram('a', {

@@ -7,33 +7,6 @@
 
 import json
 
-_BENCHMARK_START_KEYS = set(['startTime'])
-
-
-def IntermediateResults(test_results, start_time='2015-10-21T07:28:00.000Z',
-                        finalized=True, interrupted=False, diagnostics=None):
-  """Build a dict of 'parsed' intermediate results.
-
-  Args:
-    test_results: A sequence of testResult dicts.
-    start_time: An optional UTC timestamp recording when a benchmark started
-      running.
-    finalized: An optional bool indicating whether the benchmark run finalized.
-      Defaults to True.
-    interrupted: An optional bool indicating whether the benchmark run was
-      interrupted. Defaults to False.
-  """
-  return {
-      'benchmarkRun': {
-          'startTime': start_time,
-          'finalized': finalized,
-          'interrupted': interrupted,
-          'diagnostics': diagnostics or {},
-      },
-      'testResults': list(test_results)
-  }
-
-
 def TestResult(test_path, status='PASS', is_expected=None,
                start_time='2015-10-21T07:28:00.000Z', run_duration='1.00s',
                output_artifacts=None, tags=None):

@@ -97,29 +70,16 @@ def SerializeIntermediateResults(in_results, filepath):
   """Serialize intermediate results to a filepath.
 
   Args:
-    in_results: A dict with intermediate results, e.g. as produced by
-      IntermediateResults or parsed from an intermediate results file.
-    filpath: A file path where to serialize the intermediate results.
+    in_results: A list of test results.
+    filepath: A file path where to serialize the intermediate results.
   """
-  # Split benchmarkRun into fields recorded at startup and when finishing.
-  benchmark_start = {}
-  benchmark_finish = {}
-  for key, value in in_results['benchmarkRun'].items():
-    d = benchmark_start if key in _BENCHMARK_START_KEYS else benchmark_finish
-    d[key] = value
-
-  # Serialize individual records as a sequence of json lines.
   with open(filepath, 'w') as fp:
-    _SerializeRecord({'benchmarkRun': benchmark_start}, fp)
-    for test_result in in_results['testResults']:
-      _SerializeRecord({'testResult': test_result}, fp)
-    _SerializeRecord({'benchmarkRun': benchmark_finish}, fp)
+    for test_result in in_results:
+      json.dump({'testResult': test_result}, fp,
+                sort_keys=True, separators=(',', ':'))
+      fp.write('\n')
 
 
 def _SplitTag(tag):
   key, value = tag.split(':', 1)
   return {'key': key, 'value': value}
-
-
-def _SerializeRecord(record, fp):
-  fp.write(json.dumps(record, sort_keys=True, separators=(',', ':')) + '\n')

@@ -9,16 +9,13 @@ import multiprocessing
 from multiprocessing.dummy import Pool as ThreadPool
 
 
-def ApplyInParallel(function, work_list):
+def ApplyInParallel(function, work_list, on_failure=None):
   """Apply a function to all values in work_list in parallel.
 
   Args:
     function: A function with one argument.
     work_list: Any iterable with arguments for the function.
-
-  Returns:
-    A generator over results. The order of results might not match the
-    order of the arguments in the work_list.
+    on_failure: A function to run in case of a failure.
   """
   if not work_list:
     return

@@ -35,17 +32,17 @@ def ApplyInParallel(function, work_list):
   def function_with_try(arg):
     try:
-      return function(arg)
+      function(arg)
     except Exception:  # pylint: disable=broad-except
      # logging exception here is the only way to get a stack trace since
      # multiprocessing's pool implementation does not save that data. See
      # crbug.com/953365.
      logging.exception('Exception while running %s' % function.__name__)
-      raise
+      if on_failure:
+        on_failure(arg)
 
   try:
-    for result in pool.imap_unordered(function_with_try, work_list):
-      yield result
+    pool.imap_unordered(function_with_try, work_list)
     pool.close()
     pool.join()
   finally:

@@ -9,15 +9,16 @@ from core.results_processor import util
 class UtilTests(unittest.TestCase):
 
   def testApplyInParallel(self):
-    work_list = [1, 2, 3]
-    fun = lambda x: x * x
-    result = set(util.ApplyInParallel(fun, work_list))
-    self.assertEqual(result, set([1, 4, 9]))
+    work_list = [[1], [2], [3]]
+    def fun(x):
+      x.extend(x)
+    util.ApplyInParallel(fun, work_list)
+    self.assertEqual(work_list, [[1, 1], [2, 2], [3, 3]])
 
-  def testApplyInParallelExceptionRaised(self):
-    work_list = [1, 2, 3]
+  def testApplyInParallelOnFailure(self):
+    work_list = [[1], [2], [3]]
     def fun(x):
-      if x == 3:
+      if x == [3]:
         raise RuntimeError()
-    with self.assertRaises(RuntimeError):
-      list(util.ApplyInParallel(fun, work_list))
+    util.ApplyInParallel(fun, work_list, on_failure=lambda x: x.pop())
+    self.assertEqual(work_list, [[1], [2], []])