Commit 1ac776e8 authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Rename artifacts -> outputArtifacts

The old name is also supported for compatibility.

Bug: 1011813
Change-Id: I513d316bea76c1355a2fdf6785acee6e6ca940af
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1847292
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Cr-Commit-Position: refs/heads/master@{#703680}
parent 987bf84b
......@@ -28,8 +28,9 @@ def _PoolWorker(test_result):
try:
metrics = [tag['value'] for tag in test_result['tags']
if tag['key'] == 'tbmv2']
html_local_path = test_result['artifacts'][HTML_TRACE_NAME]['filePath']
html_remote_url = test_result['artifacts'][HTML_TRACE_NAME]['remoteUrl']
html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]
html_local_path = html_trace['filePath']
html_remote_url = html_trace['remoteUrl']
logging.info('%s: Starting to compute metrics on trace.',
test_result['testPath'])
......@@ -73,7 +74,7 @@ def ComputeTBMv2Metrics(intermediate_results):
histogram_dicts = []
work_list = []
for test_result in intermediate_results['testResults']:
artifacts = test_result.get('artifacts', {})
artifacts = test_result.get('outputArtifacts', {})
# TODO(crbug.com/981349): If metrics have already been computed in
# Telemetry, we read it from the file. Remove this branch after Telemetry
# does not compute metrics anymore.
......
......@@ -24,14 +24,14 @@ class ComputeMetricsTest(unittest.TestCase):
in_results = testing.IntermediateResults([
testing.TestResult(
'benchmark/story1',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact('/trace1.html', 'gs://trace1.html')},
tags=['tbmv2:metric1'],
),
testing.TestResult(
'benchmark/story2',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact('/trace2.html', 'gs://trace2.html')},
tags=['tbmv2:metric2'],
......@@ -56,7 +56,7 @@ class ComputeMetricsTest(unittest.TestCase):
in_results = testing.IntermediateResults([
testing.TestResult(
'benchmark/story1',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact('/trace1.html', 'gs://trace1.html')},
tags=['tbmv2:metric1'],
......@@ -76,7 +76,7 @@ class ComputeMetricsTest(unittest.TestCase):
in_results = testing.IntermediateResults([
testing.TestResult(
'benchmark/story1',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact('/trace1.html', 'gs://trace1.html')},
tags=['tbmv2:metric1'],
......@@ -99,7 +99,7 @@ class ComputeMetricsTest(unittest.TestCase):
in_results = testing.IntermediateResults([
testing.TestResult(
'benchmark/story1',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact('/trace1.html', 'gs://trace1.html')},
tags=['tbmv2:metric1'],
......
......@@ -46,7 +46,7 @@ def Convert(in_results, base_dir):
actual_status = result['status']
expected_status = actual_status if result['isExpected'] else 'PASS'
status_counter[actual_status] += 1
artifacts = result.get('artifacts', {})
artifacts = result.get('outputArtifacts', {})
shard = _GetTagValue(result.get('tags', []), 'shard', as_type=int)
_MergeDict(
results['tests'],
......
......@@ -155,10 +155,10 @@ class Json3OutputTest(unittest.TestCase):
def testRepeatedTestCaseWithArtifacts(self):
self.base_dir = 'base'
results = self.Convert([
testing.TestResult('benchmark/story1', artifacts={
testing.TestResult('benchmark/story1', output_artifacts={
'logs.txt': testing.Artifact('base/artifacts/logs1.txt')
}),
testing.TestResult('benchmark/story1', artifacts={
testing.TestResult('benchmark/story1', output_artifacts={
'logs.txt': testing.Artifact('base/artifacts/logs2.txt'),
'trace.json': testing.Artifact('base/artifacts/trace2.json')
}),
......@@ -174,12 +174,12 @@ class Json3OutputTest(unittest.TestCase):
def testRemoteArtifacts(self):
results = self.Convert([
testing.TestResult('benchmark/story1', artifacts={
testing.TestResult('benchmark/story1', output_artifacts={
'logs.txt': testing.Artifact(
'base/artifacts/logs1.txt',
'https://example.org/artifacts/logs1.txt')
}),
testing.TestResult('benchmark/story1', artifacts={
testing.TestResult('benchmark/story1', output_artifacts={
'logs.txt': testing.Artifact(
'base/artifacts/logs2.txt',
'https://example.org/artifacts/logs2.txt'),
......
......@@ -89,7 +89,12 @@ def _LoadIntermediateResults(intermediate_file):
if 'benchmarkRun' in record:
results['benchmarkRun'].update(record['benchmarkRun'])
if 'testResult' in record:
results['testResults'].append(record['testResult'])
test_result = record['testResult']
# TODO(crbug.com/1011813): This is for compatibility with old version
# of LUCI format. Remove it when Telemetry switches to a new version.
if 'artifacts' in test_result:
test_result['outputArtifacts'] = test_result.pop('artifacts')
results['testResults'].append(test_result)
return results
......@@ -100,7 +105,7 @@ def _AggregateTraces(intermediate_results):
all entries for individual traces and adds one entry for aggregate one.
"""
for result in intermediate_results['testResults']:
artifacts = result.get('artifacts', {})
artifacts = result.get('outputArtifacts', {})
traces = [name for name in artifacts if name.startswith('trace/')]
if len(traces) > 0:
# For now, the html trace is generated by Telemetry, so it should be there
......@@ -138,7 +143,7 @@ def UploadArtifacts(intermediate_results, upload_bucket, results_label):
work_list = []
for result in intermediate_results['testResults']:
artifacts = result.get('artifacts', {})
artifacts = result.get('outputArtifacts', {})
for name, artifact in artifacts.iteritems():
if 'remoteUrl' in artifact:
continue
......@@ -161,7 +166,7 @@ def UploadArtifacts(intermediate_results, upload_bucket, results_label):
pass
for result in intermediate_results['testResults']:
artifacts = result.get('artifacts', {})
artifacts = result.get('outputArtifacts', {})
for name, artifact in artifacts.iteritems():
logging.info('Uploaded %s of %s to %s', name, result['testPath'],
artifact['remoteUrl'])
......
......@@ -80,7 +80,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
self.SerializeIntermediateResults([
testing.TestResult(
'benchmark/story',
artifacts={
output_artifacts={
'logs': testing.Artifact('/logs.txt', 'gs://logs.txt'),
'trace/telemetry': testing.Artifact('/telemetry.json'),
'trace.html':
......@@ -116,7 +116,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
diagnostics={
......@@ -161,7 +163,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
)
......@@ -202,7 +206,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
)
......@@ -241,9 +247,10 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={
output_artifacts={
compute_metrics.HTML_TRACE_NAME:
testing.Artifact(trace_file, 'gs://trace.html')},
testing.Artifact(trace_file, 'gs://trace.html')
},
tags=['tbmv2:sampleMetric'],
),
],
......@@ -278,7 +285,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
diagnostics={
......@@ -380,7 +389,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
diagnostics={
......@@ -424,7 +435,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
)
......@@ -460,7 +473,9 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
output_artifacts={
'histogram_dicts.json': testing.Artifact(hist_file)
},
),
],
)
......
......@@ -45,11 +45,11 @@ class ResultsProcessorUnitTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'log': testing.Artifact('/log.log')},
output_artifacts={'log': testing.Artifact('/log.log')},
),
testing.TestResult(
'benchmark/story',
artifacts={
output_artifacts={
'trace.html': testing.Artifact('/trace.html'),
'screenshot': testing.Artifact('/screenshot.png'),
},
......@@ -69,7 +69,7 @@ class ResultsProcessorUnitTests(unittest.TestCase):
)
for result in in_results['testResults']:
for artifact in result['artifacts'].itervalues():
for artifact in result['outputArtifacts'].itervalues():
self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testUploadArtifacts_CheckRemoteUrl(self):
......@@ -77,7 +77,9 @@ class ResultsProcessorUnitTests(unittest.TestCase):
test_results=[
testing.TestResult(
'benchmark/story',
artifacts={'trace.html': testing.Artifact('/trace.html')},
output_artifacts={
'trace.html': testing.Artifact('/trace.html')
},
),
],
start_time='2019-10-01T12:00:00.123456Z',
......
......@@ -36,7 +36,7 @@ def IntermediateResults(test_results, start_time='2015-10-21T07:28:00.000Z',
def TestResult(test_path, status='PASS', is_expected=None,
start_time='2015-10-21T07:28:00.000Z', run_duration='1.00s',
artifacts=None, tags=None):
output_artifacts=None, tags=None):
"""Build a TestResult dict.
This follows the TestResultEntry spec of LUCI Test Results format.
......@@ -69,8 +69,8 @@ def TestResult(test_path, status='PASS', is_expected=None,
'startTime': start_time,
'runDuration': run_duration
}
if artifacts is not None:
test_result['artifacts'] = artifacts
if output_artifacts is not None:
test_result['outputArtifacts'] = output_artifacts
if tags is not None:
test_result['tags'] = [_SplitTag(tag) for tag in tags]
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment