Commit e5093968 authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Write all diagnostics in Results Processor

This CL unifies diagnostic writing in one place so that we no longer
depend on diagnostics written by metrics computation code.

Bug: 981349
Change-Id: I8e08c8b52c5e64de77d2dbc39e19b3e8d12e2b75
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1893879
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712977}
parent dbfb1f8f
@@ -22,6 +22,8 @@ from core.results_processor import formatters
 # These formats are always handled natively, and never handed over to Telemetry.
 HANDLED_NATIVELY = ['none', 'json-test-results', 'histograms', 'html', 'csv']
 
+TELEMETRY_TEST_PATH_FORMAT = 'telemetry'
+GTEST_TEST_PATH_FORMAT = 'gtest'
 
 def ArgumentParser(standalone=False, legacy_formats=None):
   """Create an ArgumentParser defining options required by the processor."""
@@ -66,6 +68,13 @@ def ArgumentParser(standalone=False, legacy_formats=None):
   group.add_argument(
       '--results-label', metavar='LABEL',
       help='Label to identify the results generated by this run.')
+  group.add_argument(
+      '--test-path-format', metavar='FORMAT',
+      choices=[TELEMETRY_TEST_PATH_FORMAT, GTEST_TEST_PATH_FORMAT],
+      default=TELEMETRY_TEST_PATH_FORMAT,
+      help=Sentences(
+          'How to interpret the testPath attribute.',
+          'Available options: %(choices)s. Default: %(default)s.'))
   group.add_argument(
       '--upload-results', action='store_true',
       help='Upload generated artifacts to cloud storage.')
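A minimal sketch of how the new flag is consumed (hypothetical parse_args calls; assumes no other flags are required in this configuration):

    parser = ArgumentParser()
    options = parser.parse_args([])
    # options.test_path_format == 'telemetry'  (the default)
    options = parser.parse_args(['--test-path-format', 'gtest'])
    # options.test_path_format == 'gtest'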
@@ -24,7 +24,7 @@ from core.results_processor import formatters
 from core.results_processor import util
 
 from tracing.trace_data import trace_data
-from tracing.value.diagnostics import date_range
+from tracing.value.diagnostics import all_diagnostics
 from tracing.value.diagnostics import generic_set
 from tracing.value.diagnostics import reserved_infos
 from tracing.value import histogram
@@ -62,6 +62,7 @@ def ProcessResults(options):
   upload_bucket = options.upload_bucket
   results_label = options.results_label
   max_num_values = options.max_values_per_test_case
+  test_path_format = options.test_path_format
   test_suite_start = (test_results[0]['startTime']
                       if test_results and 'startTime' in test_results[0]
                       else datetime.datetime.utcnow().isoformat() + 'Z')
@@ -72,7 +73,8 @@ def ProcessResults(options):
   util.ApplyInParallel(
       lambda result: ProcessTestResult(
           result, upload_bucket, results_label, run_identifier,
-          test_suite_start, should_compute_metrics, max_num_values),
+          test_suite_start, should_compute_metrics, max_num_values,
+          test_path_format),
       test_results,
       on_failure=util.SetUnexpectedFailure,
   )
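util.ApplyInParallel itself is not shown in this diff; the sketch below is only a rough statement of the contract it appears to satisfy here (names, pool size, and the thread-pool choice are illustrative, not the actual implementation):

    from multiprocessing.dummy import Pool  # thread-based pool

    def ApplyInParallelSketch(work, items, on_failure=None, num_workers=4):
      # Apply `work` to each item; route per-item exceptions to `on_failure`
      # instead of aborting the whole batch.
      def SafeWork(item):
        try:
          work(item)
        except Exception:
          if on_failure is not None:
            on_failure(item)
      pool = Pool(num_workers)
      try:
        pool.map(SafeWork, items)
      finally:
        pool.close()
        pool.join()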
@@ -94,7 +96,7 @@ def ProcessResults(options):
 
 def ProcessTestResult(test_result, upload_bucket, results_label,
                       run_identifier, test_suite_start, should_compute_metrics,
-                      max_num_values):
+                      max_num_values, test_path_format):
   AggregateTraces(test_result)
   if upload_bucket is not None:
     UploadArtifacts(test_result, upload_bucket, run_identifier)
@@ -110,7 +112,8 @@ def ProcessTestResult(test_result, upload_bucket, results_label,
       util.SetUnexpectedFailure(test_result)
       del test_result['_histograms']
     else:
-      AddDiagnosticsToHistograms(test_result, test_suite_start, results_label)
+      AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
+                                 test_path_format)
 
 
 def ExtractHistograms(test_results):
@@ -210,10 +213,28 @@ def UploadArtifacts(test_result, upload_bucket, run_identifier):
           artifact['remoteUrl'])
 
 
-def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label):
-  """Add diagnostics to all histograms of a test run.
+def _GetTraceUrl(test_result):
+  artifacts = test_result.get('outputArtifacts', {})
+  trace_artifact = artifacts.get(compute_metrics.HTML_TRACE_NAME, {})
+  return (trace_artifact['remoteUrl'] if 'remoteUrl' in trace_artifact
+          else trace_artifact.get('filePath'))
+
+
+def _SplitTestPath(test_result, test_path_format):
+  if test_path_format == command_line.TELEMETRY_TEST_PATH_FORMAT:
+    return test_result['testPath'].split('/', 1)
+  elif test_path_format == command_line.GTEST_TEST_PATH_FORMAT:
+    return test_result['testPath'].split('.', 1)
+  else:
+    raise ValueError('Unknown test path format: %s' % test_path_format)
+
+
+def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
+                               test_path_format):
+  """Add diagnostics to all histograms of a test result.
 
   Reads diagnostics from the test artifact and adds them to all histograms.
   Also sets additional diagnostics based on test result metadata.
+  This overwrites the corresponding diagnostics previously set by e.g.
+  run_metrics.
   """
@@ -228,15 +249,29 @@ def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
     test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
         name, generic_set.GenericSet(diag))
 
-  timestamp_ms = util.IsoTimestampToEpoch(test_suite_start) * 1e3
-  test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
-      reserved_infos.BENCHMARK_START.name, date_range.DateRange(timestamp_ms))
-
-  if results_label is not None:
-    test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
-        reserved_infos.LABELS.name,
-        generic_set.GenericSet([results_label]))
+  test_suite, test_case = _SplitTestPath(test_result, test_path_format)
+  if 'startTime' in test_result:
+    test_start_ms = util.IsoTimestampToEpoch(test_result['startTime']) * 1e3
+  else:
+    test_start_ms = None
+  test_suite_start_ms = util.IsoTimestampToEpoch(test_suite_start) * 1e3
+  story_tags = [tag['value'] for tag in test_result.get('tags', [])
+                if tag['key'] == 'story_tag']
+  result_id = int(test_result.get('resultId', 0))
+  trace_url = _GetTraceUrl(test_result)
+
+  additional_diagnostics = [
+      (reserved_infos.BENCHMARKS, test_suite),
+      (reserved_infos.BENCHMARK_START, test_suite_start_ms),
+      (reserved_infos.LABELS, results_label),
+      (reserved_infos.STORIES, test_case),
+      (reserved_infos.STORYSET_REPEATS, result_id),
+      (reserved_infos.STORY_TAGS, story_tags),
+      (reserved_infos.TRACE_START, test_start_ms),
+      (reserved_infos.TRACE_URLS, trace_url),
+  ]
+
+  for name, value in _WrapDiagnostics(additional_diagnostics):
+    test_result['_histograms'].AddSharedDiagnosticToAllHistograms(name, value)
 
 
 def MeasurementToHistogram(name, measurement):
@@ -253,20 +288,24 @@ def MeasurementToHistogram(name, measurement):
                                          description=description)
 
 
-def _StoryDiagnostics(test_result):
-  """Extract diagnostics information about the specific story.
-
-  These diagnostics will be added only to ad-hoc measurements recorded by
-  benchmarks.
-  """
-  benchmark_name, story_name = test_result['testPath'].split('/', 1)
-  story_tags = [tag['value'] for tag in test_result.get('tags', [])
-                if tag['key'] == 'story_tag']
-  return {
-      reserved_infos.BENCHMARKS.name: generic_set.GenericSet([benchmark_name]),
-      reserved_infos.STORIES.name: generic_set.GenericSet([story_name]),
-      reserved_infos.STORY_TAGS.name: generic_set.GenericSet(story_tags),
-  }
+def _WrapDiagnostics(info_value_pairs):
+  """Wrap diagnostic values in corresponding Diagnostics classes.
+
+  Args:
+    info_value_pairs: any iterable of pairs (info, value), where info is one
+      of reserved infos defined in tracing.value.diagnostics.reserved_infos,
+      and value can be any json-serializable object.
+
+  Returns:
+    An iterator over pairs (diagnostic name, diagnostic value).
+  """
+  for info, value in info_value_pairs:
+    if value is None or value == []:
+      continue
+    if info.type == 'GenericSet' and not isinstance(value, list):
+      value = [value]
+    diag_class = all_diagnostics.GetDiagnosticClassForName(info.type)
+    yield info.name, diag_class(value)
 
 
 def ExtractMeasurements(test_result):
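An illustrative use of _WrapDiagnostics (the values are made up, and the snippet assumes processor.py's imports): scalars destined for GenericSet diagnostics are wrapped in a one-element list, while None and empty-list values are dropped entirely:

    from tracing.value.diagnostics import reserved_infos

    pairs = [
        (reserved_infos.BENCHMARKS, 'my_benchmark'),        # scalar, GenericSet type
        (reserved_infos.LABELS, None),                      # skipped
        (reserved_infos.STORY_TAGS, []),                    # skipped
        (reserved_infos.BENCHMARK_START, 1500000000000.0),  # DateRange type
    ]
    wrapped = dict(_WrapDiagnostics(pairs))
    # wrapped == {'benchmarks': GenericSet(['my_benchmark']),
    #             'benchmarkStart': DateRange(1500000000000.0)}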
@@ -275,10 +314,9 @@ def ExtractMeasurements(test_result):
   if MEASUREMENTS_NAME in artifacts:
     with open(artifacts[MEASUREMENTS_NAME]['filePath']) as f:
       measurements = json.load(f)['measurements']
-    diagnostics = _StoryDiagnostics(test_result)
     for name, measurement in measurements.iteritems():
       test_result['_histograms'].AddHistogram(
-          MeasurementToHistogram(name, measurement), diagnostics=diagnostics)
+          MeasurementToHistogram(name, measurement))
 
 
 def main(args=None):
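The measurements artifact schema is not spelled out in this diff; judging from the json.load(f)['measurements'] access and the MeasurementToHistogram(name, measurement) call above (which also reads a description), it presumably looks something like this (field names inferred, not normative):

    measurements_artifact = {
        'measurements': {
            # keys become histogram names
            'first_paint': {'unit': 'ms', 'samples': [12.3, 11.8],
                            'description': 'Time to first paint.'},
        }
    }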
@@ -571,8 +571,8 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
         ('builds', ''), ('deviceIds', ''), ('displayLabel', 'label'),
         ('masters', ''), ('memoryAmounts', ''), ('osNames', 'linux'),
         ('osVersions', ''), ('productVersions', ''),
-        ('stories', ''), ('storysetRepeats', ''),
-        ('traceStart', ''), ('traceUrls', '')
+        ('stories', 'story'), ('storysetRepeats', '0'),
+        ('traceStart', '2009-02-13 23:31:30'), ('traceUrls', '')
     ]
     self.assertEqual(actual, expected)
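For reference, the new expected traceStart corresponds to an epoch timestamp of 1234567890 seconds, which this integration test presumably uses as its fixed story start time:

    import datetime
    datetime.datetime.utcfromtimestamp(1234567890)
    # -> datetime.datetime(2009, 2, 13, 23, 31, 30)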
@@ -20,22 +20,42 @@ from tracing.value import histogram_set
 
 class ResultsProcessorUnitTests(unittest.TestCase):
 
   def testAddDiagnosticsToHistograms(self):
-    test_result = testing.TestResult('benchmark/story')
-    test_result['_histograms'] = histogram_set.HistogramSet()
-    test_result['_histograms'].CreateHistogram('a', 'unitless', [0])
     start_ts = 1500000000
     start_iso = datetime.datetime.utcfromtimestamp(start_ts).isoformat() + 'Z'
+    test_result = testing.TestResult(
+        'benchmark/story',
+        output_artifacts={
+            'trace.html': testing.Artifact('/trace.html', 'gs://trace.html'),
+        },
+        start_time=start_iso,
+        tags=['story_tag:test'],
+        result_id='3',
+    )
+    test_result['_histograms'] = histogram_set.HistogramSet()
+    test_result['_histograms'].CreateHistogram('a', 'unitless', [0])
 
     processor.AddDiagnosticsToHistograms(
-        test_result, test_suite_start=start_iso, results_label='label')
+        test_result, test_suite_start=start_iso, results_label='label',
+        test_path_format='telemetry')
 
     hist = test_result['_histograms'].GetFirstHistogram()
     self.assertEqual(hist.diagnostics['labels'],
                      generic_set.GenericSet(['label']))
+    self.assertEqual(hist.diagnostics['benchmarks'],
+                     generic_set.GenericSet(['benchmark']))
     self.assertEqual(hist.diagnostics['benchmarkStart'],
                      date_range.DateRange(start_ts * 1e3))
+    self.assertEqual(hist.diagnostics['traceStart'],
+                     date_range.DateRange(start_ts * 1e3))
+    self.assertEqual(hist.diagnostics['stories'],
+                     generic_set.GenericSet(['story']))
+    self.assertEqual(hist.diagnostics['storyTags'],
+                     generic_set.GenericSet(['test']))
+    self.assertEqual(hist.diagnostics['storysetRepeats'],
+                     generic_set.GenericSet([3]))
+    self.assertEqual(hist.diagnostics['traceUrls'],
+                     generic_set.GenericSet(['gs://trace.html']))
 
   def testUploadArtifacts(self):
     test_result = testing.TestResult(
@@ -9,7 +9,7 @@ import json
 
 def TestResult(test_path, status='PASS', expected=None,
                start_time='2015-10-21T07:28:00.000Z', run_duration='1.00s',
-               output_artifacts=None, tags=None):
+               output_artifacts=None, tags=None, result_id=None):
   """Build a TestResult dict.
 
   This follows the TestResultEntry spec of LUCI Test Results format.
@@ -41,12 +41,14 @@ def TestResult(test_path, status='PASS', expected=None,
       'status': status,
       'expected': expected,
       'startTime': start_time,
-      'runDuration': run_duration
+      'runDuration': run_duration,
   }
   if output_artifacts is not None:
     test_result['outputArtifacts'] = dict(output_artifacts)
   if tags is not None:
     test_result['tags'] = [_SplitTag(tag) for tag in tags]
+  if result_id is not None:
+    test_result['resultId'] = str(result_id)
   return test_result
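A hypothetical call mirroring the updated unit test: result_id is stored as a string under 'resultId' (matching the LUCI Test Results format), and the processor later converts it back with int(); tags are presumably split into key/value dicts by _SplitTag:

    result = TestResult('benchmark/story', tags=['story_tag:test'], result_id=3)
    # result['resultId'] == '3'
    # result['tags'] == [{'key': 'story_tag', 'value': 'test'}]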