Commit e0928254 authored by Mikhail Khokhlov, committed by Chromium LUCI CQ

[tools/perf] Compute multiple TBMv3 metrics without reloading the trace

This speeds up TBMv3 metric computation in results_processor.

Change-Id: I76ef0ffc369b45a1104f750eb79d214931093d84
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2577499
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Deep Roy <dproy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#834679}
parent 462272fa
......@@ -108,10 +108,9 @@ def ComputeTBMv3Metrics(test_result,
return
start = time.time()
for metric in metrics:
histograms = trace_processor.RunMetric(
trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
metric, fetch_power_profile)
test_result['_histograms'].Merge(histograms)
histograms = trace_processor.RunMetrics(
trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
metrics, fetch_power_profile)
test_result['_histograms'].Merge(histograms)
logging.info('%s: Computing TBMv3 metrics took %.3f seconds.' % (
test_result['testPath'], time.time() - start))
......@@ -18,7 +18,7 @@ import mock
RUN_METRICS_METHOD = 'tracing.metrics.metric_runner.RunMetricOnSingleTrace'
GETSIZE_METHOD = 'os.path.getsize'
TRACE_PROCESSOR_METRIC_METHOD = 'core.tbmv3.trace_processor.RunMetric'
TRACE_PROCESSOR_METRIC_METHOD = 'core.tbmv3.trace_processor.RunMetrics'
class ComputeMetricsTest(unittest.TestCase):
......
......@@ -813,3 +813,44 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
self.assertEqual(hist1.unit, SAMPLE_HISTOGRAM_UNIT)
hist2 = out_histograms.GetHistogramNamed('dummy::simple_field')
self.assertEqual(hist2.unit, 'count_smallerIsBetter')
def testMultipleTBMv3Metrics(self):
  """Integration test: two TBMv3 metrics computed from one serialized trace.

  Verifies that when a test result is tagged with more than one 'tbmv3:'
  metric, results_processor produces histograms for all of them (the
  metrics are computed together via a single trace_processor invocation).
  """
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateProtoTraceArtifact(),
              self.CreateDiagnosticsArtifact(
                  benchmarks=['benchmark'],
                  osNames=['linux'],
                  documentationUrls=[['documentation', 'url']])
          ],
          # Two tbmv3 tags: both metrics should be computed from the same
          # proto trace artifact.
          tags=['tbmv3:dummy_metric', 'tbmv3:test_chrome_metric'],
          start_time='2009-02-13T23:31:30.987000Z',
      ),
  )

  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
      # TBMv3 metrics are only computed behind this flag.
      '--experimental-tbmv3-metrics',
  ])

  with open(os.path.join(
      self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)

  # We use two metrics for testing here. The dummy_metric is defined in
  # tools/perf/core/tbmv3/metrics/dummy_metric_*.
  # The test_chrome_metric is built into trace_processor, see source in
  # third_party/perfetto/src/trace_processor/metrics/chrome/test_chrome_metric.sql.
  hist1 = out_histograms.GetHistogramNamed('dummy::simple_field')
  self.assertEqual(hist1.sample_values, [42])
  hist2 = out_histograms.GetHistogramNamed('test_chrome::test_value')
  self.assertEqual(hist2.sample_values, [1])
......@@ -188,28 +188,30 @@ def _PluckField(json_dict, field_path):
return _PluckField(field_value, path_tail)
def RunMetric(trace_processor_path, trace_file, metric_name,
fetch_power_profile=False):
"""Run a TBMv3 metric using trace processor.
def RunMetrics(trace_processor_path, trace_file, metric_names,
fetch_power_profile=False):
"""Run TBMv3 metrics using trace processor.
Args:
trace_processor_path: path to the trace_processor executable.
trace_file: path to the trace file.
metric_name: the metric name (the corresponding files must exist in
metric_names: a list of metric names (the corresponding files must exist in
tbmv3/metrics directory).
Returns:
A HistogramSet with metric results.
"""
trace_processor_path = _EnsureTraceProcessor(trace_processor_path)
metric_files = _CreateMetricFiles(metric_name)
if metric_files.internal_metric:
metric_name_arg = metric_name
else:
metric_name_arg = metric_files.sql
metric_name_args = []
for metric_name in metric_names:
metric_files = _CreateMetricFiles(metric_name)
if metric_files.internal_metric:
metric_name_args.append(metric_name)
else:
metric_name_args.append(metric_files.sql)
command_args = [
trace_processor_path,
'--run-metrics', metric_name_arg,
'--run-metrics', ','.join(metric_name_args),
'--metrics-output', 'json',
trace_file,
]
......@@ -222,30 +224,38 @@ def RunMetric(trace_processor_path, trace_file, metric_name,
histograms = histogram_set.HistogramSet()
root_annotations = measurements.get('__annotations', {})
full_metric_name = 'perfetto.protos.' + metric_name
annotations = root_annotations.get(full_metric_name, None)
metric_proto = measurements.get(full_metric_name, None)
if metric_proto is None:
logging.warn("No metric found in the output.")
return histograms
elif annotations is None:
logging.info("Metric has no field with unit. Histograms will be empty.")
return histograms
for field in _LeafFieldAnnotations(annotations):
unit = field.field_options.get('unit', None)
if unit is None:
logging.debug('Skipping field %s to histograms because it has no unit',
field.name)
for metric_name in metric_names:
full_metric_name = 'perfetto.protos.' + metric_name
annotations = root_annotations.get(full_metric_name, None)
metric_proto = measurements.get(full_metric_name, None)
if metric_proto is None:
logging.warn("Metric not found in the output: %s", metric_name)
continue
elif annotations is None:
logging.info("Skipping metric %s because it has no field with unit.",
metric_name)
continue
histogram_name = ':'.join([field.name for field in field.path_from_root])
samples = _PluckField(metric_proto, field.path_from_root)
scoped_histogram_name = _ScopedHistogramName(metric_name, histogram_name)
histograms.CreateHistogram(scoped_histogram_name, unit, samples)
for field in _LeafFieldAnnotations(annotations):
unit = field.field_options.get('unit', None)
if unit is None:
logging.debug('Skipping field %s to histograms because it has no unit',
field.name)
continue
histogram_name = ':'.join([field.name for field in field.path_from_root])
samples = _PluckField(metric_proto, field.path_from_root)
scoped_histogram_name = _ScopedHistogramName(metric_name, histogram_name)
histograms.CreateHistogram(scoped_histogram_name, unit, samples)
return histograms
def RunMetric(trace_processor_path, trace_file, metric_name,
              fetch_power_profile=False):
  """Run a single TBMv3 metric using trace processor.

  Backwards-compatible convenience wrapper: delegates to RunMetrics with a
  one-element metric list.

  Args:
    trace_processor_path: path to the trace_processor executable.
    trace_file: path to the trace file.
    metric_name: the metric name (the corresponding files must exist in
      tbmv3/metrics directory).
    fetch_power_profile: forwarded to RunMetrics unchanged.

  Returns:
    A HistogramSet with metric results.
  """
  single_metric = [metric_name]
  return RunMetrics(trace_processor_path, trace_file, single_metric,
                    fetch_power_profile)
def ConvertProtoTraceToJson(trace_processor_path, proto_file, json_path):
"""Convert proto trace to json using trace processor.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment