Commit e0928254 authored by Mikhail Khokhlov, committed by Chromium LUCI CQ

[tools/perf] Compute multiple TBMv3 metrics without reloading the trace

This speeds up TBMv3 metric computation in results_processor.
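
The expensive step is loading the proto trace into trace_processor, so the
win comes from paying that cost once per trace instead of once per metric.
A rough sketch of the before/after call pattern in results_processor (the
surrounding code is abbreviated; see the diff below for the real change):

    # Before: one trace_processor invocation, and one trace load, per metric.
    for metric in metrics:
      histograms = trace_processor.RunMetric(
          trace_processor_path, trace_path, metric, fetch_power_profile)
      test_result['_histograms'].Merge(histograms)

    # After: a single invocation loads the trace once and computes all
    # requested metrics together.
    histograms = trace_processor.RunMetrics(
        trace_processor_path, trace_path, metrics, fetch_power_profile)
    test_result['_histograms'].Merge(histograms)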

Change-Id: I76ef0ffc369b45a1104f750eb79d214931093d84
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2577499
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Deep Roy <dproy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#834679}
parent 462272fa
@@ -108,10 +108,9 @@ def ComputeTBMv3Metrics(test_result,
     return
   start = time.time()
-  for metric in metrics:
-    histograms = trace_processor.RunMetric(
-        trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
-        metric, fetch_power_profile)
-    test_result['_histograms'].Merge(histograms)
+  histograms = trace_processor.RunMetrics(
+      trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
+      metrics, fetch_power_profile)
+  test_result['_histograms'].Merge(histograms)
 
   logging.info('%s: Computing TBMv3 metrics took %.3f seconds.' % (
       test_result['testPath'], time.time() - start))
@@ -18,7 +18,7 @@ import mock
 
 RUN_METRICS_METHOD = 'tracing.metrics.metric_runner.RunMetricOnSingleTrace'
 GETSIZE_METHOD = 'os.path.getsize'
-TRACE_PROCESSOR_METRIC_METHOD = 'core.tbmv3.trace_processor.RunMetric'
+TRACE_PROCESSOR_METRIC_METHOD = 'core.tbmv3.trace_processor.RunMetrics'
 
 
 class ComputeMetricsTest(unittest.TestCase):
...
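
Only the patch target changes here; the tests themselves can keep using the
same pattern. A minimal sketch of patching the renamed entry point, assuming
the mock conventions this file already uses:

    with mock.patch(TRACE_PROCESSOR_METRIC_METHOD) as run_metrics_patch:
      run_metrics_patch.return_value = histogram_set.HistogramSet()
      # With the batched API, the code under test should now hit
      # trace_processor.RunMetrics once per trace, not once per metric.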
@@ -813,3 +813,44 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
     self.assertEqual(hist1.unit, SAMPLE_HISTOGRAM_UNIT)
     hist2 = out_histograms.GetHistogramNamed('dummy::simple_field')
     self.assertEqual(hist2.unit, 'count_smallerIsBetter')
+
+  def testMultipleTBMv3Metrics(self):
+    self.SerializeIntermediateResults(
+        testing.TestResult(
+            'benchmark/story',
+            output_artifacts=[
+                self.CreateProtoTraceArtifact(),
+                self.CreateDiagnosticsArtifact(
+                    benchmarks=['benchmark'],
+                    osNames=['linux'],
+                    documentationUrls=[['documentation', 'url']])
+            ],
+            tags=['tbmv3:dummy_metric', 'tbmv3:test_chrome_metric'],
+            start_time='2009-02-13T23:31:30.987000Z',
+        ),
+    )
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label',
+        '--experimental-tbmv3-metrics',
+    ])
+
+    with open(os.path.join(
+        self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
+      results = json.load(f)
+
+    out_histograms = histogram_set.HistogramSet()
+    out_histograms.ImportDicts(results)
+
+    # We use two metrics for testing here. The dummy_metric is defined in
+    # tools/perf/core/tbmv3/metrics/dummy_metric_*.
+    # The test_chrome_metric is built into trace_processor, see source in
+    # third_party/perfetto/src/trace_processor/metrics/chrome/test_chrome_metric.sql.
+    hist1 = out_histograms.GetHistogramNamed('dummy::simple_field')
+    self.assertEqual(hist1.sample_values, [42])
+
+    hist2 = out_histograms.GetHistogramNamed('test_chrome::test_value')
+    self.assertEqual(hist2.sample_values, [1])
@@ -188,28 +188,30 @@ def _PluckField(json_dict, field_path):
     return _PluckField(field_value, path_tail)
 
 
-def RunMetric(trace_processor_path, trace_file, metric_name,
-              fetch_power_profile=False):
-  """Run a TBMv3 metric using trace processor.
+def RunMetrics(trace_processor_path, trace_file, metric_names,
+               fetch_power_profile=False):
+  """Run TBMv3 metrics using trace processor.
 
   Args:
     trace_processor_path: path to the trace_processor executable.
     trace_file: path to the trace file.
-    metric_name: the metric name (the corresponding files must exist in
-      tbmv3/metrics directory).
+    metric_names: a list of metric names (the corresponding files must exist in
+      tbmv3/metrics directory).
 
   Returns:
     A HistogramSet with metric results.
   """
   trace_processor_path = _EnsureTraceProcessor(trace_processor_path)
-  metric_files = _CreateMetricFiles(metric_name)
-  if metric_files.internal_metric:
-    metric_name_arg = metric_name
-  else:
-    metric_name_arg = metric_files.sql
+  metric_name_args = []
+  for metric_name in metric_names:
+    metric_files = _CreateMetricFiles(metric_name)
+    if metric_files.internal_metric:
+      metric_name_args.append(metric_name)
+    else:
+      metric_name_args.append(metric_files.sql)
 
   command_args = [
       trace_processor_path,
-      '--run-metrics', metric_name_arg,
+      '--run-metrics', ','.join(metric_name_args),
       '--metrics-output', 'json',
       trace_file,
   ]
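
To make the argument assembly concrete: for a hypothetical pair of metrics
where dummy_metric resolves to an external SQL file and test_chrome_metric
is built into trace_processor, the comma-join produces a single
--run-metrics value (paths are illustrative):

    metric_name_args = ['/tmp/metrics/dummy_metric.sql', 'test_chrome_metric']
    command_args = [
        '/path/to/trace_processor',
        '--run-metrics', ','.join(metric_name_args),
        '--metrics-output', 'json',
        '/path/to/trace.pb',
    ]
    # command_args[2] == '/tmp/metrics/dummy_metric.sql,test_chrome_metric'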
@@ -222,15 +224,17 @@ def RunMetric(trace_processor_path, trace_file, metric_name,
 
   histograms = histogram_set.HistogramSet()
   root_annotations = measurements.get('__annotations', {})
-  full_metric_name = 'perfetto.protos.' + metric_name
-  annotations = root_annotations.get(full_metric_name, None)
-  metric_proto = measurements.get(full_metric_name, None)
-  if metric_proto is None:
-    logging.warn("No metric found in the output.")
-    return histograms
-  elif annotations is None:
-    logging.info("Metric has no field with unit. Histograms will be empty.")
-    return histograms
+  for metric_name in metric_names:
+    full_metric_name = 'perfetto.protos.' + metric_name
+    annotations = root_annotations.get(full_metric_name, None)
+    metric_proto = measurements.get(full_metric_name, None)
+    if metric_proto is None:
+      logging.warn("Metric not found in the output: %s", metric_name)
+      continue
+    elif annotations is None:
+      logging.info("Skipping metric %s because it has no field with unit.",
+                   metric_name)
+      continue
 
-  for field in _LeafFieldAnnotations(annotations):
-    unit = field.field_options.get('unit', None)
+    for field in _LeafFieldAnnotations(annotations):
+      unit = field.field_options.get('unit', None)
@@ -246,6 +250,12 @@ def RunMetric(trace_processor_path, trace_file, metric_name,
   return histograms
 
 
+def RunMetric(trace_processor_path, trace_file, metric_name,
+              fetch_power_profile=False):
+  return RunMetrics(trace_processor_path, trace_file, [metric_name],
+                    fetch_power_profile)
+
+
 def ConvertProtoTraceToJson(trace_processor_path, proto_file, json_path):
   """Convert proto trace to json using trace processor.
...
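
Keeping RunMetric as a thin wrapper over RunMetrics preserves the existing
single-metric API, so callers can migrate at their own pace. An illustrative
comparison (paths and metric names are placeholders):

    # Old call sites keep working unchanged:
    histograms = RunMetric(tp_path, trace_file, 'dummy_metric')

    # New call sites batch metrics so the trace is parsed only once:
    histograms = RunMetrics(
        tp_path, trace_file, ['dummy_metric', 'test_chrome_metric'])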