Commit ef4670fb authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Support running TBMv3 metrics in Results Processor

This CL implements computing TBMv3 metrics with the Perfetto trace
processor. The metrics to compute are passed as test result tags with
the key "tbmv3".

Bug: 990304
Change-Id: I514396861f801d2d9889e5866bf95f2cae21be4e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1942651
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#720910}
parent bcbb11d6
@@ -7,13 +7,17 @@ import os
import time

from core.results_processor import util
from core.tbmv3 import trace_processor
from tracing.metrics import metric_runner

# Aggregated TBMv2 trace is saved under this name.
HTML_TRACE_NAME = 'trace.html'

# Concatenated proto trace is saved under this name.
CONCATENATED_PROTO_NAME = 'trace.pb'

def _RunMetric(test_result, metrics):
  html_trace = test_result['outputArtifacts'][HTML_TRACE_NAME]
@@ -60,7 +64,7 @@ def ComputeTBMv2Metrics(test_result):
  metrics = [tag['value'] for tag in test_result.get('tags', [])
             if tag['key'] == 'tbmv2']
  if not metrics:
    logging.info('%s: No TBMv2 metrics specified.', test_result['testPath'])
    return

  if HTML_TRACE_NAME not in artifacts:
@@ -80,3 +84,29 @@ def ComputeTBMv2Metrics(test_result):
    return

  test_result['_histograms'].ImportDicts(_RunMetric(test_result, metrics))

def ComputeTBMv3Metrics(test_result, trace_processor_path):
  artifacts = test_result.get('outputArtifacts', {})

  if test_result['status'] == 'SKIP':
    return

  metrics = [tag['value'] for tag in test_result.get('tags', [])
             if tag['key'] == 'tbmv3']
  if not metrics:
    logging.info('%s: No TBMv3 metrics specified.', test_result['testPath'])
    return

  if CONCATENATED_PROTO_NAME not in artifacts:
    util.SetUnexpectedFailure(test_result)
    logging.error('%s: No proto traces to compute metrics on.',
                  test_result['testPath'])
    return

  for metric in metrics:
    logging.info('%s: Computing metric %s.', test_result['testPath'], metric)
    histograms = trace_processor.RunMetric(
        trace_processor_path, artifacts[CONCATENATED_PROTO_NAME]['filePath'],
        metric)
    test_result['_histograms'].Merge(histograms)
@@ -18,6 +18,7 @@ import mock
RUN_METRICS_METHOD = 'tracing.metrics.metric_runner.RunMetricOnSingleTrace'
GETSIZE_METHOD = 'os.path.getsize'
TRACE_PROCESSOR_METRIC_METHOD = 'core.tbmv3.trace_processor.RunMetric'

class ComputeMetricsTest(unittest.TestCase):
@@ -108,3 +109,24 @@ class ComputeMetricsTest(unittest.TestCase):
    histogram_dicts = test_result['_histograms'].AsDicts()
    self.assertEqual(histogram_dicts, [])
    self.assertEqual(test_result['status'], 'SKIP')

  def testComputeTBMv3Metrics(self):
    test_result = testing.TestResult(
        'benchmark/story1',
        output_artifacts={
            compute_metrics.CONCATENATED_PROTO_NAME:
                testing.Artifact('/concatenated.pb')},
        tags=['tbmv3:metric'],
    )
    test_result['_histograms'] = histogram_set.HistogramSet()

    metric_result = histogram_set.HistogramSet()
    metric_result.CreateHistogram('a', 'unitless', [0])

    with mock.patch(TRACE_PROCESSOR_METRIC_METHOD) as run_metric_mock:
      run_metric_mock.return_value = metric_result
      compute_metrics.ComputeTBMv3Metrics(test_result, '/path/to/tp')

    histogram_dicts = test_result['_histograms'].AsDicts()
    self.assertEqual(histogram_dicts, metric_result.AsDicts())
    self.assertEqual(test_result['status'], 'PASS')
@@ -11,12 +11,14 @@ the standalone version of Results Processor.
from __future__ import print_function

import datetime
import gzip
import json
import logging
import os
import posixpath
import random
import re
import shutil

from py_utils import cloud_storage

from core.results_processor import command_line
@@ -103,12 +105,14 @@ def ProcessTestResult(test_result, upload_bucket, results_label,
                      max_num_values, test_path_format, trace_processor_path):
  ConvertProtoTraces(test_result, trace_processor_path)
  AggregateTBMv2Traces(test_result)
  AggregateTBMv3Traces(test_result)
  if upload_bucket is not None:
    UploadArtifacts(test_result, upload_bucket, run_identifier)
  if should_compute_metrics:
    test_result['_histograms'] = histogram_set.HistogramSet()
    compute_metrics.ComputeTBMv2Metrics(test_result)
    compute_metrics.ComputeTBMv3Metrics(test_result, trace_processor_path)
    ExtractMeasurements(test_result)
    num_values = len(test_result['_histograms'])
    if max_num_values is not None and num_values > max_num_values:
@@ -185,6 +189,9 @@ def ConvertProtoTraces(test_result, trace_processor_path):
  artifacts = test_result.get('outputArtifacts', {})
  proto_traces = [name for name in artifacts if _IsProtoTrace(name)]

  # TODO(crbug.com/990304): Once TBMv3-style clock sync is implemented, it
  # will be possible to convert the aggregated proto trace rather than each
  # individual one.
  for proto_trace_name in proto_traces:
    proto_file_path = artifacts[proto_trace_name]['filePath']
    logging.info('%s: Converting proto trace %s.',
@@ -224,6 +231,35 @@ def AggregateTBMv2Traces(test_result):
      del artifacts[name]

def AggregateTBMv3Traces(test_result):
  """Replace individual proto traces with a single aggregate trace.

  For a test result with proto traces, concatenates them into one file,
  removes the entries for the individual traces, and adds one entry for
  the aggregate trace.
  """
  artifacts = test_result.get('outputArtifacts', {})
  traces = [name for name in artifacts if _IsProtoTrace(name)]
  if traces:
    proto_files = [artifacts[name]['filePath'] for name in traces]
    concatenated_path = _BuildOutputPath(
        proto_files, compute_metrics.CONCATENATED_PROTO_NAME)
    # Open the output in binary mode: proto traces are binary data, and
    # gzipped inputs are decompressed on the fly.
    with open(concatenated_path, 'wb') as concatenated_trace:
      for trace_file in proto_files:
        if trace_file.endswith('.pb.gz'):
          with gzip.open(trace_file, 'rb') as f:
            shutil.copyfileobj(f, concatenated_trace)
        else:
          with open(trace_file, 'rb') as f:
            shutil.copyfileobj(f, concatenated_trace)
    artifacts[compute_metrics.CONCATENATED_PROTO_NAME] = {
        'filePath': concatenated_path,
        'contentType': 'application/x-protobuf',
    }
    for name in traces:
      del artifacts[name]
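Raw byte concatenation is sound here because of a protobuf wire-format property: parsing the concatenation of two serialized messages yields one message whose repeated fields are merged in order, and a Perfetto trace is essentially one repeated TracePacket field. A minimal sketch of that property, using FileDescriptorSet from the standard protobuf package as a stand-in for perfetto.protos.Trace:

    from google.protobuf import descriptor_pb2

    # Two messages, each carrying one element of a repeated field.
    a = descriptor_pb2.FileDescriptorSet()
    a.file.add(name='trace_part_a.proto')
    b = descriptor_pb2.FileDescriptorSet()
    b.file.add(name='trace_part_b.proto')

    # Parsing the concatenated bytes yields both elements, in order.
    merged = descriptor_pb2.FileDescriptorSet.FromString(
        a.SerializeToString() + b.SerializeToString())
    assert [f.name for f in merged.file] == [
        'trace_part_a.proto', 'trace_part_b.proto']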
def RunIdentifier(results_label, test_suite_start):
  """Construct an identifier for the current script run"""
  if results_label:
...
@@ -33,14 +33,20 @@ from tracing_build import render_histograms_viewer
import mock

# For testing the TBMv2 workflow we use sampleMetric defined in
# third_party/catapult/tracing/tracing/metrics/sample_metric.html.
# This metric ignores the trace data and outputs a histogram with
# the following name and unit:
SAMPLE_HISTOGRAM_NAME = 'foo'
SAMPLE_HISTOGRAM_UNIT = 'sizeInBytes_smallerIsBetter'

# For testing the TBMv3 workflow we use dummy_metric defined in
# tools/perf/core/tbmv3/metrics/dummy_metric_*.
# This metric ignores the trace data and outputs a histogram with
# the following name and unit:
DUMMY_HISTOGRAM_NAME = 'dummy::foo'
DUMMY_HISTOGRAM_UNIT = 'count_biggerIsBetter'
class ResultsProcessorIntegrationTests(unittest.TestCase):

  def setUp(self):
@@ -64,6 +70,14 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
    return (compute_metrics.HTML_TRACE_NAME,
            testing.Artifact(artifact_file.name))

  def CreateProtoTraceArtifact(self):
    """Create an empty file as a fake proto trace."""
    with tempfile.NamedTemporaryFile(
        dir=self.intermediate_dir, delete=False) as artifact_file:
      pass
    return (compute_metrics.CONCATENATED_PROTO_NAME,
            testing.Artifact(artifact_file.name))

  def CreateDiagnosticsArtifact(self, **diagnostics):
    """Create an artifact with diagnostics."""
    with tempfile.NamedTemporaryFile(
@@ -653,3 +667,51 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
        '--intermediate-dir', self.intermediate_dir])
    self.assertEqual(exit_code, 0)

  # TODO(crbug.com/990304): Enable this test when the long-term solution for
  # building the trace_processor_shell on all platforms is found.
  @unittest.skip('crbug.com/990304')
  def testHistogramsOutput_TBMv3(self):
    self.SerializeIntermediateResults(
        testing.TestResult(
            'benchmark/story',
            output_artifacts=[
                self.CreateProtoTraceArtifact(),
                self.CreateDiagnosticsArtifact(
                    benchmarks=['benchmark'],
                    osNames=['linux'],
                    documentationUrls=[['documentation', 'url']])
            ],
            tags=['tbmv3:dummy_metric'],
            start_time='2009-02-13T23:31:30.987000Z',
        ),
    )

    processor.main([
        '--output-format', 'histograms',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', 'label',
    ])

    with open(os.path.join(
        self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
      results = json.load(f)

    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(results)

    hist = out_histograms.GetHistogramNamed(DUMMY_HISTOGRAM_NAME)
    self.assertEqual(hist.unit, DUMMY_HISTOGRAM_UNIT)
    self.assertEqual(hist.diagnostics['benchmarks'],
                     generic_set.GenericSet(['benchmark']))
    self.assertEqual(hist.diagnostics['osNames'],
                     generic_set.GenericSet(['linux']))
    self.assertEqual(hist.diagnostics['documentationUrls'],
                     generic_set.GenericSet([['documentation', 'url']]))
    self.assertEqual(hist.diagnostics['labels'],
                     generic_set.GenericSet(['label']))
    self.assertEqual(hist.diagnostics['benchmarkStart'],
                     date_range.DateRange(1234567890987))
// Copyright 2019 Google LLC.
// SPDX-License-Identifier: Apache-2.0

syntax = "proto2";

option optimize_for = LITE_RUNTIME;

package perfetto.protos;

import "protos/perfetto/metrics/metrics.proto";

message DummyMetric {
  optional int64 foo = 1;
}

extend TraceMetrics {
  optional DummyMetric dummy_metric = 451;
}
-- Copyright 2019 Google LLC.
-- SPDX-License-Identifier: Apache-2.0

CREATE VIEW dummy_metric_output AS
SELECT DummyMetric('foo', 42);
{
  "name": "Dummy Metric",
  "description": "A dummy metric for tests.",
  "histograms": [
    {
      "name": "foo",
      "description": "bar",
      "unit": "count_biggerIsBetter"
    }
  ]
}
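Given these three files, the new core.tbmv3.trace_processor.RunMetric wrapper (mocked in the unit test above) is what runs the metric and turns its output into histograms. A minimal usage sketch; the paths are placeholders, and a real run needs a trace_processor_shell build that bundles dummy_metric:

    from core.tbmv3 import trace_processor

    # Placeholder paths; the trace would be the concatenated proto produced
    # by AggregateTBMv3Traces.
    histograms = trace_processor.RunMetric(
        '/path/to/trace_processor_shell', '/path/to/trace.pb', 'dummy_metric')
    hist = histograms.GetHistogramNamed('dummy::foo')
    print(hist.unit)  # expect: count_biggerIsBetter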