Commit 3cba8746 authored by Deepanjan Roy, committed by Commit Bot

[tbmv3] Remove json config and use proto annotations

Now that trace processor can read and emit proto annotations, we no
longer need a separate json config.

I turned the descriptions into comments, and they will likely stay that
way because descriptions are rarely used. We eventually want
requiredCategories to be a file/message option instead of a comment, but
right now trace processor only supports field options, not file/message
options, so this will have to wait until that's implemented.
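
For reference, trace processor now emits the field options alongside the
metric values in its JSON output. A rough sketch of that shape, based on
the dummy_metric unit tests in this change (illustrative, not the exact
output):

  # Trace processor's JSON output carries the proto field options
  # (including the unit) under a parallel "__annotations" tree.
  metric_output = {
      "perfetto.protos.dummy_metric": {
          "simple_field": 42,
      },
      "__annotations": {
          "perfetto.protos.dummy_metric": {
              "simple_field": {
                  "__field_options": {"unit": "count_smallerIsBetter"},
              },
          },
      },
  }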

Bug: 1012687
Change-Id: I4ef713ebef151e959b9c373a220f9f2630415ed5
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2090452
Reviewed-by: Deep Roy <dproy@chromium.org>
Reviewed-by: Mikhail Khokhlov <khokhlov@google.com>
Commit-Queue: Deep Roy <dproy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#747777}
parent 0103fe2c
......@@ -40,13 +40,6 @@ import mock
SAMPLE_HISTOGRAM_NAME = 'foo'
SAMPLE_HISTOGRAM_UNIT = 'sizeInBytes_smallerIsBetter'
# For testing the TBMv3 workflow we use dummy_metric defined in
# tools/perf/core/tbmv3/metrics/dummy_metric_*.
# This metric ignores the trace data and outputs a histogram with
# the following name and unit:
DUMMY_HISTOGRAM_NAME = 'dummy::foo'
DUMMY_HISTOGRAM_UNIT = 'count_biggerIsBetter'
class ResultsProcessorIntegrationTests(unittest.TestCase):
def setUp(self):
......@@ -707,8 +700,10 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
out_histograms = histogram_set.HistogramSet()
out_histograms.ImportDicts(results)
hist = out_histograms.GetHistogramNamed(DUMMY_HISTOGRAM_NAME)
self.assertEqual(hist.unit, DUMMY_HISTOGRAM_UNIT)
# For testing the TBMv3 workflow we use dummy_metric defined in
# tools/perf/core/tbmv3/metrics/dummy_metric_*.
hist = out_histograms.GetHistogramNamed('dummy::simple_field')
self.assertEqual(hist.unit, 'count_smallerIsBetter')
self.assertEqual(hist.diagnostics['benchmarks'],
generic_set.GenericSet(['benchmark']))
......@@ -720,3 +715,66 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
generic_set.GenericSet(['label']))
self.assertEqual(hist.diagnostics['benchmarkStart'],
date_range.DateRange(1234567890987))
def testComplexMetricOutput_TBMv3(self):
self.SerializeIntermediateResults(
testing.TestResult(
'benchmark/story',
output_artifacts=[
self.CreateProtoTraceArtifact(),
self.CreateDiagnosticsArtifact(
benchmarks=['benchmark'],
osNames=['linux'],
documentationUrls=[['documentation', 'url']])
],
tags=['tbmv3:dummy_metric'],
start_time='2009-02-13T23:31:30.987000Z',
),
)
processor.main([
'--output-format', 'histograms',
'--output-dir', self.output_dir,
'--intermediate-dir', self.intermediate_dir,
'--results-label', 'label',
'--experimental-tbmv3-metrics',
])
with open(os.path.join(
self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
results = json.load(f)
# For testing the TBMv3 workflow we use dummy_metric defined in
# tools/perf/core/tbmv3/metrics/dummy_metric_*.
out_histograms = histogram_set.HistogramSet()
out_histograms.ImportDicts(results)
simple_field = out_histograms.GetHistogramNamed(
"dummy::simple_field")
self.assertEqual(simple_field.unit, "count_smallerIsBetter")
self.assertEqual((simple_field.num_values, simple_field.average), (1, 42))
repeated_field = out_histograms.GetHistogramNamed(
"dummy::repeated_field")
self.assertEqual(repeated_field.unit, "ms_biggerIsBetter")
self.assertEqual(repeated_field.num_values, 3)
self.assertEqual(repeated_field.sample_values, [1, 2, 3])
# Unannotated fields should not be included in final histogram output.
simple_nested_unannotated = out_histograms.GetHistogramsNamed(
"dummy::simple_nested:unannotated_field")
self.assertEqual(len(simple_nested_unannotated), 0)
repeated_nested_unannotated = out_histograms.GetHistogramsNamed(
"dummy::repeated_nested:unannotated_field")
self.assertEqual(len(repeated_nested_unannotated), 0)
simple_nested_annotated = out_histograms.GetHistogramNamed(
"dummy::simple_nested:annotated_field")
self.assertEqual(simple_nested_annotated.unit, "ms_smallerIsBetter")
self.assertEqual(simple_nested_annotated.num_values, 1)
self.assertEqual(simple_nested_annotated.average, 44)
repeated_nested_annotated = out_histograms.GetHistogramNamed(
"dummy::repeated_nested:annotated_field")
self.assertEqual(repeated_nested_annotated.unit, "ms_smallerIsBetter")
self.assertEqual(repeated_nested_annotated.num_values, 2)
self.assertEqual(repeated_nested_annotated.sample_values, [2, 4])
......@@ -6,11 +6,13 @@ syntax = "proto2";
package perfetto.protos;
import "protos/perfetto/metrics/metrics.proto";
// TODO(crbug.com/1012687): Ideally we won't need this second import.
import "protos/perfetto/metrics/custom_options.proto";
message ConsoleErrorMetric {
optional int64 all_errors = 1;
optional int64 js_errors = 2;
optional int64 network_errors = 3;
optional int64 all_errors = 1 [(unit) = "count_smallerIsBetter"];
optional int64 js_errors = 2 [(unit) = "count_smallerIsBetter"];
optional int64 network_errors = 3 [(unit) = "count_smallerIsBetter"];
}
extend TraceMetrics {
......
{
"name": "Console Error Metrics",
"description": "Counts the number of console errors seen in the trace.",
"histograms": [
{
"name": "all_errors",
"description": "Number of all console error messages",
"requiredCategories": ["v8", "blink.console"],
"unit": "count_smallerIsBetter"
},
{
"name": "js_errors",
"description": "Number of js console error messages",
"unit": "count_smallerIsBetter"
},
{
"name": "network_errors",
"description": "Number of network console error messages",
"unit": "count_smallerIsBetter"
}
]
}
......@@ -6,9 +6,18 @@ syntax = "proto2";
package perfetto.protos;
import "protos/perfetto/metrics/metrics.proto";
import "protos/perfetto/metrics/custom_options.proto";
message NestedMetric {
optional int64 unannotated_field = 1;
optional int64 annotated_field = 2 [(unit) = "ms_smallerIsBetter"];
}
message DummyMetric {
optional int64 foo = 1;
optional int64 simple_field = 1 [(unit) = "count_smallerIsBetter"];
repeated int64 repeated_field = 2 [(unit) = "ms_biggerIsBetter"];
optional NestedMetric simple_nested = 3 [(unit) = "ms_smallerIsBetter"];
repeated NestedMetric repeated_nested = 4;
}
extend TraceMetrics {
......
-- Copyright 2019 Google LLC.
-- Copyright 2020 Google LLC.
-- SPDX-License-Identifier: Apache-2.0
CREATE VIEW dummy_metric_output AS
SELECT DummyMetric('foo', 42)
SELECT DummyMetric(
'simple_field', 42,
'repeated_field', (
WITH cte(col1) AS (VALUES (1), (2), (3))
SELECT RepeatedField(col1) FROM cte
),
'simple_nested', NestedMetric(
'unannotated_field', 43,
'annotated_field', 44
),
'repeated_nested', (
WITH cte(col1, col2) AS (VALUES (1, 2), (3, 4))
SELECT RepeatedField(
NestedMetric(
'unannotated_field', col1,
'annotated_field', col2
)
) FROM cte
)
);
{
"name": "Dummy Metric",
"description": "A dummy metric for tests.",
"histograms": [
{
"name": "foo",
"description": "bar",
"unit": "count_biggerIsBetter"
}
]
}
......@@ -6,13 +6,15 @@ syntax = "proto2";
package perfetto.protos;
import "protos/perfetto/metrics/metrics.proto";
import "protos/perfetto/metrics/custom_options.proto";
// A metric that counts the number of janky scroll gestures and the number
// of periods of continuous jankiness.
// Required category: latencyInfo
message JankyScrollPeriods {
optional int64 num_gestures = 1;
optional int64 num_periods = 2;
optional int64 num_gestures = 1 [(unit) = "count_smallerIsBetter"];
optional int64 num_periods = 2 [(unit) = "count_smallerIsBetter"];
}
extend TraceMetrics {
......
{
"name": "Janky Scroll Periods Metric",
"description": "Counts the number of janky scroll gestures and periods of continuous jankiness.",
"histograms": [
{
"name": "num_gestures",
"description": "Number of all janky gestures.",
"requiredCategories": ["latencyInfo"],
"unit": "count_smallerIsBetter"
},
{
"name": "num_periods",
"description": "Number of all janky periods.",
"requiredCategories": ["latencyInfo"],
"unit": "count_smallerIsBetter"
}
]
}
......@@ -7,16 +7,28 @@ syntax = "proto2";
package perfetto.protos;
import "protos/perfetto/metrics/metrics.proto";
import "protos/perfetto/metrics/custom_options.proto";
// A collection of metrics related to GestureScroll events. See the sql file for
// a detailed description of the variables below.
// requiredCategories: latencyInfo.
message JankyTimePerScrollProcessingTime {
optional double janky_time_per_scroll_processing_time_percentage = 1;
optional int64 gesture_scroll_milliseconds = 2;
optional int64 janky_gesture_scroll_milliseconds = 3;
optional int64 gesture_scroll_processing_milliseconds = 4;
optional int64 num_janky_gesture_scroll_updates = 5;
// The percentage of time we consider janky of the total time spent on scrolling.
optional double janky_time_per_scroll_processing_time_percentage = 1
[(unit) = "n%_smallerIsBetter"];
// The time in milliseconds we spent scrolling this trace.
optional int64 gesture_scroll_milliseconds = 2
[(unit) = "ms_biggerIsBetter"];
// The time in milliseconds we spent scrolling this trace that we consider janky.
optional int64 janky_gesture_scroll_milliseconds = 3
[(unit) = "ms_smallerIsBetter"];
// The time in milliseconds we spent processing updates to the scroll this trace.
optional int64 gesture_scroll_processing_milliseconds = 4
[(unit) = "ms_biggerIsBetter"];
// The number of individual gesture scroll updates that we considered janky.
optional int64 num_janky_gesture_scroll_updates = 5
[(unit) = "count_smallerIsBetter"];
}
extend TraceMetrics {
......
{
"name": "Janky Time per Processing Time Metric",
"description": "The percentage of time spent processing a scroll that we consider janky",
"histograms": [
{
"name": "janky_time_per_scroll_processing_time_percentage",
"description": "The percentage of time we consider janky of the total time spent on scrolling.",
"requiredCategories": ["latencyInfo"],
"unit": "n%_smallerIsBetter"
},
{
"name": "gesture_scroll_milliseconds",
"description": "The time in milliseconds we spent scrolling this trace.",
"requiredCategories": ["latencyInfo"],
"unit": "ms_biggerIsBetter"
},
{
"name": "janky_gesture_scroll_milliseconds",
"description": "The time in milliseconds we spent scrolling this trace that we consider janky.",
"requiredCategories": ["latencyInfo"],
"unit": "ms_smallerIsBetter"
},
{
"name": "gesture_scroll_processing_milliseconds",
"description": "The time in milliseconds we spent processing updates to the scroll this trace.",
"requiredCategories": ["latencyInfo"],
"unit": "ms_biggerIsBetter"
},
{
"name": "num_janky_gesture_scroll_updates",
"description": "The number of individual gesture scroll updates that we considered janky.",
"requiredCategories": ["latencyInfo"],
"unit": "count_smallerIsBetter"
}
]
}
......@@ -6,15 +6,19 @@ syntax = "proto2";
package perfetto.protos;
import "protos/perfetto/metrics/metrics.proto";
import "protos/perfetto/metrics/custom_options.proto";
// The metric finds periods in a trace when scroll update events started
// but didn't finish. The idea is that it's perceived by the user as a scroll
// delay, i.e. jank.
// requiredCategories = ["latencyInfo"]. Will be a message option field once
// tooling exists.
message ScrollJankMetric {
optional int64 num_janks = 1;
optional int64 num_big_janks = 2;
optional float total_jank_duration = 3;
optional int64 num_janks = 1 [(unit) = "count_smallerIsBetter"];
optional int64 num_big_janks = 2 [(unit) = "count_smallerIsBetter"];
optional float total_jank_duration = 3 [(unit) = "ms_smallerIsBetter"];
}
extend TraceMetrics {
......
{
"name": "Scroll Jank Metric",
"description": "Counts the number of janky periods during scroll.",
"histograms": [
{
"name": "num_janks",
"description": "Number of all janky periods.",
"requiredCategories": ["latencyInfo"],
"unit": "count_smallerIsBetter"
},
{
"name": "num_big_janks",
"description": "Number of big janky periods.",
"requiredCategories": ["latencyInfo"],
"unit": "count_smallerIsBetter"
},
{
"name": "total_jank_duration",
"description": "Total duration of all janky periods.",
"requiredCategories": ["latencyInfo"],
"unit": "ms_smallerIsBetter"
}
]
}
......@@ -28,6 +28,7 @@ _DEFAULT_TP_PATH = os.path.realpath(os.path.join(
_CHROMIUM_SRC_PATH, 'out', 'Debug', 'trace_processor_shell'))
def _WriteHistogramSetToFile(histograms, outfile):
with open(outfile, 'w') as f:
json.dump(histograms.AsDicts(), f, indent=2, sort_keys=True,
......
......@@ -20,7 +20,12 @@ EXPORT_JSON_QUERY_TEMPLATE = 'select export_json(%s)\n'
METRICS_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__),
'metrics'))
MetricFiles = namedtuple('MetricFiles', ('sql', 'proto', 'config'))
MetricFiles = namedtuple('MetricFiles', ('sql', 'proto'))
class InvalidTraceProcessorOutput(Exception):
pass
# This will be set to a path once trace processor has been fetched
# to avoid downloading several times during one Results Processor run.
......@@ -68,8 +73,7 @@ def _CreateMetricFiles(metric_name):
# revise this decision later.
metric_files = MetricFiles(
sql=os.path.join(METRICS_PATH, metric_name + '.sql'),
proto=os.path.join(METRICS_PATH, metric_name + '.proto'),
config=os.path.join(METRICS_PATH, metric_name + '_config.json'))
proto=os.path.join(METRICS_PATH, metric_name + '.proto'))
for filetype, path in metric_files._asdict().iteritems():
if not os.path.isfile(path):
raise RuntimeError('metric %s file not found at %s' % (filetype, path))
......@@ -95,6 +99,92 @@ def _ScopedHistogramName(metric_name, histogram_name):
return '::'.join([scope, histogram_name])
class ProtoFieldInfo(object):
def __init__(self, name, parent, repeated, field_options):
self.name = name
self.parent = parent
self.repeated = repeated
self.field_options = field_options
@property
def path_from_root(self):
if self.parent is None:
return [self]
else:
return self.parent.path_from_root + [self]
def __repr__(self):
return 'ProtoFieldInfo("%s", repeated=%s)' % (self.name, self.repeated)
def _LeafFieldAnnotations(annotations, parent=None):
"""Yields leaf fields in the annotations tree, yielding a proto field info
each time. Given the following annotation:
__annotations: {
a: {
__field_options: { },
b: { __field_options: { unit: "count" }, __repeated: True },
c: { __field_options: { unit: "ms" } }
}
}
It yields:
ProtoFieldInfo(name="b", parent=FieldInfoForA,
repeated=True, field_options={unit: "count"})
ProtoFieldInfo(name="c", parent=FieldInfoForA,
repeated=False, field_options={unit: "ms"})
"""
for (name, field_value) in annotations.items():
if name[:2] == "__":
continue # internal fields.
current_field = ProtoFieldInfo(
name=name,
parent=parent,
repeated=field_value.get('__repeated', False),
field_options=field_value.get('__field_options', {}))
has_no_descendants = True
for descendant in _LeafFieldAnnotations(field_value, current_field):
has_no_descendants = False
yield descendant
if has_no_descendants:
yield current_field
def _PluckField(json_dict, field_path):
"""Returns the values of fields matching field_path from json dict.
Field path is a sequence of ProtoFieldInfo starting from the root dict. Arrays
are flattened along the way. For example, consider the following json dict:
{
a: {
b: [ { c: 24 }, { c: 25 } ],
d: 42,
}
}
Field path (a, d) returns [42]. Field path (a, b, c) returns [24, 25].
"""
if len(field_path) == 0:
return [json_dict]
path_head = field_path[0]
path_tail = field_path[1:]
if path_head.repeated:
field_values = json_dict[path_head.name]
if not isinstance(field_values, list):
raise InvalidTraceProcessorOutput(
"Field marked as repeated but json value is not list")
output = []
for field_value in field_values:
output.extend(_PluckField(field_value, path_tail))
return output
else:
field_value = json_dict[path_head.name]
if isinstance(field_value, list):
raise InvalidTraceProcessorOutput(
"Field not marked as repeated but json value is list")
return _PluckField(field_value, path_tail)
def RunMetric(trace_processor_path, trace_file, metric_name):
"""Run a TBMv3 metric using trace processor.
......@@ -118,17 +208,28 @@ def RunMetric(trace_processor_path, trace_file, metric_name):
measurements = json.loads(output)
histograms = histogram_set.HistogramSet()
with open(metric_files.config) as f:
config = json.load(f)
metric_root_field = 'perfetto.protos.' + metric_name
for histogram_config in config['histograms']:
histogram_name = histogram_config['name']
samples = measurements[metric_root_field][histogram_name]
root_annotations = measurements.get('__annotations', {})
full_metric_name = 'perfetto.protos.' + metric_name
annotations = root_annotations.get(full_metric_name, None)
metric_proto = measurements.get(full_metric_name, None)
if metric_proto is None:
logging.warn("No metric found in the output.")
return histograms
elif annotations is None:
logging.info("Metric has no field with unit. Histograms will be empty.")
return histograms
for field in _LeafFieldAnnotations(annotations):
unit = field.field_options.get('unit', None)
if unit is None:
logging.debug('Skipping field %s because it has no unit',
field.name)
continue
histogram_name = ':'.join([field.name for field in field.path_from_root])
samples = _PluckField(metric_proto, field.path_from_root)
scoped_histogram_name = _ScopedHistogramName(metric_name, histogram_name)
description = histogram_config['description']
histograms.CreateHistogram(scoped_histogram_name,
histogram_config['unit'], samples,
description=description)
histograms.CreateHistogram(scoped_histogram_name, unit, samples)
return histograms
......
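
Taken together, _LeafFieldAnnotations and _PluckField drive the new
RunMetric loop above: the first walks the "__annotations" tree to find
annotated leaf fields, the second collects their sample values, flattening
repeated fields along the way. A minimal self-contained sketch of that
walk, using the example data from the docstrings (simplified; unlike the
real helpers it does not validate __repeated against the JSON shape):

  def leaf_fields(annotations, prefix=()):
      # Yield (path, field_options) for every leaf field, skipping the
      # '__repeated' / '__field_options' metadata keys.
      for name, value in annotations.items():
          if name.startswith('__'):
              continue
          path = prefix + (name,)
          leaves = list(leaf_fields(value, path))
          if leaves:
              for leaf in leaves:
                  yield leaf
          else:
              yield path, value.get('__field_options', {})

  def pluck(value, path):
      # Collect the values at `path`, flattening list (repeated) fields.
      if not path:
          return [value]
      child = value[path[0]]
      if isinstance(child, list):
          samples = []
          for item in child:
              samples.extend(pluck(item, path[1:]))
          return samples
      return pluck(child, path[1:])

  metric = {'a': {'b': [{'c': 24}, {'c': 25}], 'd': 42}}
  annotations = {'a': {
      'b': {'__repeated': True,
            'c': {'__field_options': {'unit': 'ms_smallerIsBetter'}}},
      'd': {'__field_options': {'unit': 'count_smallerIsBetter'}},
  }}

  for path, options in leaf_fields(annotations):
      if 'unit' not in options:
          continue  # Fields without a unit never become histograms.
      # RunMetric joins the path with ':' and prepends the metric scope.
      print(':'.join(path), options['unit'], pluck(metric, path))
      # -> a:b:c ms_smallerIsBetter [24, 25]
      # -> a:d count_smallerIsBetter [42]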
......@@ -2,7 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
......@@ -24,19 +23,6 @@ class TraceProcessorTestCase(unittest.TestCase):
pass
with open(os.path.join(self.temp_dir, 'dummy_metric.proto'), 'w'):
pass
with open(os.path.join(self.temp_dir,
'dummy_metric_config.json'), 'w') as config:
json.dump(
{
'name': 'Dummy Metric',
'histograms': [{
'name': 'value',
'description': 'dummy value',
'unit': 'count_smallerIsBetter',
}],
},
config,
)
def tearDown(self):
shutil.rmtree(self.temp_dir)
......@@ -46,8 +32,23 @@ class TraceProcessorTestCase(unittest.TestCase):
trace_processor.ConvertProtoTraceToJson(
self.tp_path, '/path/to/proto', '/path/to/json')
def testRunMetric(self):
metric_output = '{"perfetto.protos.dummy_metric": {"value": 7}}'
def testRunMetricNoRepeated(self):
metric_output = """
{
"perfetto.protos.dummy_metric": {
"foo": 7,
"bar": 8
},
"__annotations": {
"perfetto.protos.dummy_metric": {
"foo": {
"__field_options": {
"unit": "count_biggerIsBetter"
}
}
}
}
}"""
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
......@@ -55,7 +56,120 @@ class TraceProcessorTestCase(unittest.TestCase):
histograms = trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')
hist = histograms.GetHistogramNamed('dummy::value')
self.assertEqual(hist.unit, 'count_smallerIsBetter')
self.assertEqual(hist.sample_values, [7])
foo_hist = histograms.GetHistogramNamed('dummy::foo')
self.assertEqual(foo_hist.unit, 'count_biggerIsBetter')
self.assertEqual(foo_hist.sample_values, [7])
bar_hists = histograms.GetHistogramsNamed('dummy::bar')
self.assertEqual(len(bar_hists), 0)
def testRunMetricRepeated(self):
metric_output = """
{
"perfetto.protos.dummy_metric": {
"foo": [4, 5, 6],
"bar": [{"baz": 10}, {"baz": 11}]
},
"__annotations": {
"perfetto.protos.dummy_metric": {
"foo": {
"__repeated": true,
"__field_options": {
"unit": "count_biggerIsBetter"
}
},
"bar": {
"__repeated": true,
"baz": {
"__field_options": {
"unit": "ms_smallerIsBetter"
}
}
}
}
}
}"""
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
run_patch.return_value = metric_output
histograms = trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')
foo_hist = histograms.GetHistogramNamed('dummy::foo')
self.assertEqual(foo_hist.unit, 'count_biggerIsBetter')
self.assertEqual(foo_hist.sample_values, [4, 5, 6])
baz_hist = histograms.GetHistogramNamed('dummy::bar:baz')
self.assertEqual(baz_hist.unit, 'ms_smallerIsBetter')
self.assertEqual(baz_hist.sample_values, [10, 11])
def testMarkedRepeatedButValueIsNotList(self):
metric_output = """
{
"perfetto.protos.dummy_metric": {
"foo": 4
},
"__annotations": {
"perfetto.protos.dummy_metric": {
"foo": {
"__repeated": true,
"__field_options": {
"unit": "count_biggerIsBetter"
}
}
}
}
}"""
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
run_patch.return_value = metric_output
with self.assertRaises(trace_processor.InvalidTraceProcessorOutput):
trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')
def testMarkedNotRepeatedButValueIsList(self):
metric_output = """
{
"perfetto.protos.dummy_metric": {
"foo": [1, 2, 3]
},
"__annotations": {
"perfetto.protos.dummy_metric": {
"foo": {
"__field_options": {
"unit": "count_biggerIsBetter"
}
}
}
}
}"""
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
run_patch.return_value = metric_output
with self.assertRaises(trace_processor.InvalidTraceProcessorOutput):
trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')
def testRunMetricEmpty(self):
metric_output = '{}'
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
run_patch.return_value = metric_output
# Checking that this doesn't throw errors.
trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')
def testRunMetricNoAnnotations(self):
metric_output = '{"perfetto.protos.foo": {"bar": 42}}'
with mock.patch('core.tbmv3.trace_processor.METRICS_PATH', self.temp_dir):
with mock.patch(RUN_METHOD) as run_patch:
run_patch.return_value = metric_output
# Checking that this doesn't throw errors.
trace_processor.RunMetric(
self.tp_path, '/path/to/proto', 'dummy_metric')