Commit b00de377 authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Implement histograms_output result formatter

This formatter takes serialized histograms from test artifacts, adds
diagnostics to them, and saves the result to a file. Ad-hoc values are not
supported yet.

Bug: 981349
Change-Id: Iab6cbf40d4e3ddc118603a4eaa38dc58a9ae8163
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1796670
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#699289}
parent 436cd238
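
The new output format is exercised end to end by the integration tests added below. A minimal usage sketch of the same flow is shown here; the directories are hypothetical placeholders, the flags match those used in the tests, and at this change the format is still gated behind the SUPPORTED_FORMATS mock used in processor_unittest.py:

from core.results_processor import processor

# Write histograms.json into the output directory, merging histogram dicts
# from all test artifacts and attaching shared diagnostics plus the label.
# The two directory paths are hypothetical placeholders.
processor.main([
    '--output-format', 'histograms',
    '--output-dir', '/tmp/perf_output',
    '--intermediate-dir', '/tmp/perf_intermediate',
    '--results-label', 'my-label',
])
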
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Output formatter for HistogramSet Results Format.

Format specification:
https://github.com/catapult-project/catapult/blob/master/docs/histogram-set-json-format.md
"""

import json
import logging
import os

from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
from tracing.value import histogram_set


# This file is written by Telemetry; it contains the output of metric
# computation. This is a temporary hack to keep things working while we
# gradually move code from Telemetry to Results Processor.
HISTOGRAM_DICTS_NAME = 'histogram_dicts.json'

# Output file in HistogramSet format.
OUTPUT_FILENAME = 'histograms.json'


def Process(intermediate_results, options):
  """Process intermediate results and write output in output_dir."""
  histogram_dicts = Convert(intermediate_results, options.results_label)

  output_file = os.path.join(options.output_dir, OUTPUT_FILENAME)
  if not options.reset_results and os.path.isfile(output_file):
    with open(output_file) as input_stream:
      try:
        histogram_dicts += json.load(input_stream)
      except ValueError:
        logging.warning(
            'Found existing histograms json but failed to parse it.')

  with open(output_file, 'w') as output_stream:
    json.dump(histogram_dicts, output_stream)


def Convert(intermediate_results, results_label):
  """Convert intermediate results to histogram dicts."""
  histograms = histogram_set.HistogramSet()
  for result in intermediate_results['testResults']:
    if HISTOGRAM_DICTS_NAME in result['artifacts']:
      with open(result['artifacts'][HISTOGRAM_DICTS_NAME]['filePath']) as f:
        histogram_dicts = json.load(f)
      histograms.ImportDicts(histogram_dicts)

  diagnostics = intermediate_results['benchmarkRun'].get('diagnostics', {})
  for name, diag in diagnostics.items():
    # For now, we only support GenericSet diagnostics that are serialized
    # as lists of values.
    assert isinstance(diag, list)
    histograms.AddSharedDiagnosticToAllHistograms(
        name, generic_set.GenericSet(diag))

  if results_label is not None:
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.LABELS.name,
        generic_set.GenericSet([results_label]))

  return histograms.AsDicts()
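
For reference, this is a sketch of the minimal intermediate-results dict that Convert() above consumes. The field names come from the code above and the testing helpers changed below; the artifact file path is a hypothetical placeholder:

# Sketch of the input Convert() reads. Only list-valued (GenericSet)
# diagnostics are supported for now; the artifact path is a placeholder
# for a Telemetry-written histogram_dicts.json file.
in_results = {
    'benchmarkRun': {
        'diagnostics': {
            'osNames': ['linux'],
        },
    },
    'testResults': [
        {
            'artifacts': {
                'histogram_dicts.json': {
                    'filePath': '/tmp/histogram_dicts.json',
                },
            },
        },
    ],
}
# histogram_dicts = Convert(in_results, results_label='label')
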
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

import json
import os
import shutil
import tempfile

from core.results_processor import histograms_output
from core.results_processor import command_line
from core.results_processor import testing

from tracing.value import histogram
from tracing.value import histogram_set


class HistogramsOutputTest(unittest.TestCase):
  def setUp(self):
    self.output_dir = tempfile.mkdtemp()
    parser = command_line.ArgumentParser()
    self.options = parser.parse_args([])
    self.options.output_dir = self.output_dir
    command_line.ProcessOptions(self.options)

  def tearDown(self):
    shutil.rmtree(self.output_dir)

  def testConvertOneStory(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

    in_results = testing.IntermediateResults(
        test_results=[
            testing.TestResult(
                'benchmark/story',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
        ],
    )

    histogram_dicts = histograms_output.Convert(in_results, results_label=None)

    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    self.assertEqual(len(out_histograms), 1)
    self.assertEqual(out_histograms.GetFirstHistogram().name, 'a')
    self.assertEqual(out_histograms.GetFirstHistogram().unit, 'unitless')

  def testConvertDiagnostics(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

    in_results = testing.IntermediateResults(
        test_results=[],
        diagnostics={
            'benchmarks': ['benchmark'],
            'osNames': ['linux'],
            'documentationUrls': [['documentation', 'url']],
        },
    )

    histogram_dicts = histograms_output.Convert(in_results,
                                                results_label='label')

    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
    self.assertEqual(len(diag_values), 4)
    self.assertIn(['benchmark'], diag_values)
    self.assertIn(['linux'], diag_values)
    self.assertIn([['documentation', 'url']], diag_values)
    self.assertIn(['label'], diag_values)

  def testConvertTwoStories(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

    in_results = testing.IntermediateResults(
        test_results=[
            testing.TestResult(
                'benchmark/story1',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story2',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story1',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story2',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
        ],
    )

    histogram_dicts = histograms_output.Convert(in_results,
                                                results_label='label')

    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    self.assertEqual(len(out_histograms), 4)
    hist = out_histograms.GetFirstHistogram()
    self.assertEqual(hist.name, 'a')
    self.assertEqual(hist.unit, 'unitless')
    self.assertEqual(list(hist.diagnostics['labels']), ['label'])
@@ -19,10 +19,10 @@ import urllib
 
 OUTPUT_FILENAME = 'test-results.json'
 
 
-def Process(intermediate_results, output_dir):
+def Process(intermediate_results, options):
   """Process intermediate results and write output in output_dir."""
-  results = Convert(intermediate_results, output_dir)
-  with open(os.path.join(output_dir, OUTPUT_FILENAME), 'w') as f:
+  results = Convert(intermediate_results, options.output_dir)
+  with open(os.path.join(options.output_dir, OUTPUT_FILENAME), 'w') as f:
     json.dump(results, f, sort_keys=True, indent=4, separators=(',', ': '))
...
@@ -13,12 +13,14 @@ import os
 
 from core.results_processor import command_line
 from core.results_processor import json3_output
+from core.results_processor import histograms_output
 
 
 HTML_TRACE_NAME = 'trace.html'
 TELEMETRY_RESULTS = '_telemetry_results.jsonl'
 
 FORMATTERS = {
     'json-test-results': json3_output,
+    'histograms': histograms_output,
 }
 
@@ -48,7 +50,7 @@ def ProcessResults(options):
       raise NotImplementedError(output_format)
 
     formatter = FORMATTERS[output_format]
-    formatter.Process(intermediate_results, options.output_dir)
+    formatter.Process(intermediate_results, options)
 
 
 def _LoadIntermediateResults(intermediate_file):
...
@@ -15,10 +15,16 @@ import shutil
 import tempfile
 import unittest
 
+import mock
+
 from core.results_processor import json3_output
+from core.results_processor import histograms_output
 from core.results_processor import processor
 from core.results_processor import testing
 
+from tracing.value import histogram
+from tracing.value import histogram_set
+
 
 class ResultsProcessorIntegrationTests(unittest.TestCase):
   def setUp(self):
@@ -96,3 +102,141 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
     self.assertEqual(len(artifacts), 2)
     self.assertEqual(artifacts['logs'], ['gs://logs.txt'])
     self.assertEqual(artifacts['trace.html'], ['gs://trace.html'])
+
+  # TODO(crbug.com/981349): Remove this mock when histograms format
+  # is enabled in results_processor.
+  @mock.patch('core.results_processor.command_line.SUPPORTED_FORMATS',
+              ['histograms'])
+  def testHistogramsOutput(self):
+    hist_file = os.path.join(self.output_dir,
+                             histograms_output.HISTOGRAM_DICTS_NAME)
+    with open(hist_file, 'w') as f:
+      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
+
+    self.SerializeIntermediateResults(
+        test_results=[
+            testing.TestResult(
+                'benchmark/story',
+                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
+            ),
+        ],
+        diagnostics={
+            'benchmarks': ['benchmark'],
+            'osNames': ['linux'],
+            'documentationUrls': [['documentation', 'url']],
+        },
+        start_time='2009-02-13T23:31:30.987000Z',
+    )
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label',
+    ])
+
+    with open(os.path.join(
+        self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
+      results = json.load(f)
+
+    out_histograms = histogram_set.HistogramSet()
+    out_histograms.ImportDicts(results)
+    self.assertEqual(len(out_histograms), 1)
+    self.assertEqual(out_histograms.GetFirstHistogram().name, 'a')
+    self.assertEqual(out_histograms.GetFirstHistogram().unit, 'unitless')
+
+    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
+    self.assertEqual(len(diag_values), 4)
+    self.assertIn(['benchmark'], diag_values)
+    self.assertIn(['linux'], diag_values)
+    self.assertIn([['documentation', 'url']], diag_values)
+    self.assertIn(['label'], diag_values)
+
+  # TODO(crbug.com/981349): Remove this mock when histograms format
+  # is enabled in results_processor.
+  @mock.patch('core.results_processor.command_line.SUPPORTED_FORMATS',
+              ['histograms'])
+  def testHistogramsOutputResetResults(self):
+    hist_file = os.path.join(self.output_dir,
+                             histograms_output.HISTOGRAM_DICTS_NAME)
+    with open(hist_file, 'w') as f:
+      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
+
+    self.SerializeIntermediateResults(
+        test_results=[
+            testing.TestResult(
+                'benchmark/story',
+                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
+            ),
+        ],
+    )
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label1',
+    ])
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label2',
+        '--reset-results',
+    ])
+
+    with open(os.path.join(
+        self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
+      results = json.load(f)
+
+    out_histograms = histogram_set.HistogramSet()
+    out_histograms.ImportDicts(results)
+    self.assertEqual(len(out_histograms), 1)
+
+    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
+    self.assertNotIn(['label1'], diag_values)
+    self.assertIn(['label2'], diag_values)
+
+  # TODO(crbug.com/981349): Remove this mock when histograms format
+  # is enabled in results_processor.
+  @mock.patch('core.results_processor.command_line.SUPPORTED_FORMATS',
+              ['histograms'])
+  def testHistogramsOutputAppendResults(self):
+    hist_file = os.path.join(self.output_dir,
+                             histograms_output.HISTOGRAM_DICTS_NAME)
+    with open(hist_file, 'w') as f:
+      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
+
+    self.SerializeIntermediateResults(
+        test_results=[
+            testing.TestResult(
+                'benchmark/story',
+                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
+            ),
+        ],
+    )
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label1',
+    ])
+
+    processor.main([
+        '--output-format', 'histograms',
+        '--output-dir', self.output_dir,
+        '--intermediate-dir', self.intermediate_dir,
+        '--results-label', 'label2',
+    ])
+
+    with open(os.path.join(
+        self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
+      results = json.load(f)
+
+    out_histograms = histogram_set.HistogramSet()
+    out_histograms.ImportDicts(results)
+    self.assertEqual(len(out_histograms), 2)
+
+    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
+    self.assertIn(['label1'], diag_values)
+    self.assertIn(['label2'], diag_values)
@@ -11,7 +11,7 @@ _BENCHMARK_START_KEYS = set(['startTime'])
 
 
 def IntermediateResults(test_results, start_time='2015-10-21T07:28:00.000Z',
-                        finalized=True, interrupted=False):
+                        finalized=True, interrupted=False, diagnostics=None):
   """Build a dict of 'parsed' intermediate results.
 
   Args:
@@ -28,6 +28,7 @@ def IntermediateResults(test_results, start_time='2015-10-21T07:28:00.000Z',
       'startTime': start_time,
       'finalized': finalized,
       'interrupted': interrupted,
+      'diagnostics': diagnostics or {},
     },
     'testResults': list(test_results)
  }
...