Commit 6cd81cc7 authored by Mikhail Khokhlov, committed by Commit Bot

[tools/perf] Metric computation stub + some refactoring

It makes sense to treat output formats that require metric computation
differently from those that only report test success or failure. This CL
introduces that distinction.

Bug: 981349
Change-Id: I3f19988dd9ef7871489b263768b1ca250cf54535
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1829340
Commit-Queue: Mikhail Khokhlov <khokhlov@google.com>
Reviewed-by: Juan Antonio Navarro Pérez <perezju@chromium.org>
Cr-Commit-Position: refs/heads/master@{#701524}
parent f2c6885c
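In concrete terms, the split described in the commit message comes down to a per-format dispatch like the sketch below. This is a simplified illustration, not part of the diff; FORMATS_WITH_METRICS, ProcessHistogramDicts and ProcessIntermediateResults are the names introduced by this CL, while formatters_by_name stands in for the existing FORMATTERS mapping.

# Illustrative sketch only: formats that need computed metrics receive
# histogram dicts, all other formats receive the raw intermediate results.
FORMATS_WITH_METRICS = ['csv', 'histograms', 'html']

def DispatchToFormatters(formatters_by_name, output_formats, histogram_dicts,
                         intermediate_results, options):
  for output_format in output_formats:
    formatter = formatters_by_name[output_format]
    if output_format in FORMATS_WITH_METRICS:
      formatter.ProcessHistogramDicts(histogram_dicts, options)
    else:
      formatter.ProcessIntermediateResults(intermediate_results, options)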
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
# This file is written by Telemetry; it contains output of metric computation.
# This is a temporary hack to keep things working while we gradually move
# code from Telemetry to Results Processor.
HISTOGRAM_DICTS_FILE = 'histogram_dicts.json'
def ComputeTBMv2Metrics(intermediate_results):
  """Compute metrics on aggregated traces in parallel.

  For each test run that has an aggregate trace and some TBMv2 metrics listed
  in its tags, compute the metrics and store the result as histogram dicts
  in the corresponding test result.
  """
  histogram_dicts = []
  for test_result in intermediate_results['testResults']:
    artifacts = test_result.get('artifacts', {})
    # For now, metrics are computed in telemetry.
    # TODO(crbug.com/981349): Replace it with actual metrics computation.
    assert HISTOGRAM_DICTS_FILE in artifacts
    with open(artifacts[HISTOGRAM_DICTS_FILE]['filePath']) as f:
      histogram_dicts += json.load(f)
  return histogram_dicts
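As a reference for the stub above, a minimal sketch of the intermediate results shape it expects; the structure is inferred from this code and the tests in this CL, and the 'testPath' key and file path are hypothetical.

# Sketch only: each test result carries a histogram_dicts.json artifact
# written by Telemetry; the path below is made up.
example_intermediate_results = {
    'testResults': [
        {
            'testPath': 'benchmark/story',
            'artifacts': {
                HISTOGRAM_DICTS_FILE: {'filePath': '/tmp/histogram_dicts.json'},
            },
        },
    ],
}
# histogram_dicts = ComputeTBMv2Metrics(example_intermediate_results)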
@@ -9,7 +9,6 @@ import csv
import json
import os
from core.results_processor.formatters import histograms_output
from py_utils import tempfile_ext
from tracing.value import histograms_to_csv
@@ -40,11 +39,8 @@ def _WriteCsv(dicts, output_stream):
  csv.writer(output_stream).writerows(rows)
def Process(intermediate_results, options):
  """Process intermediate results and write output in output_dir."""
  histogram_dicts = histograms_output.Convert(intermediate_results,
                                              options.results_label)
def ProcessHistogramDicts(histogram_dicts, options):
  """Convert histogram dicts to CSV and write output in output_dir."""
  with tempfile_ext.NamedTemporaryFile() as hist_file:
    json.dump(histogram_dicts, hist_file)
    hist_file.close()
......
@@ -12,24 +12,13 @@ import json
import logging
import os
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
from tracing.value import histogram_set
# This file is written by Telemetry; it contains output of metric computation.
# This is a temporary hack to keep things working while we gradually move
# code from Telemetry to Results Processor.
HISTOGRAM_DICTS_NAME = 'histogram_dicts.json'
# Output file in HistogramSet format.
OUTPUT_FILENAME = 'histograms.json'
def Process(intermediate_results, options):
  """Process intermediate results and write output in output_dir."""
  histogram_dicts = Convert(intermediate_results, options.results_label)
def ProcessHistogramDicts(histogram_dicts, options):
  """Write histograms in output_dir."""
  output_file = os.path.join(options.output_dir, OUTPUT_FILENAME)
  if not options.reset_results and os.path.isfile(output_file):
    with open(output_file) as input_stream:
@@ -41,28 +30,3 @@ def Process(intermediate_results, options):
  with open(output_file, 'w') as output_stream:
    json.dump(histogram_dicts, output_stream)
def Convert(intermediate_results, results_label):
  """Convert intermediate results to histogram dicts"""
  histograms = histogram_set.HistogramSet()
  for result in intermediate_results['testResults']:
    if HISTOGRAM_DICTS_NAME in result['artifacts']:
      with open(result['artifacts'][HISTOGRAM_DICTS_NAME]['filePath']) as f:
        histogram_dicts = json.load(f)
      histograms.ImportDicts(histogram_dicts)
  diagnostics = intermediate_results['benchmarkRun'].get('diagnostics', {})
  for name, diag in diagnostics.items():
    # For now, we only support GenericSet diagnostics that are serialized
    # as lists of values.
    assert isinstance(diag, list)
    histograms.AddSharedDiagnosticToAllHistograms(
        name, generic_set.GenericSet(diag))
  if results_label is not None:
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.LABELS.name,
        generic_set.GenericSet([results_label]))
  return histograms.AsDicts()
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import json
import os
import shutil
import tempfile
from core.results_processor.formatters import histograms_output
from core.results_processor import command_line
from core.results_processor import testing
from tracing.value import histogram
from tracing.value import histogram_set
class HistogramsOutputTest(unittest.TestCase):
  def setUp(self):
    self.output_dir = tempfile.mkdtemp()
    parser = command_line.ArgumentParser()
    self.options = parser.parse_args([])
    self.options.output_dir = self.output_dir
    command_line.ProcessOptions(self.options)

  def tearDown(self):
    shutil.rmtree(self.output_dir)

  def testConvertOneStory(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
    in_results = testing.IntermediateResults(
        test_results=[
            testing.TestResult(
                'benchmark/story',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
        ],
    )
    histogram_dicts = histograms_output.Convert(in_results, results_label=None)
    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    self.assertEqual(len(out_histograms), 1)
    self.assertEqual(out_histograms.GetFirstHistogram().name, 'a')
    self.assertEqual(out_histograms.GetFirstHistogram().unit, 'unitless')

  def testConvertDiagnostics(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
    in_results = testing.IntermediateResults(
        test_results=[],
        diagnostics={
            'benchmarks': ['benchmark'],
            'osNames': ['linux'],
            'documentationUrls': [['documentation', 'url']],
        },
    )
    histogram_dicts = histograms_output.Convert(in_results,
                                                results_label='label')
    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
    self.assertEqual(len(diag_values), 4)
    self.assertIn(['benchmark'], diag_values)
    self.assertIn(['linux'], diag_values)
    self.assertIn([['documentation', 'url']], diag_values)
    self.assertIn(['label'], diag_values)

  def testConvertTwoStories(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
    in_results = testing.IntermediateResults(
        test_results=[
            testing.TestResult(
                'benchmark/story1',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story2',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story1',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
            testing.TestResult(
                'benchmark/story2',
                artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
            ),
        ],
    )
    histogram_dicts = histograms_output.Convert(in_results,
                                                results_label='label')
    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histogram_dicts)
    self.assertEqual(len(out_histograms), 4)
    hist = out_histograms.GetFirstHistogram()
    self.assertEqual(hist.name, 'a')
    self.assertEqual(hist.unit, 'unitless')
    self.assertEqual(list(hist.diagnostics['labels']), ['label'])
@@ -7,18 +7,14 @@
import codecs
import os
from core.results_processor.formatters import histograms_output
from tracing_build import vulcanize_histograms_viewer
OUTPUT_FILENAME = 'results.html'
def Process(intermediate_results, options):
  """Process intermediate results and write output in output_dir."""
  histogram_dicts = histograms_output.Convert(intermediate_results,
                                              options.results_label)
def ProcessHistogramDicts(histogram_dicts, options):
  """Convert histogram dicts to HTML and write output in output_dir."""
  output_file = os.path.join(options.output_dir, OUTPUT_FILENAME)
  open(output_file, 'a').close()  # Create file if it doesn't exist.
  with codecs.open(output_file, mode='r+', encoding='utf-8') as output_stream:
......
@@ -19,7 +19,7 @@ import urllib
OUTPUT_FILENAME = 'test-results.json'
def Process(intermediate_results, options):
def ProcessIntermediateResults(intermediate_results, options):
  """Process intermediate results and write output in output_dir."""
  results = Convert(intermediate_results, options.output_dir)
  with open(os.path.join(options.output_dir, OUTPUT_FILENAME), 'w') as f:
......
@@ -12,12 +12,19 @@ import json
import os
from core.results_processor import command_line
from core.results_processor import compute_metrics
from core.results_processor import formatters
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
from tracing.value import histogram_set
HTML_TRACE_NAME = 'trace.html'
TELEMETRY_RESULTS = '_telemetry_results.jsonl'
FORMATS_WITH_METRICS = ['csv', 'histograms', 'html']
def ProcessResults(options):
  """Process intermediate results and produce the requested outputs.
@@ -40,12 +47,16 @@ def ProcessResults(options):
    _UploadArtifacts(intermediate_results, options.upload_bucket)
  for output_format in options.output_formats:
    if output_format not in formatters.FORMATTERS:
      raise NotImplementedError(output_format)
  if any(fmt in FORMATS_WITH_METRICS for fmt in options.output_formats):
    histogram_dicts = _ComputeMetrics(intermediate_results,
                                      options.results_label)
  for output_format in options.output_formats:
    formatter = formatters.FORMATTERS[output_format]
    formatter.Process(intermediate_results, options)
    if output_format in FORMATS_WITH_METRICS:
      formatter.ProcessHistogramDicts(histogram_dicts, options)
    else:
      formatter.ProcessIntermediateResults(intermediate_results, options)
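For comparison, a minimal sketch of a formatter that needs no metric computation and therefore implements only ProcessIntermediateResults; the module and output filename here are made up, but the signature matches the dispatch above.

# hypothetical_output.py: illustrative only, not part of this CL.
import json
import os

OUTPUT_FILENAME = 'raw-test-results.json'  # made-up filename

def ProcessIntermediateResults(intermediate_results, options):
  """Write the raw test results to output_dir without computing metrics."""
  output_file = os.path.join(options.output_dir, OUTPUT_FILENAME)
  with open(output_file, 'w') as f:
    json.dump(intermediate_results['testResults'], f)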
def _LoadIntermediateResults(intermediate_file):
@@ -95,6 +106,34 @@ def _UploadArtifacts(intermediate_results, upload_bucket):
      assert 'remoteUrl' in artifact
def _ComputeMetrics(intermediate_results, results_label):
  histogram_dicts = compute_metrics.ComputeTBMv2Metrics(intermediate_results)
  histogram_dicts = AddDiagnosticsToHistograms(
      histogram_dicts, intermediate_results, results_label)
  return histogram_dicts


def AddDiagnosticsToHistograms(histogram_dicts, intermediate_results,
                               results_label):
  """Add diagnostics to histogram dicts"""
  histograms = histogram_set.HistogramSet()
  histograms.ImportDicts(histogram_dicts)
  diagnostics = intermediate_results['benchmarkRun'].get('diagnostics', {})
  for name, diag in diagnostics.items():
    # For now, we only support GenericSet diagnostics that are serialized
    # as lists of values.
    assert isinstance(diag, list)
    histograms.AddSharedDiagnosticToAllHistograms(
        name, generic_set.GenericSet(diag))
  if results_label is not None:
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.LABELS.name,
        generic_set.GenericSet([results_label]))
  return histograms.AsDicts()
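The benchmarkRun diagnostics consumed above are plain lists of values; here is a small sketch of the expected input shape, with values borrowed from the unit test added further down (the commented call is illustrative).

# Sketch only: input shape for AddDiagnosticsToHistograms.
example_results = {
    'benchmarkRun': {
        'diagnostics': {
            'benchmarks': ['benchmark'],
            'osNames': ['linux'],
        },
    },
}
# dicts = AddDiagnosticsToHistograms(histogram_dicts, example_results,
#                                    results_label='label')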
def main(args=None):
  """Entry point for the standalone version of the results_processor script."""
  parser = command_line.ArgumentParser(standalone=True)
......
@@ -20,6 +20,7 @@ from core.results_processor.formatters import csv_output
from core.results_processor.formatters import json3_output
from core.results_processor.formatters import histograms_output
from core.results_processor.formatters import html_output
from core.results_processor import compute_metrics
from core.results_processor import processor
from core.results_processor import testing
@@ -107,7 +108,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testHistogramsOutput(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
@@ -152,7 +153,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testHistogramsOutputResetResults(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
@@ -193,7 +194,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testHistogramsOutputAppendResults(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
@@ -233,7 +234,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testHtmlOutput(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
@@ -333,7 +334,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testCsvOutput(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    test_hist = histogram.Histogram('a', 'ms')
    test_hist.AddSample(3000)
    with open(hist_file, 'w') as f:
@@ -379,7 +380,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testCsvOutputResetResults(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
@@ -415,7 +416,7 @@ class ResultsProcessorIntegrationTests(unittest.TestCase):
  def testCsvOutputAppendResults(self):
    hist_file = os.path.join(self.output_dir,
                             histograms_output.HISTOGRAM_DICTS_NAME)
                             compute_metrics.HISTOGRAM_DICTS_FILE)
    with open(hist_file, 'w') as f:
      json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
......
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor methods."""
import unittest
from core.results_processor import processor
from core.results_processor import testing
from tracing.value import histogram
from tracing.value import histogram_set
class ResultsProcessorUnitTests(unittest.TestCase):
  def testAddDiagnosticsToHistograms(self):
    histogram_dicts = [histogram.Histogram('a', 'unitless').AsDict()]
    in_results = testing.IntermediateResults(
        test_results=[],
        diagnostics={
            'benchmarks': ['benchmark'],
            'osNames': ['linux'],
            'documentationUrls': [['documentation', 'url']],
        },
    )
    histograms_with_diagnostics = processor.AddDiagnosticsToHistograms(
        histogram_dicts, in_results, results_label='label')
    out_histograms = histogram_set.HistogramSet()
    out_histograms.ImportDicts(histograms_with_diagnostics)
    diag_values = [list(v) for v in out_histograms.shared_diagnostics]
    self.assertEqual(len(diag_values), 4)
    self.assertIn(['benchmark'], diag_values)
    self.assertIn(['linux'], diag_values)
    self.assertIn([['documentation', 'url']], diag_values)
    self.assertIn(['label'], diag_values)