Commit 1dbbf373 authored by Juan Antonio Navarro Perez, committed by Commit Bot

[tools/perf] Refactor perf_data_generator.py

Make it easier to add new files to update/verify by just having to
create a new "updater" function. This avoids the code duplication in
the previous "verify"-ing functions.

Bug: 1029042
Change-Id: Ia85209a38b588f7687aabdd04e2b68fb8e820037
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1944679
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Auto-Submit: Juan Antonio Navarro Pérez <perezju@chromium.org>
Reviewed-by: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#720658}
parent 42f82325
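
For readers skimming the change: the refactor replaces the per-file validate_*() helpers with a single list of (updater, file) pairs plus two generic drivers, one that regenerates every registered file in place and one that regenerates each file into a temporary directory and compares it against the checked-in copy. Below is a minimal, self-contained sketch of that pattern with illustrative names only; the real registry and drivers are in the diff that follows.

import filecmp
import os
import shutil
import tempfile

# Each "updater" writes one generated file and returns True on success.
def update_example_file(file_path):  # illustrative updater, not in this CL
  with open(file_path, 'w') as f:
    f.write('AUTOGENERATED FILE DO NOT EDIT\n')
  return True

# One (updater, checked-in path) entry per generated file.
EXAMPLE_UPDATERS_AND_FILES = [
    (update_example_file, 'example_generated_file.txt'),  # illustrative path
]

def update_files():
  # Regenerate every registered file in place.
  return all(run_updater(src_file)
             for run_updater, src_file in EXAMPLE_UPDATERS_AND_FILES)

def validate_files():
  # Regenerate each file into a temp dir and compare with the checked-in copy.
  tempdir = tempfile.mkdtemp()
  try:
    for run_updater, src_file in EXAMPLE_UPDATERS_AND_FILES:
      temp_filepath = os.path.join(tempdir, os.path.basename(src_file))
      if not (os.path.exists(src_file) and
              run_updater(temp_filepath) and
              filecmp.cmp(temp_filepath, src_file)):
        return False
    return True
  finally:
    shutil.rmtree(tempdir)
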
@@ -22,6 +22,7 @@ import filecmp
 import json
 import os
 import re
+import shutil
 import sys
 import tempfile
 import textwrap
@@ -803,7 +804,17 @@ BUILDERS = {
 # pylint: enable=line-too-long
-def update_all_tests(builders_dict, file_path):
+def update_all_builders(file_path):
+  return (_update_builders(BUILDERS, file_path) and
+          is_perf_benchmarks_scheduling_valid(file_path, sys.stderr))
+def update_all_fyi_builders(file_path):
+  return _update_builders(FYI_BUILDERS, file_path)
+def _update_builders(builders_dict, file_path):
   tests = {}
   tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
   tests['AAAAA2 See //tools/perf/generate_perf_data to make changes'] = {}
@@ -814,6 +825,7 @@ def update_all_tests(builders_dict, file_path):
   with open(file_path, 'w') as fp:
     json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
     fp.write('\n')
+  return True
 def merge_dicts(*dict_args):
@@ -1045,6 +1057,7 @@ def update_benchmark_csv(file_path):
   with open(file_path, 'wb') as f:
     writer = csv.writer(f, lineterminator="\n")
     writer.writerows(csv_data)
+  return True
 def update_labs_docs_md(filepath):
@@ -1068,33 +1081,7 @@ def update_labs_docs_md(filepath):
       f.write(' * [{0.name}]({0.builder_url}): {0.description}.\n'.format(
           tester))
     f.write('\n')
-def validate_waterfall(builders_dict, waterfall_file):
-  waterfall_tempfile = tempfile.NamedTemporaryFile(delete=False).name
-  try:
-    update_all_tests(builders_dict, waterfall_tempfile)
-    return filecmp.cmp(waterfall_file, waterfall_tempfile)
-  finally:
-    os.remove(waterfall_tempfile)
-def validate_benchmark_csv(benchmark_file):
-  benchmark_tempfile = tempfile.NamedTemporaryFile(delete=False).name
-  try:
-    update_benchmark_csv(benchmark_tempfile)
-    return filecmp.cmp(benchmark_file, benchmark_tempfile)
-  finally:
-    os.remove(benchmark_tempfile)
-def validate_docs(labs_docs_file):
-  labs_docs_tempfile = tempfile.NamedTemporaryFile(delete=False).name
-  try:
-    update_labs_docs_md(labs_docs_tempfile)
-    return filecmp.cmp(labs_docs_file, labs_docs_tempfile)
-  finally:
-    os.remove(labs_docs_tempfile)
+  return True
 def generate_telemetry_args(tester_config, platform):
@@ -1256,6 +1243,47 @@ def generate_builder_config(condensed_config, builder_name):
   return config
+# List of all updater functions and the file they generate. The updater
+# functions must return True on success and False otherwise. File paths are
+# relative to chromium src and should use posix path separators (i.e. '/').
+ALL_UPDATERS_AND_FILES = [
+    (update_all_builders, 'testing/buildbot/chromium.perf.json'),
+    (update_all_fyi_builders, 'testing/buildbot/chromium.perf.fyi.json'),
+    (update_benchmark_csv, 'tools/perf/benchmark.csv'),
+    (update_labs_docs_md, 'docs/speed/perf_lab_platforms.md'),
+]
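
Per the contract documented in the comment above, supporting one more generated file now only takes a new updater function plus one entry in this list. A hypothetical example (function name and path are illustrative, not part of this change):

# Hypothetical updater for a new generated file (illustrative only).
def update_example_doc_md(file_path):
  with open(file_path, 'w') as f:
    f.write('<!-- AUTOGENERATED FILE DO NOT EDIT -->\n')
    f.write('<!-- See //tools/perf/generate_perf_data to make changes -->\n')
  return True  # Updaters must return True on success, False otherwise.

# Corresponding (hypothetical) registry entry:
#     (update_example_doc_md, 'docs/speed/example_doc.md'),
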
+def _source_filepath(posix_path):
+  return os.path.join(path_util.GetChromiumSrcDir(), *posix_path.split('/'))
+def validate_all_files():
+  """Validate all generated files."""
+  tempdir = tempfile.mkdtemp()
+  try:
+    for run_updater, src_file in ALL_UPDATERS_AND_FILES:
+      real_filepath = _source_filepath(src_file)
+      temp_filepath = os.path.join(tempdir, os.path.basename(real_filepath))
+      if not (os.path.exists(real_filepath) and
+              run_updater(temp_filepath) and
+              filecmp.cmp(temp_filepath, real_filepath)):
+        return False
+  finally:
+    shutil.rmtree(tempdir)
+  return True
+def update_all_files():
+  """Update all generated files."""
+  for run_updater, src_file in ALL_UPDATERS_AND_FILES:
+    if not run_updater(_source_filepath(src_file)):
+      print('Failed updating:', src_file)
+      return False
+    print('Updated:', src_file)
+  return True
 def main(args):
   parser = argparse.ArgumentParser(
       description=('Generate perf test\' json config and benchmark.csv. '
@@ -1268,28 +1296,8 @@ def main(args):
             'configs'))
   options = parser.parse_args(args)
-  perf_waterfall_file = os.path.join(
-      path_util.GetChromiumSrcDir(), 'testing', 'buildbot',
-      'chromium.perf.json')
-  fyi_waterfall_file = os.path.join(
-      path_util.GetChromiumSrcDir(), 'testing', 'buildbot',
-      'chromium.perf.fyi.json')
-  benchmark_file = os.path.join(
-      path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmark.csv')
-  labs_docs_file = os.path.join(
-      path_util.GetChromiumSrcDir(), 'docs', 'speed', 'perf_lab_platforms.md')
-  return_code = 0
   if options.validate_only:
-    if (validate_waterfall(BUILDERS, perf_waterfall_file)
-        and validate_waterfall(FYI_BUILDERS, fyi_waterfall_file)
-        and validate_benchmark_csv(benchmark_file)
-        and validate_docs(labs_docs_file)
-        and is_perf_benchmarks_scheduling_valid(
-            perf_waterfall_file, outstream=sys.stderr)):
+    if validate_all_files():
       print('All the perf config files are up-to-date. \\o/')
       return 0
     else:
@@ -1297,12 +1305,4 @@ def main(args):
            'to update them.' % sys.argv[0])
       return 1
   else:
-    update_all_tests(FYI_BUILDERS, fyi_waterfall_file)
-    update_all_tests(BUILDERS, perf_waterfall_file)
-    update_benchmark_csv(benchmark_file)
-    update_labs_docs_md(labs_docs_file)
-    if not is_perf_benchmarks_scheduling_valid(
-        perf_waterfall_file, outstream=sys.stderr):
-      return_code = 1
-    return return_code
+    return 0 if update_all_files() else 1