Commit 2383d320 authored by Ashley Enstad, committed by Commit Bot

Regenerating perf_data_generator on presubmit so the csv and json files stay up-to-date

Instead of verifying that a user manually ran the update command, just
run it anyway and block the presubmit if there are uncommitted changes
(namely the updated csv and json files).
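In other words, the presubmit now follows a "regenerate, then fail if
dirty" pattern. A minimal standalone sketch of that pattern (the script
name and error text here are hypothetical, for illustration only; the
real check runs through depot_tools' input_api/output_api, as shown in
the diff below):

    import subprocess
    import sys

    def main():
        # Regenerate unconditionally; the generator rewrites the csv/json
        # files in place whenever they are stale.
        subprocess.check_call([sys.executable, 'generate_perf_data'])
        # Any path still listed by `git diff --name-only` is modified but
        # uncommitted, i.e. the regenerated output was not committed.
        dirty = subprocess.check_output(
            ['git', 'diff', '--name-only']).decode().strip()
        if dirty:
            print('Presubmit error: commit the regenerated files:\n' + dirty)
            return 1
        return 0

    if __name__ == '__main__':
        sys.exit(main())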

BUG=chromium:781312

Change-Id: I8f05cd0bf817ca958c608c0e54c87a990ba6848e
Reviewed-on: https://chromium-review.googlesource.com/754062
Commit-Queue: Ashley Enstad <ashleymarie@chromium.org>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#516066}
parent dfe3a9ff
@@ -15,15 +15,16 @@ def _CommonChecks(input_api, output_api):
   """Performs common checks, which includes running pylint."""
   results = []
+  _UpdatePerfData(input_api)
+  _UpdateBenchmarkShardingMap(input_api)
+  results.extend(_CheckNoUncommittedFiles(input_api, output_api))
   results.extend(_CheckWprShaFiles(input_api, output_api))
   results.extend(_CheckJson(input_api, output_api))
-  _UpdateBenchmarkShardingMap(input_api)
-  results.extend(_CheckPerfJsonUpToDate(input_api, output_api))
   results.extend(_CheckExpectations(input_api, output_api))
   results.extend(input_api.RunTests(input_api.canned_checks.GetPylint(
       input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
       pylintrc='pylintrc')))
-  results.extend(_CheckNoUncommittedFiles(input_api, output_api))
   return results
@@ -78,17 +79,11 @@ def _UpdateBenchmarkShardingMap(input_api):
       input_api)
 
 
-def _CheckPerfJsonUpToDate(input_api, output_api):
-  results = []
+def _UpdatePerfData(input_api):
   perf_dir = input_api.PresubmitLocalPath()
-  out, return_code = _RunArgs([
+  _RunArgs([
       input_api.python_executable,
-      input_api.os_path.join(perf_dir, 'generate_perf_data'),
-      '--validate-only'], input_api)
-  if return_code:
-    results.append(output_api.PresubmitError(
-        'Validating Perf JSON configs failed.', long_text=out))
-  return results
+      input_api.os_path.join(perf_dir, 'generate_perf_data')], input_api)
 
 
 def _CheckWprShaFiles(input_api, output_api):
@@ -128,6 +123,7 @@ def _CheckJson(input_api, output_api):
 
 
 def _CheckNoUncommittedFiles(input_api, output_api):
+  """Ensures that uncommitted updated files will block presubmit."""
   results = []
   diff_text = _RunArgs(['git', 'diff', '--name-only'], input_api)[0]
...
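The hunk above is truncated after the git diff call (the "..." marks
elided context). A plausible completion of _CheckNoUncommittedFiles,
based on the depot_tools presubmit API (a hypothetical reconstruction,
not the commit's verbatim code):

    def _CheckNoUncommittedFiles(input_api, output_api):
      """Ensures that uncommitted updated files will block presubmit."""
      results = []
      diff_text = _RunArgs(['git', 'diff', '--name-only'], input_api)[0]
      if diff_text.strip():
        # Hypothetical: surface the dirty paths as a blocking error.
        results.append(output_api.PresubmitError(
            'Please commit the updated generated files:',
            long_text=diff_text))
      return results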
@@ -9,13 +9,11 @@
 the src/testing/buildbot directory and benchmark.csv in the src/tools/perf
 directory. Maintaining these files by hand is too unwieldy.
 """
 
-import argparse
 import collections
 import csv
 import json
 import os
 import re
-import sys
 
 import sets
@@ -982,26 +980,6 @@ def append_extra_tests(waterfall, tests):
       tests[key] = value
 
 
-def tests_are_up_to_date(waterfalls):
-  up_to_date = True
-  all_tests = {}
-  for w in waterfalls:
-    tests = generate_all_tests(w)
-    # Note: |all_tests| don't cover those manually-specified tests added by
-    # append_extra_tests().
-    all_tests.update(tests)
-    append_extra_tests(w, tests)
-    tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
-                            sort_keys=True)
-    config_file = get_json_config_file_for_waterfall(w)
-    with open(config_file, 'r') as fp:
-      config_data = fp.read().strip()
-    up_to_date &= tests_data == config_data
-  verify_all_tests_in_benchmark_csv(all_tests,
-                                    get_all_waterfall_benchmarks_metadata())
-  return up_to_date
-
-
 def update_all_tests(waterfalls):
   all_tests = {}
   for w in waterfalls:
@@ -1177,33 +1155,11 @@ def update_benchmark_csv():
     writer.writerows(csv_data)
 
 
-def main(args):
-  parser = argparse.ArgumentParser(
-      description=('Generate perf test\' json config and benchmark.csv. '
-                   'This needs to be done anytime you add/remove any existing'
-                   'benchmarks in tools/perf/benchmarks.'))
-  parser.add_argument(
-      '--validate-only', action='store_true', default=False,
-      help=('Validate whether the perf json generated will be the same as the '
-            'existing configs. This does not change the contain of existing '
-            'configs'))
-  options = parser.parse_args(args)
+def main():
   waterfall = get_waterfall_config()
   waterfall['name'] = 'chromium.perf'
   fyi_waterfall = get_fyi_waterfall_config()
   fyi_waterfall['name'] = 'chromium.perf.fyi'
-  if options.validate_only:
-    if tests_are_up_to_date([fyi_waterfall, waterfall]):
-      print 'All the perf JSON config files are up-to-date. \\o/'
-      return 0
-    else:
-      print ('The perf JSON config files are not up-to-date. Please run %s '
-             'without --validate-only flag to update the perf JSON '
-             'configs and benchmark.csv.') % sys.argv[0]
-      return 1
-  else:
-    update_all_tests([fyi_waterfall, waterfall])
-    update_benchmark_csv()
-  return 0
+  update_all_tests([fyi_waterfall, waterfall])
+  update_benchmark_csv()
@@ -9,4 +9,4 @@ from core import perf_data_generator
 
 if __name__ == '__main__':
-  sys.exit(perf_data_generator.main(sys.argv[1:]))
+  sys.exit(perf_data_generator.main())