Commit 5c8ad416 authored by Ashley Enstad's avatar Ashley Enstad Committed by Commit Bot

Updating presubmit for perf_data_generator

Bug: 781312
Change-Id: Ia8c7eba3853f1dff32e95c6f89d2fbfcc2e73aeb
Reviewed-on: https://chromium-review.googlesource.com/921926
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Commit-Queue: Ashley Enstad <ashleymarie@chromium.org>
Cr-Commit-Position: refs/heads/master@{#537320}
parent fb87b7b7
...@@ -11,16 +11,20 @@ for more details about the presubmit API built into depot_tools. ...@@ -11,16 +11,20 @@ for more details about the presubmit API built into depot_tools.
import os import os
def _CommonChecks(input_api, output_api): def _CommonChecks(input_api, output_api, block_on_failure=False):
"""Performs common checks, which includes running pylint.""" """Performs common checks, which includes running pylint.
block_on_failure: For some failures, we would like to warn the
user but still allow them to upload the change. However, we
don't want them to commit code with those failures, so we
need to block the change on commit.
"""
results = [] results = []
_UpdatePerfData(input_api)
results.extend(_CheckNoUncommittedFiles(input_api, output_api))
results.extend(_CheckWprShaFiles(input_api, output_api))
results.extend(_CheckJson(input_api, output_api))
results.extend(_CheckExpectations(input_api, output_api)) results.extend(_CheckExpectations(input_api, output_api))
results.extend(_CheckJson(input_api, output_api))
results.extend(_CheckPerfData(input_api, output_api, block_on_failure))
results.extend(_CheckWprShaFiles(input_api, output_api))
results.extend(input_api.RunTests(input_api.canned_checks.GetPylint( results.extend(input_api.RunTests(input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api), input_api, output_api, extra_paths_list=_GetPathsToPrepend(input_api),
pylintrc='pylintrc'))) pylintrc='pylintrc')))
...@@ -70,11 +74,21 @@ def _CheckExpectations(input_api, output_api): ...@@ -70,11 +74,21 @@ def _CheckExpectations(input_api, output_api):
return results return results
def _UpdatePerfData(input_api): def _CheckPerfData(input_api, output_api, block_on_failure):
results = []
perf_dir = input_api.PresubmitLocalPath() perf_dir = input_api.PresubmitLocalPath()
_RunArgs([ out, return_code = _RunArgs([
input_api.python_executable, input_api.python_executable,
input_api.os_path.join(perf_dir, 'generate_perf_data')], input_api) input_api.os_path.join(perf_dir, 'generate_perf_data'),
'--validate-only'], input_api)
if return_code:
if block_on_failure:
results.append(output_api.PresubmitError(
'Validating perf data failed', long_text=out))
else:
results.append(output_api.PresubmitPromptWarning(
'Validating perf data failed', long_text=out))
return results
def _CheckWprShaFiles(input_api, output_api): def _CheckWprShaFiles(input_api, output_api):
...@@ -113,18 +127,6 @@ def _CheckJson(input_api, output_api): ...@@ -113,18 +127,6 @@ def _CheckJson(input_api, output_api):
return [] return []
def _CheckNoUncommittedFiles(input_api, output_api):
"""Ensures that uncommitted updated files will block presubmit."""
results = []
diff_text = _RunArgs(['git', 'diff', '--name-only'], input_api)[0]
if diff_text != "":
results.append(output_api.PresubmitError(
('Please add the following changed files to your git client: %s' %
diff_text)))
return results
def CheckChangeOnUpload(input_api, output_api): def CheckChangeOnUpload(input_api, output_api):
report = [] report = []
report.extend(_CommonChecks(input_api, output_api)) report.extend(_CommonChecks(input_api, output_api))
...@@ -133,5 +135,5 @@ def CheckChangeOnUpload(input_api, output_api): ...@@ -133,5 +135,5 @@ def CheckChangeOnUpload(input_api, output_api):
def CheckChangeOnCommit(input_api, output_api): def CheckChangeOnCommit(input_api, output_api):
report = [] report = []
report.extend(_CommonChecks(input_api, output_api)) report.extend(_CommonChecks(input_api, output_api, block_on_failure=True))
return report return report
...@@ -11,12 +11,16 @@ directory. Maintaining these files by hand is too unwieldy. ...@@ -11,12 +11,16 @@ directory. Maintaining these files by hand is too unwieldy.
Note: chromium.perf.fyi.json is updated manually for now until crbug.com/757933 Note: chromium.perf.fyi.json is updated manually for now until crbug.com/757933
is complete. is complete.
""" """
import argparse
import collections import collections
import csv import csv
import filecmp
import json import json
import os import os
import re import re
import sys
import sets import sets
import tempfile
from core import path_util from core import path_util
...@@ -1027,12 +1031,54 @@ def update_benchmark_csv(file_path): ...@@ -1027,12 +1031,54 @@ def update_benchmark_csv(file_path):
writer.writerows(csv_data) writer.writerows(csv_data)
def main(): def validate_tests(waterfall, waterfall_file, benchmark_file):
up_to_date = True
waterfall_tempfile = tempfile.NamedTemporaryFile(delete=False).name
benchmark_tempfile = tempfile.NamedTemporaryFile(delete=False).name
try:
update_all_tests(waterfall, waterfall_tempfile)
up_to_date &= filecmp.cmp(waterfall_file, waterfall_tempfile)
update_benchmark_csv(benchmark_tempfile)
up_to_date &= filecmp.cmp(benchmark_file, benchmark_tempfile)
finally:
os.remove(waterfall_tempfile)
os.remove(benchmark_tempfile)
return up_to_date
def main(args):
parser = argparse.ArgumentParser(
description=('Generate perf tests\' json config and benchmark.csv. '
             'This needs to be done anytime you add/remove any existing '
'benchmarks in tools/perf/benchmarks.'))
parser.add_argument(
'--validate-only', action='store_true', default=False,
help=('Validate whether the perf json generated will be the same as the '
'existing configs. This does not change the content of existing '
'configs'))
options = parser.parse_args(args)
waterfall_file = os.path.join( waterfall_file = os.path.join(
path_util.GetChromiumSrcDir(), 'testing', 'buildbot', path_util.GetChromiumSrcDir(), 'testing', 'buildbot',
'chromium.perf.json') 'chromium.perf.json')
update_all_tests(get_waterfall_config(), waterfall_file)
benchmark_file = os.path.join( benchmark_file = os.path.join(
path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmark.csv') path_util.GetChromiumSrcDir(), 'tools', 'perf', 'benchmark.csv')
update_benchmark_csv(benchmark_file)
if options.validate_only:
if validate_tests(get_waterfall_config(), waterfall_file, benchmark_file):
print 'All the perf JSON config files are up-to-date. \\o/'
return 0
else:
print ('The perf JSON config files are not up-to-date. Please run %s '
       'without the --validate-only flag to update the perf JSON '
       'configs and benchmark.csv.') % sys.argv[0]
return 1
else:
update_all_tests(get_waterfall_config(), waterfall_file)
update_benchmark_csv(benchmark_file)
return 0
...@@ -9,4 +9,4 @@ from core import perf_data_generator ...@@ -9,4 +9,4 @@ from core import perf_data_generator
if __name__ == '__main__': if __name__ == '__main__':
sys.exit(perf_data_generator.main()) sys.exit(perf_data_generator.main(sys.argv[1:]))
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment