Commit 5867c6e2 authored by qyearsley, committed by Commit bot

Rename run-bisect-perf-regression.cfg -> auto_bisect/bisect.cfg

If this is submitted, then cl/74264031 should also be submitted so that the perf dashboard correctly makes patches.

Any thoughts?

BUG=

Review URL: https://codereview.chromium.org/511043002

Cr-Commit-Position: refs/heads/master@{#292434}
parent 46790669
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect/perf trybot.
"""Top-level presubmit script for auto-bisect.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
@@ -11,40 +11,65 @@ details on the presubmit API built into gcl.
import imp
import os
def _ExamineConfigFiles(input_api):
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith('run-bisect-perf-regression.cfg') and
not f.LocalPath().endswith('run-perf-test.cfg')):
continue
# Paths to bisect config files relative to src/tools.
CONFIG_FILES = [
'auto_bisect/bisect.cfg',
'run-perf-test.cfg'
]
try:
cfg_file = imp.load_source('config', os.path.basename(f.LocalPath()))
PYLINT_BLACKLIST = []
PYLINT_DISABLED_WARNINGS = []
for k, v in cfg_file.config.iteritems():
if v:
return f.LocalPath()
except (IOError, AttributeError, TypeError):
return f.LocalPath()
return None
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
results = _ExamineConfigFiles(input_api)
if results:
return [output_api.PresubmitError(
'The bisection config file should only contain a config dict with '
'empty fields. Changes to this file should never be submitted.',
items=[results])]
return []
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CommonChecks(input_api, output_api):
def _CommonChecks(input_api, output_api):
"""Does all presubmit checks for auto-bisect."""
# TODO(qyearsley) Run bisect unit test.
# TODO(qyearsley) Run pylint on all auto-bisect py files but not other files.
results = []
results.extend(_CheckNoChangesToBisectConfigFile(input_api, output_api))
results.extend(_CheckAllConfigFiles(input_api, output_api))
return results
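A possible follow-up for the pylint TODO above (a sketch only, not part of this change) would be the canned RunPylint presubmit check, wired up with the PYLINT_BLACKLIST and PYLINT_DISABLED_WARNINGS lists declared at the top of this file; the helper name and exact arguments here are assumptions:

def _RunPylint(input_api, output_api):
  # Sketch: run pylint over the affected auto-bisect files using the
  # canned presubmit check and the lists declared above.
  return input_api.canned_checks.RunPylint(
      input_api, output_api,
      black_list=PYLINT_BLACKLIST,
      disabled_warnings=PYLINT_DISABLED_WARNINGS)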
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
def _CheckAllConfigFiles(input_api, output_api):
  """Checks all bisect config files and returns a list of presubmit results."""
  results = []
  for f in input_api.AffectedFiles():
    for config_file in CONFIG_FILES:
      if f.LocalPath().endswith(config_file):
        results.extend(_CheckConfigFile(config_file, output_api))
  return results
def _CheckConfigFile(file_path, output_api):
  """Checks one bisect config file and returns a list of presubmit results."""
  try:
    config_file = imp.load_source('config', file_path)
  except IOError as e:
    warning = 'Failed to read config file %s: %s' % (file_path, str(e))
    return [output_api.PresubmitError(warning, items=[file_path])]

  if not hasattr(config_file, 'config'):
    warning = 'Config file has no "config" global variable: %s' % file_path
    return [output_api.PresubmitError(warning, items=[file_path])]

  if not isinstance(config_file.config, dict):
    warning = 'Config file "config" global variable is not a dict: %s' % file_path
    return [output_api.PresubmitError(warning, items=[file_path])]

  for k, v in config_file.config.iteritems():
    if v != '':
      warning = 'Non-empty value in config dict: %s: %s' % (repr(k), repr(v))
      warning += ('\nThe bisection config file should only contain a config '
                  'dict with empty fields. Changes to this file should not '
                  'be submitted.')
      return [output_api.PresubmitError(warning, items=[file_path])]

  return []
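For reference, a config file that passes _CheckConfigFile above contains only a config dict whose values are all empty strings. Using the keys documented in bisect.cfg later in this change, that looks roughly like this (a sketch of the expected checked-in state, not the literal file contents):

# Every value is empty, so the presubmit check returns no errors.
config = {
  'command': '',
  'good_revision': '',
  'bad_revision': '',
  'metric': '',
  'repeat_count': '',
  'max_time_minutes': '',
  'truncate_percent': '',
}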
@@ -8,16 +8,17 @@ Documentation:
http://www.chromium.org/developers/bisecting-bugs
http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions
Overview of bisect-related files in src/tools:
run-bisect-perf-regression.py
Overview of bisect-related files:
src/tools/run-bisect-perf-regression.py
-- the script used to kick off a normal performance regression bisect job.
run-bisect-perf-regression.cfg
src/tools/auto_bisect/bisect.cfg:
-- this file contains parameters for a bisect job, and is read by other
modules including run-bisect-perf-regresion.py.
run-bisect-manual-test.py
modules including run-bisect-perf-regression.py.
src/tools/run-bisect-manual-test.py
-- a script which is used to manually bisect regressions; this also
depends on bisect-perf-gression.py.
bisect-perf-regression.py
depends on bisect-perf-regression.py.
src/tools/bisect-perf-regression.py
-- the main module which the others depend on.
bisect-manual-test.py
src/tools/bisect-manual-test.py
-- a helper module used when manually bisecting regressions.
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Config file for Run Performance Test Bisect Tool
"""Config file read by run-bisect-perf-regression.py.
This script is intended for use by anyone that wants to run a remote bisection
on a range of revisions to look for a performance regression. Modify the config
@@ -12,67 +12,50 @@ run a git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command line to pass to the
bisect-perf-regression.py script in order to execute the test.
'command': This is the full command to execute the test.
'good_revision': An svn or git revision where the metric hadn't regressed yet.
'bad_revision': An svn or git revision sometime after the metric had
regressed.
'bad_revision': An svn or git revision sometime after the metric regressed.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
'truncate_percent': Discard the highest/lowest % values from performance test.
'truncate_percent': The highest/lowest % values will be discarded before
computing the mean result for each revision.
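As an aside on the 'metric' format documented above, a RESULT line maps to the "<graph>/<trace>" name as in the hypothetical snippet below; this is for illustration only, since the real parsing lives in bisect-perf-regression.py and may differ:

import re

# "RESULT Total: Total= 1234.5 ms" -> metric name "Total/Total".
RESULT_RE = re.compile(
    r'^RESULT (?P<graph>[^:]+): (?P<trace>[^=]+)= (?P<value>\S+) (?P<units>\S+)')
match = RESULT_RE.match('RESULT Total: Total= 1234.5 ms')
if match:
  print '%s/%s' % (match.group('graph'), match.group('trace'))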
Sample config:
config = {
'command': './tools/perf/run_measurement --browser=release blink_perf third_party/WebKit/PerformanceTests/Layout/floats_50_100.html',
'good_revision': '233015',
'bad_revision': '233115',
'metric': 'floats_50_100/floats_50_100',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
On Windows:
- If you're calling a python script you will need to add "python" to
the command:
config = {
'command': 'python tools/perf/run_measurement -v --browser=release kraken',
'good_revision': '185319',
'bad_revision': '185364',
'command': './tools/perf/run_benchmark --browser=release sunspider',
'metric': 'Total/Total',
'good_revision': '14ac2486c0eba1266d2da1c52e8759d9c784fe80',
'bad_revision': 'fcf8643d31301eea990a4c42d7d8c9fc30cc33ec',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
For Windows, if you're calling a python script you will need to add "python"
to the command, so the command would be changed to:
'python tools/perf/run_benchmark -v --browser=release sunspider',
On ChromeOS:
- Script accepts either ChromeOS versions, or unix timestamps as revisions.
For ChromeOS:
- For good and bad revision, the script may accept either ChromeOS versions
or unix timestamps.
- You don't need to specify --identity and --remote, they will be added to
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values
- Example:
config = {
'command': './tools/perf/run_measurement -v '\
'--browser=cros-chrome-guest '\
'dromaeo tools/perf/page_sets/dromaeo/jslibstylejquery.json',
'command': ('./tools/perf/run_measurement -v --browser=cros-chrome-guest '
'dromaeo.jslibstylejquery'),
'good_revision': '4086.0.0',
'bad_revision': '4087.0.0',
'metric': 'jslib/jslib',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
"""
config = {
......
@@ -6,10 +6,9 @@
"""Run Performance Test Bisect Tool
This script is used by a try bot to run the src/tools/bisect-perf-regression.py
script with the parameters specified in run-bisect-perf-regression.cfg. It will
check out a copy of the depot in a subdirectory 'bisect' of the working
script with the parameters specified in src/tools/auto_bisect/bisect.cfg.
It will check out a copy of the depot in a subdirectory 'bisect' of the working
directory provided, and run the bisect-perf-regression.py script there.
"""
import imp
@@ -31,12 +30,13 @@ bisect = imp.load_source('bisect-perf-regression',
CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
# Default config file names.
BISECT_REGRESSION_CONFIG = 'run-bisect-perf-regression.cfg'
# Default config file paths, relative to this script.
BISECT_REGRESSION_CONFIG = os.path.join('auto_bisect', 'bisect.cfg')
RUN_TEST_CONFIG = 'run-perf-test.cfg'
WEBKIT_RUN_TEST_CONFIG = os.path.join(
'..', 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
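As an illustration of how these relative paths can be used (a sketch only; _LoadConfigDict is a made-up name, not a function in this script, and os and imp are already imported at the top of the file), a config file can be resolved against the script's directory and loaded with the same imp.load_source pattern used above:

def _LoadConfigDict(script_dir, relative_path):
  """Returns the 'config' dict from a bisect config file, or None on error."""
  path = os.path.join(script_dir, relative_path)
  try:
    return imp.load_source('config', path).config
  except (IOError, AttributeError, SyntaxError):
    return None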
class Goma(object):
def __init__(self, path_to_goma):
@@ -485,7 +485,7 @@ def _OptionParser():
"""Returns the options parser for run-bisect-perf-regression.py."""
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a try bot to run the bisection script using the parameters'
' provided in the run-bisect-perf-regression.cfg file.')
' provided in the auto_bisect/bisect.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
@@ -566,7 +566,7 @@ def main():
config, current_dir, opts.path_to_goma)
print ('Error: Could not load config file. Double check your changes to '
'run-bisect-perf-regression.cfg or run-perf-test.cfg for syntax '
'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax '
'errors.\n')
return 1
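As a side note (a convenience snippet, not part of this change), such syntax errors can be caught locally before uploading by loading the config files the same way this script does, run from the src/ directory:

import imp
# Raises SyntaxError/IOError for the same problems the try bot would report.
imp.load_source('config', 'tools/auto_bisect/bisect.cfg')
imp.load_source('config', 'tools/run-perf-test.cfg')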
......