Commit 27aca985 authored by Quinten Yearsley, committed by Commit Bot

Remove old bisect-related scripts and config files.

Follow-up to https://chromium-review.googlesource.com/c/579937/ which
removes //tools/auto_bisect. Reason: these scripts are no longer used and
will no longer work after //tools/auto_bisect is removed.

Change-Id: Ibe90a9ced87ee34a6673cd7f77645eb20b735f9f
Reviewed-on: https://chromium-review.googlesource.com/580492
Reviewed-by: Roberto Carrillo <robertocn@chromium.org>
Reviewed-by: Nico Weber <thakis@chromium.org>
Commit-Queue: Quinten Yearsley <qyearsley@chromium.org>
Cr-Commit-Position: refs/heads/master@{#488483}
parent 23eca3d6
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect/perf trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
import os
def _ExamineConfigFiles(input_api):
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith('run-perf-test.cfg')):
continue
try:
cfg_file = imp.load_source('config', os.path.basename(f.LocalPath()))
for k, v in cfg_file.config.iteritems():
if v:
return f.LocalPath()
except (IOError, AttributeError, TypeError):
return f.LocalPath()
return None
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
results = _ExamineConfigFiles(input_api)
if results:
return [output_api.PresubmitError(
'The perf try config file should only contain a config dict with '
'empty fields. Changes to this file should never be submitted.',
items=[results])]
return []
def CommonChecks(input_api, output_api):
results = []
results.extend(_CheckNoChangesToBisectConfigFile(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
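For reference, a minimal sketch of how the check above could be exercised outside the presubmit framework. The _Mock* classes are illustrative stand-ins invented here, not part of the real depot_tools API, and the snippet only runs if placed in the same module as the functions above; they model just the two methods the check actually calls.

# Hypothetical stand-ins for the objects the presubmit framework supplies.
class _MockFile(object):
  def __init__(self, path):
    self._path = path

  def LocalPath(self):
    return self._path


class _MockInputApi(object):
  def __init__(self, files):
    self._files = files

  def AffectedFiles(self):
    return self._files


class _MockOutputApi(object):
  class PresubmitError(object):
    def __init__(self, message, items=None):
      self.message = message
      self.items = items


# A change that doesn't touch run-perf-test.cfg produces no errors.
print(CommonChecks(_MockInputApi([_MockFile('some/other/file.py')]),
                   _MockOutputApi()))  # -> []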
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Config file for Run Performance Test Bot
This script is intended for use by anyone that wants to run a remote performance
test. Modify the config below and add the command to run the performance test,
the metric you're interested in, and repeat/discard parameters. You can then
run a git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command line to pass to the
bisect-perf-regression.py script in order to execute the test.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
'truncate_percent': Discard the highest/lowest % values from performance test.
Sample config:
config = {
'command': './out/Release/performance_ui_tests' +
' --gtest_filter=PageCyclerTest.Intl1File',
'metric': 'times/t',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
On Windows:
- If you're calling a python script you will need to add "python" to
the command:
config = {
'command': 'python tools/perf/run_measurement -v --browser=release kraken',
'metric': 'Total/Total',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
On ChromeOS:
- Script accepts either ChromeOS versions, or unix timestamps as revisions.
- You don't need to specify --identity and --remote, they will be added to
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
config = {
'command': './tools/perf/run_measurement -v '\
'--browser=cros-chrome-guest '\
'dromaeo tools/perf/page_sets/dromaeo/jslibstylejquery.json',
'metric': 'jslib/jslib',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
"""
config = {
'command': '',
'metric': '',
'repeat_count': '',
'max_time_minutes': '',
'truncate_percent': '',
}
# Workaround git try issue, see crbug.com/257689
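For context, a minimal sketch of how a consumer such as the presubmit check above might load this file. It assumes it is run from the directory containing run-perf-test.cfg; the path and validation logic are illustrative, not taken from the bisect scripts themselves.

import imp

# Illustrative sketch: load run-perf-test.cfg as a Python module, the same
# way the presubmit check does, then report any fields that are non-empty
# (submitted versions of this file must keep every field empty).
cfg = imp.load_source('config', 'run-perf-test.cfg')
non_empty = [k for k, v in cfg.config.items() if v]
if non_empty:
  print('Fields that must be cleared before submitting: %s' % non_empty)
else:
  print('Config is clean.')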
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Simple script which asks the user to manually check the result of bisection.

Typically used by the run-bisect-manual-test.py script.
"""

import os
import sys

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'perf'))

from chrome_telemetry_build import chromium_config

sys.path.append(chromium_config.GetTelemetryDir())

from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options


def _StartManualTest(options):
  """Starts the browser, then asks the user whether the build is good or bad."""
  browser_to_create = browser_finder.FindBrowser(options)
  print 'Starting browser: %s.' % options.browser_type
  with browser_to_create.Create(options) as _:
    # Loop until we get a response that we can parse.
    while True:
      sys.stderr.write('Revision is [(g)ood/(b)ad]: ')
      response = raw_input()
      if response in ('g', 'b'):
        if response == 'g':
          print 'RESULT manual_test: manual_test= 1'
        else:
          print 'RESULT manual_test: manual_test= 0'
        break


def main():
  usage = ('%prog [options]\n'
           'Starts browser with an optional url and asks user whether '
           'revision is good or bad.\n')
  options = browser_options.BrowserFinderOptions()
  parser = options.CreateParser(usage)
  options, _ = parser.parse_args()
  _StartManualTest(options)


if __name__ == '__main__':
  sys.exit(main())
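The RESULT line printed above follows the RESULT <graph>: <trace>= <value> <units> convention documented in run-perf-test.cfg. The actual parsing lives in bisect_perf_regression.py, which is not shown here; the snippet below is only a rough sketch, under that assumption, of how a consumer might extract the metric from stdout.

import re

# Illustrative sketch: extract a "<graph>/<trace>" metric and its value
# from a RESULT line. The real parser may differ in detail.
RESULT_RE = re.compile(r'^RESULT (?P<graph>[^:]+): (?P<trace>[^=]+)= '
                       r'(?P<value>\S+)(?: (?P<units>.*))?$')

def parse_result_line(line):
  """Returns (metric_name, value) for a RESULT line, or None."""
  match = RESULT_RE.match(line.strip())
  if not match:
    return None
  metric = '%s/%s' % (match.group('graph'), match.group('trace').strip())
  return metric, match.group('value')

print(parse_result_line('RESULT manual_test: manual_test= 1'))
# -> ('manual_test/manual_test', '1')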
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Run Manual Test Bisect Tool

An example usage:
  tools/run-bisect-manual-test.py -g 201281 -b 201290

On the Linux platform, follow the instructions in
https://chromium.googlesource.com/chromium/src/+/master/docs/linux_suid_sandbox_development.md
to set up the sandbox manually before running the script. Otherwise the script
fails to launch Chrome and exits with an error.

This script serves a similar function to bisect-builds.py, except it uses
bisect_perf_regression.py. This means that it can obtain builds of Chromium
for revisions where builds aren't available in cloud storage.
"""

import os
import subprocess
import sys

CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP'
_TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
_BISECT_SCRIPT_PATH = os.path.join(
    _TOOLS_DIR, 'auto_bisect', 'bisect_perf_regression.py')

sys.path.append(os.path.join(_TOOLS_DIR, 'perf'))
from chrome_telemetry_build import chromium_config

sys.path.append(chromium_config.GetTelemetryDir())
from telemetry.internal.browser import browser_options


def _RunBisectionScript(options):
  """Attempts to execute the bisect script (bisect_perf_regression.py).

  Args:
    options: The configuration options to pass to the bisect script.

  Returns:
    An exit code; 0 for success, 1 for failure.
  """
  script_path = os.path.join(options.working_directory,
                             'bisect', 'src', 'tools', 'bisect-manual-test.py')
  abs_script_path = os.path.abspath(script_path)

  test_command = ('python %s --browser=%s --chrome-root=.' %
                  (abs_script_path, options.browser_type))

  cmd = ['python', _BISECT_SCRIPT_PATH,
         '-c', test_command,
         '-g', options.good_revision,
         '-b', options.bad_revision,
         '-m', 'manual_test/manual_test',
         '-r', '1',
         '--working_directory', options.working_directory,
         '--build_preference', 'ninja',
         '--no_custom_deps',
         '--builder_type', options.builder_type]

  if options.extra_src:
    cmd.extend(['--extra_src', options.extra_src])

  if 'cros' in options.browser_type:
    cmd.extend(['--target_platform', 'cros'])

    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      cmd.extend(['--cros_board', os.environ[CROS_BOARD_ENV]])
      cmd.extend(['--cros_remote_ip', os.environ[CROS_IP_ENV]])
    else:
      print ('Error: Cros build selected, but BISECT_CROS_IP or '
             'BISECT_CROS_BOARD undefined.\n')
      return 1
  elif 'android-chrome' == options.browser_type:
    if not options.extra_src:
      print 'Error: Missing --extra_src to run bisect for android-chrome.'
      sys.exit(-1)
    cmd.extend(['--target_platform', 'android-chrome'])
  elif 'android' in options.browser_type:
    cmd.extend(['--target_platform', 'android'])
  elif not options.target_build_type:
    cmd.extend(['--target_build_type', options.browser_type.title()])

  if options.target_build_type:
    cmd.extend(['--target_build_type', options.target_build_type])

  if options.goma_threads:
    cmd.extend(['--use_goma', '--goma_threads', options.goma_threads])

  cmd = [str(c) for c in cmd]
  return_code = subprocess.call(cmd)
  if return_code:
    print 'Error: bisect_perf_regression.py had exit code %d.' % return_code
    print

  return return_code


def main():
  """Does a bisect based on the command-line arguments passed in.

  The user will be prompted to classify each revision as good or bad.
  """
  usage = ('%prog [options]\n'
           'Used to run the bisection script with a manual test.')

  options = browser_options.BrowserFinderOptions('release')
  parser = options.CreateParser(usage)

  parser.add_option('-b', '--bad_revision',
                    type='str',
                    help='A bad revision to start bisection. '
                         'Must be later than good revision. May be either a '
                         'git or svn revision.')
  parser.add_option('-g', '--good_revision',
                    type='str',
                    help='A revision to start bisection where performance '
                         'test is known to pass. Must be earlier than the '
                         'bad revision. May be either a git or svn revision.')
  parser.add_option('-w', '--working_directory',
                    type='str',
                    default='..',
                    help='A working directory to supply to the bisection '
                         'script, which will use it as the location to check '
                         'out a copy of the chromium depot.')
  parser.add_option('--extra_src',
                    type='str',
                    help='Path to extra source file. If this is supplied, '
                         'the bisect script will use it to override default '
                         'behavior.')
  parser.add_option('--target_build_type',
                    type='choice',
                    choices=['Release', 'Debug'],
                    help='The target build type. Choices are "Release" '
                         'or "Debug".')
  parser.add_option('--goma_threads', default=64,
                    type='int',
                    help='Number of goma threads to use. 0 will disable goma.')
  parser.add_option('--builder_type', default='',
                    choices=['perf', 'full', 'android-chrome-perf', ''],
                    help='Type of builder to get build from. This allows the '
                         'script to use cached builds. By default (empty), '
                         'binaries are built locally.')
  options, _ = parser.parse_args()

  error_msg = ''
  if not options.good_revision:
    error_msg += 'Error: missing required parameter: --good_revision\n'
  if not options.bad_revision:
    error_msg += 'Error: missing required parameter: --bad_revision\n'

  if error_msg:
    print error_msg
    parser.print_help()
    return 1

  if 'android' not in options.browser_type and sys.platform.startswith('linux'):
    if not os.environ.get('CHROME_DEVEL_SANDBOX'):
      print ('SUID sandbox has not been set up. '
             'See https://chromium.googlesource.com/chromium/src/'
             '+/master/docs/linux_suid_sandbox_development.md.')
      return 1

  return _RunBisectionScript(options)


if __name__ == '__main__':
  sys.exit(main())
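As a usage note: for the example invocation from the docstring, tools/run-bisect-manual-test.py -g 201281 -b 201290, with all defaults (release browser, working directory '..'), _RunBisectionScript assembles roughly the argv below. This is a sketch for illustration only; the absolute path to bisect-manual-test.py depends on your checkout.

# Illustrative only: the approximate argv passed to subprocess.call for the
# docstring's example invocation with default options. The script path is a
# placeholder for os.path.abspath('../bisect/src/tools/bisect-manual-test.py').
expected_cmd = [
    'python', _BISECT_SCRIPT_PATH,
    '-c', 'python /abs/path/to/bisect/src/tools/bisect-manual-test.py '
          '--browser=release --chrome-root=.',
    '-g', '201281',
    '-b', '201290',
    '-m', 'manual_test/manual_test',
    '-r', '1',
    '--working_directory', '..',
    '--build_preference', 'ninja',
    '--no_custom_deps',
    '--builder_type', '',
    '--target_build_type', 'Release',      # from 'release'.title(); no override given
    '--use_goma', '--goma_threads', '64',  # goma_threads defaults to 64
]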
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Config file for Run Performance Test Bot
This script is intended for use by anyone that wants to run a remote performance
test. Modify the config below and add the command to run the performance test,
the metric you're interested in, and repeat/discard parameters. You can then
run a git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command line to pass to the
bisect-perf-regression.py script in order to execute the test.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
'truncate_percent': Discard the highest/lowest % values from performance test.
Sample config:
config = {
'command': './tools/perf/run_benchmark --browser=release smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
On Windows:
- If you're calling a python script you will need to add "python" to
the command:
config = {
'command': 'python tools/perf/run_benchmark -v --browser=release smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
On ChromeOS:
- Script accepts either ChromeOS versions, or unix timestamps as revisions.
- You don't need to specify --identity and --remote, they will be added to
the command using the bot's BISECT_CROS_IP and BISECT_CROS_BOARD values.
config = {
'command': './tools/perf/run_benchmark -v '\
'--browser=cros-chrome-guest '\
'smoothness.key_mobile_sites',
'metric': 'mean_frame_time/mean_frame_time',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
"""
config = {
'command': '',
'metric': '',
'repeat_count': '',
'max_time_minutes': '',
'truncate_percent': '',
}
# Workaround git try issue, see crbug.com/257689