Commit 62edbcf1 authored by qyearsley's avatar qyearsley Committed by Commit bot

Move bisect-perf-regression.py into auto_bisect directory.

The purpose of this is to continue gathering all the related code together, and to make it easier to run pylint/tests for the main module which still contains most of the code.

After doing this, pylint and the tests caught a couple of things -- one unused variable name, and after the script changed directory, some of the paths had to be modified as well.

BUG=

Review URL: https://codereview.chromium.org/564663002

Cr-Commit-Position: refs/heads/master@{#296847}
parent cff892f9
...@@ -86,5 +86,8 @@ def _RunUnitTests(input_api, output_api): ...@@ -86,5 +86,8 @@ def _RunUnitTests(input_api, output_api):
def _RunPyLint(input_api, output_api): def _RunPyLint(input_api, output_api):
"""Runs unit tests for auto-bisect.""" """Runs unit tests for auto-bisect."""
tests = input_api.canned_checks.GetPylint(input_api, output_api) telemetry_path = os.path.join(
input_api.PresubmitLocalPath(), os.path.pardir, 'telemetry')
tests = input_api.canned_checks.GetPylint(
input_api, output_api, extra_paths_list=[telemetry_path])
return input_api.RunTests(tests) return input_api.RunTests(tests)
...@@ -8,17 +8,11 @@ Documentation: ...@@ -8,17 +8,11 @@ Documentation:
http://www.chromium.org/developers/bisecting-bugs http://www.chromium.org/developers/bisecting-bugs
http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions
Overview of bisect-related files: Overview of bisect-related files in src/tools:
src/tools/run-bisect-perf-regression.py run-bisect-perf-regression.py -- used to kick off a bisect job
-- the script used to kick off a normal performance regression bisect job. prepare-bisect-perf-regression.py -- run before the above to prepare the repo
src/tools/auto_bisect/bisect.cfg: run-bisect-manual-test.py -- used to manually bisect
-- this file contains parameters for a bisect job, and is read by other bisect-manual-test.py -- helper module used by run-bisect-manual-test.py
modules including run-bisect-perf-regression.py. auto_bisect/bisect.cfg -- config parameters for a bisect job
src/tools/run-bisect-manual-test.py run-perf-test.cfg -- config parameters for running a perf test once
-- a script which is used to manually bisect regressions; this also
depends on bisect-perf-regression.py.
src/tools/bisect-perf-regression.py
-- the main module which the others depend on.
src/tools/bisect-manual-test.py
-- a helper module used when manually bisecting regressions.
...@@ -15,7 +15,7 @@ range. ...@@ -15,7 +15,7 @@ range.
Example usage using SVN revisions: Example usage using SVN revisions:
./tools/bisect-perf-regression.py -c\ ./tools/bisect_perf_regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit -g 168222 -b 168232 -m shutdown/simple-user-quit
...@@ -25,7 +25,7 @@ revision were merged in. ...@@ -25,7 +25,7 @@ revision were merged in.
Example usage using git hashes: Example usage using git hashes:
./tools/bisect-perf-regression.py -c\ ./tools/bisect_perf_regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\ "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\ -g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\ -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
...@@ -47,14 +47,15 @@ import sys ...@@ -47,14 +47,15 @@ import sys
import time import time
import zipfile import zipfile
sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry')) sys.path.append(os.path.join(
os.path.dirname(__file__), os.path.pardir, 'telemetry'))
from auto_bisect import bisect_utils import bisect_utils
from auto_bisect import builder import builder
from auto_bisect import math_utils import math_utils
from auto_bisect import request_build import request_build
from auto_bisect import source_control as source_control_module import source_control as source_control_module
from auto_bisect import ttest import ttest
from telemetry.util import cloud_storage from telemetry.util import cloud_storage
# Below is the map of "depot" names to information about each depot. Each depot # Below is the map of "depot" names to information about each depot. Each depot
...@@ -150,6 +151,9 @@ DEPOT_DEPS_NAME = { ...@@ -150,6 +151,9 @@ DEPOT_DEPS_NAME = {
DEPOT_NAMES = DEPOT_DEPS_NAME.keys() DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# The script is in chromium/src/tools/auto_bisect. Throughout this script,
# we use paths to other things in the chromium/src repository.
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome' CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# Possible return values from BisectPerformanceMetrics.RunTest. # Possible return values from BisectPerformanceMetrics.RunTest.
...@@ -913,6 +917,10 @@ class BisectPerformanceMetrics(object): ...@@ -913,6 +917,10 @@ class BisectPerformanceMetrics(object):
self.opts = opts self.opts = opts
self.source_control = source_control self.source_control = source_control
# The src directory here is NOT the src/ directory for the repository
# where the bisect script is running from. Instead, it's the src/ directory
# inside the bisect/ directory which is created before running.
self.src_cwd = os.getcwd() self.src_cwd = os.getcwd()
self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros') self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
self.depot_cwd = {} self.depot_cwd = {}
...@@ -920,12 +928,11 @@ class BisectPerformanceMetrics(object): ...@@ -920,12 +928,11 @@ class BisectPerformanceMetrics(object):
self.warnings = [] self.warnings = []
self.builder = builder.Builder.FromOpts(opts) self.builder = builder.Builder.FromOpts(opts)
for d in DEPOT_NAMES: for depot in DEPOT_NAMES:
# The working directory of each depot is just the path to the depot, but # The working directory of each depot is just the path to the depot, but
# since we're already in 'src', we can skip that part. # since we're already in 'src', we can skip that part.
self.depot_cwd[depot] = os.path.join(
self.depot_cwd[d] = os.path.join( self.src_cwd, DEPOT_DEPS_NAME[depot]['src'][4:])
self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
def PerformCleanup(self): def PerformCleanup(self):
"""Performs cleanup when script is finished.""" """Performs cleanup when script is finished."""
...@@ -1090,8 +1097,8 @@ class BisectPerformanceMetrics(object): ...@@ -1090,8 +1097,8 @@ class BisectPerformanceMetrics(object):
depot_data_src = depot_data.get('src') or depot_data.get('src_old') depot_data_src = depot_data.get('src') or depot_data.get('src_old')
src_dir = deps_data.get(depot_data_src) src_dir = deps_data.get(depot_data_src)
if src_dir: if src_dir:
self.depot_cwd[depot_name] = os.path.join(self.src_cwd, self.depot_cwd[depot_name] = os.path.join(
depot_data_src[4:]) self.src_cwd, depot_data_src[4:])
re_results = rxp.search(src_dir) re_results = rxp.search(src_dir)
if re_results: if re_results:
results[depot_name] = re_results.group('revision') results[depot_name] = re_results.group('revision')
...@@ -1567,11 +1574,14 @@ class BisectPerformanceMetrics(object): ...@@ -1567,11 +1574,14 @@ class BisectPerformanceMetrics(object):
return self.opts.bisect_mode in [BISECT_MODE_STD_DEV] return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
def GetCompatibleCommand(self, command_to_run, revision, depot): def GetCompatibleCommand(self, command_to_run, revision, depot):
# Prior to crrev.com/274857 *only* android-chromium-testshell """Return a possibly modified test command depending on the revision.
# Then until crrev.com/276628 *both* (android-chromium-testshell and
# android-chrome-shell) work. After that rev 276628 *only* Prior to crrev.com/274857 *only* android-chromium-testshell
# android-chrome-shell works. bisect-perf-regression.py script should Then until crrev.com/276628 *both* (android-chromium-testshell and
# handle these cases and set appropriate browser type based on revision. android-chrome-shell) work. After that rev 276628 *only*
android-chrome-shell works. The bisect_perf_regression.py script should
handle these cases and set appropriate browser type based on revision.
"""
if self.opts.target_platform in ['android']: if self.opts.target_platform in ['android']:
# When it's a third_party depot, get the chromium revision. # When it's a third_party depot, get the chromium revision.
if depot != 'chromium': if depot != 'chromium':
...@@ -2283,7 +2293,7 @@ class BisectPerformanceMetrics(object): ...@@ -2283,7 +2293,7 @@ class BisectPerformanceMetrics(object):
this will contain the field "error", otherwise None. this will contain the field "error", otherwise None.
""" """
if self.opts.target_platform == 'android': if self.opts.target_platform == 'android':
revision_to_check = self.source_control.GetCommitPosition(good_revision) good_revision = self.source_control.GetCommitPosition(good_revision)
if (bisect_utils.IsStringInt(good_revision) if (bisect_utils.IsStringInt(good_revision)
and good_revision < 265549): and good_revision < 265549):
return {'error': ( return {'error': (
......
...@@ -6,10 +6,8 @@ import os ...@@ -6,10 +6,8 @@ import os
import re import re
import unittest import unittest
from auto_bisect import source_control as source_control_module import bisect_perf_regression
import source_control as source_control_module
# Special import necessary because filename contains dash characters.
bisect_perf_module = __import__('bisect-perf-regression')
def _GetBisectPerformanceMetricsInstance(): def _GetBisectPerformanceMetricsInstance():
"""Returns an instance of the BisectPerformanceMetrics class.""" """Returns an instance of the BisectPerformanceMetrics class."""
...@@ -22,13 +20,13 @@ def _GetBisectPerformanceMetricsInstance(): ...@@ -22,13 +20,13 @@ def _GetBisectPerformanceMetricsInstance():
'good_revision': 280000, 'good_revision': 280000,
'bad_revision': 280005, 'bad_revision': 280005,
} }
bisect_options = bisect_perf_module.BisectOptions.FromDict(options_dict) bisect_options = bisect_perf_regression.BisectOptions.FromDict(options_dict)
source_control = source_control_module.DetermineAndCreateSourceControl( source_control = source_control_module.DetermineAndCreateSourceControl(
bisect_options) bisect_options)
bisect_instance = bisect_perf_module.BisectPerformanceMetrics( bisect_instance = bisect_perf_regression.BisectPerformanceMetrics(
source_control, bisect_options) source_control, bisect_options)
bisect_instance.src_cwd = os.path.abspath( bisect_instance.src_cwd = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir)) os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
return bisect_instance return bisect_instance
...@@ -49,7 +47,7 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -49,7 +47,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
""" """
# ConfidenceScore takes a list of lists but these lists are flattened # ConfidenceScore takes a list of lists but these lists are flattened
# inside the function. # inside the function.
confidence = bisect_perf_module.ConfidenceScore( confidence = bisect_perf_regression.ConfidenceScore(
[[v] for v in bad_values], [[v] for v in bad_values],
[[v] for v in good_values]) [[v] for v in good_values])
self.assertEqual(score, confidence) self.assertEqual(score, confidence)
...@@ -128,7 +126,7 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -128,7 +126,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
} }
# Testing private function. # Testing private function.
# pylint: disable=W0212 # pylint: disable=W0212
vars_dict = bisect_perf_module._ParseRevisionsFromDEPSFileManually( vars_dict = bisect_perf_regression._ParseRevisionsFromDEPSFileManually(
deps_file_contents) deps_file_contents)
self.assertEqual(vars_dict, expected_vars_dict) self.assertEqual(vars_dict, expected_vars_dict)
...@@ -140,7 +138,8 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -140,7 +138,8 @@ class BisectPerfRegressionTest(unittest.TestCase):
metric = ['my_chart', 'my_trace'] metric = ['my_chart', 'my_trace']
# Testing private function. # Testing private function.
# pylint: disable=W0212 # pylint: disable=W0212
values = bisect_perf_module._TryParseResultValuesFromOutput(metric, results) values = bisect_perf_regression._TryParseResultValuesFromOutput(
metric, results)
self.assertEqual(expected_values, values) self.assertEqual(expected_values, values)
def testTryParseResultValuesFromOutput_WithSingleValue(self): def testTryParseResultValuesFromOutput_WithSingleValue(self):
...@@ -192,15 +191,15 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -192,15 +191,15 @@ class BisectPerfRegressionTest(unittest.TestCase):
Prior to r274857, only android-chromium-testshell works. Prior to r274857, only android-chromium-testshell works.
In the range [274857, 276628], both work. In the range [274857, 276628], both work.
""" """
bisect_options = bisect_perf_module.BisectOptions() bisect_options = bisect_perf_regression.BisectOptions()
bisect_options.output_buildbot_annotations = None bisect_options.output_buildbot_annotations = None
source_control = source_control_module.DetermineAndCreateSourceControl( source_control = source_control_module.DetermineAndCreateSourceControl(
bisect_options) bisect_options)
bisect_instance = bisect_perf_module.BisectPerformanceMetrics( bisect_instance = bisect_perf_regression.BisectPerformanceMetrics(
source_control, bisect_options) source_control, bisect_options)
bisect_instance.opts.target_platform = target_platform bisect_instance.opts.target_platform = target_platform
git_revision = bisect_instance.source_control.ResolveToRevision( git_revision = bisect_instance.source_control.ResolveToRevision(
revision, 'chromium', bisect_perf_module.DEPOT_DEPS_NAME, 100) revision, 'chromium', bisect_perf_regression.DEPOT_DEPS_NAME, 100)
depot = 'chromium' depot = 'chromium'
command = bisect_instance.GetCompatibleCommand( command = bisect_instance.GetCompatibleCommand(
original_command, git_revision, depot) original_command, git_revision, depot)
...@@ -268,7 +267,7 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -268,7 +267,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
def testGetCommitPositionForV8(self): def testGetCommitPositionForV8(self):
bisect_instance = _GetBisectPerformanceMetricsInstance() bisect_instance = _GetBisectPerformanceMetricsInstance()
v8_rev = '21d700eedcdd6570eff22ece724b63a5eefe78cb' v8_rev = '21d700eedcdd6570eff22ece724b63a5eefe78cb'
depot_path = os.path.join(bisect_instance.src_cwd, 'src', 'v8') depot_path = os.path.join(bisect_instance.src_cwd, 'v8')
self.assertEqual( self.assertEqual(
23634, 23634,
bisect_instance.source_control.GetCommitPosition(v8_rev, depot_path)) bisect_instance.source_control.GetCommitPosition(v8_rev, depot_path))
...@@ -276,8 +275,7 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -276,8 +275,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
def testGetCommitPositionForWebKit(self): def testGetCommitPositionForWebKit(self):
bisect_instance = _GetBisectPerformanceMetricsInstance() bisect_instance = _GetBisectPerformanceMetricsInstance()
wk_rev = 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61' wk_rev = 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61'
depot_path = os.path.join(bisect_instance.src_cwd, 'src', 'third_party', depot_path = os.path.join(bisect_instance.src_cwd, 'third_party', 'WebKit')
'WebKit')
self.assertEqual( self.assertEqual(
181660, 181660,
bisect_instance.source_control.GetCommitPosition(wk_rev, depot_path)) bisect_instance.source_control.GetCommitPosition(wk_rev, depot_path))
...@@ -290,7 +288,7 @@ class BisectPerfRegressionTest(unittest.TestCase): ...@@ -290,7 +288,7 @@ class BisectPerfRegressionTest(unittest.TestCase):
# to search is not changed in DEPS content. # to search is not changed in DEPS content.
# TODO (prasadv): Add a separate test to validate the DEPS contents with the # TODO (prasadv): Add a separate test to validate the DEPS contents with the
# format that bisect script expects. # format that bisect script expects.
deps_contents = bisect_perf_module.ReadStringFromFile(deps_file) deps_contents = bisect_perf_regression.ReadStringFromFile(deps_file)
deps_key = 'v8_revision' deps_key = 'v8_revision'
depot = 'v8' depot = 'v8'
git_revision = 'a12345789a23456789a123456789a123456789' git_revision = 'a12345789a23456789a123456789a123456789'
......
...@@ -440,7 +440,6 @@ def RunGit(command, cwd=None): ...@@ -440,7 +440,6 @@ def RunGit(command, cwd=None):
A tuple of the output and return code. A tuple of the output and return code.
""" """
command = ['git'] + command command = ['git'] + command
return RunProcessAndRetrieveOutput(command, cwd=cwd) return RunProcessAndRetrieveOutput(command, cwd=cwd)
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
import os import os
from . import bisect_utils import bisect_utils
CROS_VERSION_PATTERN = 'new version number from %s' CROS_VERSION_PATTERN = 'new version number from %s'
......
...@@ -14,8 +14,8 @@ to setup the sandbox manually before running the script. Otherwise the script ...@@ -14,8 +14,8 @@ to setup the sandbox manually before running the script. Otherwise the script
fails to launch Chrome and exits with an error. fails to launch Chrome and exits with an error.
This script serves a similar function to bisect-builds.py, except it uses This script serves a similar function to bisect-builds.py, except it uses
the bisect-perf-regression.py. This means that it can actually check out the bisect_perf_regression.py. This means that it can obtain builds of
and build revisions of Chromium that are not available in cloud storage. Chromium for revisions where builds aren't available in cloud storage.
""" """
import os import os
...@@ -24,26 +24,28 @@ import sys ...@@ -24,26 +24,28 @@ import sys
CROS_BOARD_ENV = 'BISECT_CROS_BOARD' CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP' CROS_IP_ENV = 'BISECT_CROS_IP'
_DIR_TOOLS_ROOT = os.path.abspath(os.path.dirname(__file__)) _TOOLS_DIR = os.path.abspath(os.path.dirname(__file__))
_BISECT_SCRIPT_PATH = os.path.join(
_TOOLS_DIR, 'auto_bisect', 'bisect_perf_regression.py')
sys.path.append(os.path.join(_DIR_TOOLS_ROOT, 'telemetry')) sys.path.append(os.path.join(_TOOLS_DIR, 'telemetry'))
from telemetry.core import browser_options from telemetry.core import browser_options
def _RunBisectionScript(options): def _RunBisectionScript(options):
"""Attempts to execute src/tools/bisect-perf-regression.py. """Attempts to execute the bisect script (bisect_perf_regression.py).
Args: Args:
options: The configuration options to pass to the bisect script. options: The configuration options to pass to the bisect script.
Returns: Returns:
The exit code of bisect-perf-regression.py: 0 on success, otherwise 1. An exit code; 0 for success, 1 for failure.
""" """
test_command = ('python %s --browser=%s --chrome-root=.' % test_command = ('python %s --browser=%s --chrome-root=.' %
(os.path.join(_DIR_TOOLS_ROOT, 'bisect-manual-test.py'), (os.path.join(_TOOLS_DIR, 'bisect-manual-test.py'),
options.browser_type)) options.browser_type))
cmd = ['python', os.path.join(_DIR_TOOLS_ROOT, 'bisect-perf-regression.py'), cmd = ['python', _BISECT_SCRIPT_PATH,
'-c', test_command, '-c', test_command,
'-g', options.good_revision, '-g', options.good_revision,
'-b', options.bad_revision, '-b', options.bad_revision,
...@@ -82,8 +84,7 @@ def _RunBisectionScript(options): ...@@ -82,8 +84,7 @@ def _RunBisectionScript(options):
return_code = subprocess.call(cmd) return_code = subprocess.call(cmd)
if return_code: if return_code:
print ('Error: bisect-perf-regression.py returned with error %d' % print 'Error: bisect_perf_regression.py had exit code %d.' % return_code
return_code)
print print
return return_code return return_code
......
...@@ -5,13 +5,12 @@ ...@@ -5,13 +5,12 @@
"""Run Performance Test Bisect Tool """Run Performance Test Bisect Tool
This script is used by a try bot to run the src/tools/bisect-perf-regression.py This script is used by a try bot to run the bisect script with the parameters
script with the parameters specified in src/tools/auto_bisect/bisect.cfg. specified in the bisect config file. It checks out a copy of the depot in
It will check out a copy of the depot in a subdirectory 'bisect' of the working a subdirectory 'bisect' of the working directory provided, and runs the
directory provided, and run the bisect-perf-regression.py script there. bisect script there.
""" """
import imp
import optparse import optparse
import os import os
import platform import platform
...@@ -19,22 +18,20 @@ import subprocess ...@@ -19,22 +18,20 @@ import subprocess
import sys import sys
import traceback import traceback
from auto_bisect import bisect_perf_regression
from auto_bisect import bisect_utils from auto_bisect import bisect_utils
from auto_bisect import math_utils from auto_bisect import math_utils
bisect = imp.load_source('bisect-perf-regression',
os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
'bisect-perf-regression.py'))
CROS_BOARD_ENV = 'BISECT_CROS_BOARD' CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
CROS_IP_ENV = 'BISECT_CROS_IP' CROS_IP_ENV = 'BISECT_CROS_IP'
# Default config file paths, relative to this script. SCRIPT_DIR = os.path.dirname(__file__)
BISECT_REGRESSION_CONFIG = os.path.join('auto_bisect', 'bisect.cfg') SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
RUN_TEST_CONFIG = 'run-perf-test.cfg' BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
WEBKIT_RUN_TEST_CONFIG = os.path.join( RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
'..', 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')
class Goma(object): class Goma(object):
...@@ -230,26 +227,25 @@ def _CreateBisectOptionsFromConfig(config): ...@@ -230,26 +227,25 @@ def _CreateBisectOptionsFromConfig(config):
else: else:
opts_dict['target_platform'] = 'android' opts_dict['target_platform'] = 'android'
return bisect.BisectOptions.FromDict(opts_dict) return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
def _RunPerformanceTest(config, path_to_file): def _RunPerformanceTest(config):
"""Runs a performance test with and without the current patch. """Runs a performance test with and without the current patch.
Args: Args:
config: Contents of the config file, a dictionary. config: Contents of the config file, a dictionary.
path_to_file: Path to the bisect-perf-regression.py script.
Attempts to build and run the current revision with and without the Attempts to build and run the current revision with and without the
current patch, with the parameters passed in. current patch, with the parameters passed in.
""" """
# Bisect script expects to be run from the src directory # Bisect script expects to be run from the src directory
os.chdir(os.path.join(path_to_file, '..')) os.chdir(SRC_DIR)
bisect_utils.OutputAnnotationStepStart('Building With Patch') bisect_utils.OutputAnnotationStepStart('Building With Patch')
opts = _CreateBisectOptionsFromConfig(config) opts = _CreateBisectOptionsFromConfig(config)
b = bisect.BisectPerformanceMetrics(None, opts) b = bisect_perf_regression.BisectPerformanceMetrics(None, opts)
if bisect_utils.RunGClient(['runhooks']): if bisect_utils.RunGClient(['runhooks']):
raise RuntimeError('Failed to run gclient runhooks') raise RuntimeError('Failed to run gclient runhooks')
...@@ -339,17 +335,16 @@ def _RunPerformanceTest(config, path_to_file): ...@@ -339,17 +335,16 @@ def _RunPerformanceTest(config, path_to_file):
bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link) bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma): def _SetupAndRunPerformanceTest(config, path_to_goma):
"""Attempts to build and run the current revision with and without the """Attempts to build and run the current revision with and without the
current patch, with the parameters passed in. current patch, with the parameters passed in.
Args: Args:
config: The config read from run-perf-test.cfg. config: The config read from run-perf-test.cfg.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory. path_to_goma: Path to goma directory.
Returns: Returns:
The exit code of bisect-perf-regression.py: 0 on success, otherwise 1. An exit code: 0 on success, otherwise 1.
""" """
if platform.release() == 'XP': if platform.release() == 'XP':
print 'Windows XP is not supported for perf try jobs because it lacks ' print 'Windows XP is not supported for perf try jobs because it lacks '
...@@ -360,7 +355,7 @@ def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma): ...@@ -360,7 +355,7 @@ def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
config['use_goma'] = bool(path_to_goma) config['use_goma'] = bool(path_to_goma)
if config['use_goma']: if config['use_goma']:
config['goma_dir'] = os.path.abspath(path_to_goma) config['goma_dir'] = os.path.abspath(path_to_goma)
_RunPerformanceTest(config, path_to_file) _RunPerformanceTest(config)
return 0 return 0
except RuntimeError, e: except RuntimeError, e:
bisect_utils.OutputAnnotationStepClosed() bisect_utils.OutputAnnotationStepClosed()
...@@ -369,16 +364,13 @@ def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma): ...@@ -369,16 +364,13 @@ def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
def _RunBisectionScript( def _RunBisectionScript(
config, working_directory, path_to_file, path_to_goma, path_to_extra_src, config, working_directory, path_to_goma, path_to_extra_src, dry_run):
dry_run): """Attempts to execute the bisect script with the given parameters.
"""Attempts to execute bisect-perf-regression.py with the given parameters.
Args: Args:
config: A dict containing the parameters to pass to the script. config: A dict containing the parameters to pass to the script.
working_directory: A working directory to provide to the working_directory: A working directory to provide to the bisect script,
bisect-perf-regression.py script, where it will store its own copy of where it will store its own copy of the depot.
the depot.
path_to_file: Path to the bisect-perf-regression.py script.
path_to_goma: Path to goma directory. path_to_goma: Path to goma directory.
path_to_extra_src: Path to extra source file. path_to_extra_src: Path to extra source file.
dry_run: Do a dry run, skipping sync, build, and performance testing steps. dry_run: Do a dry run, skipping sync, build, and performance testing steps.
...@@ -388,7 +380,7 @@ def _RunBisectionScript( ...@@ -388,7 +380,7 @@ def _RunBisectionScript(
""" """
_PrintConfigStep(config) _PrintConfigStep(config)
cmd = ['python', os.path.join(path_to_file, 'bisect-perf-regression.py'), cmd = ['python', os.path.join(BISECT_SCRIPT_DIR, 'bisect_perf_regression.py'),
'-c', config['command'], '-c', config['command'],
'-g', config['good_revision'], '-g', config['good_revision'],
'-b', config['bad_revision'], '-b', config['bad_revision'],
...@@ -469,7 +461,7 @@ def _RunBisectionScript( ...@@ -469,7 +461,7 @@ def _RunBisectionScript(
return_code = subprocess.call(cmd) return_code = subprocess.call(cmd)
if return_code: if return_code:
print ('Error: bisect-perf-regression.py returned with error %d\n' print ('Error: bisect_perf_regression.py returned with error %d\n'
% return_code) % return_code)
return return_code return return_code
...@@ -525,14 +517,11 @@ def main(): ...@@ -525,14 +517,11 @@ def main():
just run a performance test, depending on the particular config parameters just run a performance test, depending on the particular config parameters
specified in the config file. specified in the config file.
""" """
parser = _OptionParser() parser = _OptionParser()
opts, _ = parser.parse_args() opts, _ = parser.parse_args()
current_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
# Use the default config file path unless one was specified. # Use the default config file path unless one was specified.
config_path = os.path.join(current_dir, BISECT_REGRESSION_CONFIG) config_path = BISECT_CONFIG_PATH
if opts.path_to_config: if opts.path_to_config:
config_path = opts.path_to_config config_path = opts.path_to_config
config = _LoadConfigFile(config_path) config = _LoadConfigFile(config_path)
...@@ -547,13 +536,13 @@ def main(): ...@@ -547,13 +536,13 @@ def main():
return 1 return 1
return _RunBisectionScript( return _RunBisectionScript(
config, opts.working_directory, current_dir, config, opts.working_directory, opts.path_to_goma, opts.extra_src,
opts.path_to_goma, opts.extra_src, opts.dry_run) opts.dry_run)
# If it wasn't valid for running a bisect, then maybe the user wanted # If it wasn't valid for running a bisect, then maybe the user wanted
# to run a perf test instead of a bisect job. Try reading any possible # to run a perf test instead of a bisect job. Try reading any possible
# perf test config files. # perf test config files.
perf_cfg_files = [RUN_TEST_CONFIG, WEBKIT_RUN_TEST_CONFIG] perf_cfg_files = [RUN_TEST_CONFIG_PATH, WEBKIT_RUN_TEST_CONFIG_PATH]
for current_perf_cfg_file in perf_cfg_files: for current_perf_cfg_file in perf_cfg_files:
if opts.path_to_config: if opts.path_to_config:
path_to_perf_cfg = opts.path_to_config path_to_perf_cfg = opts.path_to_config
...@@ -566,12 +555,10 @@ def main(): ...@@ -566,12 +555,10 @@ def main():
config_is_valid = _ValidatePerfConfigFile(config) config_is_valid = _ValidatePerfConfigFile(config)
if config and config_is_valid: if config and config_is_valid:
return _SetupAndRunPerformanceTest( return _SetupAndRunPerformanceTest(config, opts.path_to_goma)
config, current_dir, opts.path_to_goma)
print ('Error: Could not load config file. Double check your changes to ' print ('Error: Could not load config file. Double check your changes to '
'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax ' 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
'errors.\n')
return 1 return 1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment