Commit 2a1c24fe authored by Nico Weber, committed by Commit Bot

Remove tools/auto_bisect.

It looks like this was replaced with something else a while ago,
so remove the old thing.  It still references a script I'm hoping
to delete.

Bug: 330631
Change-Id: Ic0834dfab0a635d6926682299d9cdf49e4a24d89
Reviewed-on: https://chromium-review.googlesource.com/579937
Reviewed-by: Simon Hatch <simonhatch@chromium.org>
Commit-Queue: Nico Weber <thakis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#488304}
parent 638a36ea
prasadv@chromium.org
qyearsley@chromium.org
robertocn@chromium.org
sergiyb@chromium.org
simonhatch@chromium.org
tonyg@chromium.org
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for auto-bisect.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API.
"""
import imp
import subprocess
import os
# Paths to bisect config files relative to this script.
CONFIG_FILES = [
'bisect.cfg',
os.path.join('..', 'run-perf-test.cfg'),
]
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Does all presubmit checks for auto-bisect."""
results = []
results.extend(_CheckAllConfigFiles(input_api, output_api))
results.extend(_RunUnitTests(input_api, output_api))
results.extend(_RunPyLint(input_api, output_api))
return results
def _CheckAllConfigFiles(input_api, output_api):
"""Checks all bisect config files and returns a list of presubmit results."""
results = []
script_path = input_api.PresubmitLocalPath()
for config_file in CONFIG_FILES:
file_path = os.path.join(script_path, config_file)
results.extend(_CheckConfigFile(file_path, output_api))
return results
def _CheckConfigFile(file_path, output_api):
"""Checks one bisect config file and returns a list of presubmit results."""
try:
config_file = imp.load_source('config', file_path)
except IOError as e:
warning = 'Failed to read config file %s: %s' % (file_path, str(e))
return [output_api.PresubmitError(warning, items=[file_path])]
if not hasattr(config_file, 'config'):
warning = 'Config file has no "config" global variable: %s' % file_path
return [output_api.PresubmitError(warning, items=[file_path])]
if type(config_file.config) is not dict:
warning = 'Config file "config" global variable is not a dict: %s' % file_path
return [output_api.PresubmitError(warning, items=[file_path])]
for k, v in config_file.config.iteritems():
if v != '':
warning = 'Non-empty value in config dict: %s: %s' % (repr(k), repr(v))
warning += ('\nThe bisection config file should only contain a config '
'dict with empty fields. Changes to this file should not '
'be submitted.')
return [output_api.PresubmitError(warning, items=[file_path])]
return []
def _RunUnitTests(input_api, output_api):
"""Runs unit tests for auto-bisect."""
repo_root = input_api.change.RepositoryRoot()
auto_bisect_dir = os.path.join(repo_root, 'tools', 'auto_bisect')
test_runner = os.path.join(auto_bisect_dir, 'run_tests')
return_code = subprocess.call(['python', test_runner])
if return_code:
message = 'Auto-bisect unit tests did not all pass.'
return [output_api.PresubmitError(message)]
return []
def _RunPyLint(input_api, output_api):
"""Runs unit tests for auto-bisect."""
telemetry_path = os.path.join(
input_api.PresubmitLocalPath(), '..', '..', 'third_party', 'telemetry')
mock_path = os.path.join(
input_api.PresubmitLocalPath(), '..', '..', 'third_party', 'pymock')
disabled_warnings = [
'relative-import',
]
tests = input_api.canned_checks.GetPylint(
input_api, output_api, disabled_warnings=disabled_warnings,
extra_paths_list=[telemetry_path, mock_path])
return input_api.RunTests(tests)
This directory contains modules related to tools for bisecting regressions.
There are several such tools; their main use is to find the revision where a
performance regression occurred.
These tools are generally run by trybots but can also be run locally.
Documentation:
http://www.chromium.org/developers/bisecting-bugs
http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions
Overview of bisect-related files in src/tools:
run-bisect-perf-regression.py -- used to kick off a bisect job
prepare-bisect-perf-regression.py -- run before the above to prepare the repo
run-bisect-manual-test.py -- used to manually bisect
bisect-manual-test.py -- helper module used by run-bisect-manual-test.py
auto_bisect/bisect.cfg -- config parameters for a bisect job
run-perf-test.cfg -- config parameters for running a perf test once
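For illustration, a filled-in bisect config (this mirrors one of the sample
configs included with these tools; the command, revisions, and metric are only
examples) might look like:

config = {
  'command': 'tools/perf/run_benchmark -v --browser=release page_cycler.intl_ar_fa_he',
  'good_revision': '300138',
  'bad_revision': '300149',
  'metric': 'warm_times/page_load_time',
  'repeat_count': '5',
  'max_time_minutes': '5',
  'truncate_percent': '25',
}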
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Config file read by run-bisect-perf-regression.py.
This script is intended for use by anyone who wants to run a remote bisection
on a range of revisions to look for a performance regression. Modify the config
below and add the revision range, performance command, and metric. You can then
run "git try <bot>" with one of the perf bisect bots (e.g. linux_perf_bisect).
Changes to this file should never be submitted.
Args:
'command': This is the full command to execute the test.
'good_revision': An svn or git revision where the metric hadn't regressed yet.
'bad_revision': An svn or git revision sometime after the metric regressed.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>". For example, a line like
"RESULT warm_times: page_load_time= 123 ms" corresponds to the metric
"warm_times/page_load_time".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
'truncate_percent': The highest/lowest % values will be discarded before
computing the mean result for each revision.
Sample config:
config = {
'command': './tools/perf/run_benchmark --browser=release sunspider',
'metric': 'Total/Total',
'good_revision': '14ac2486c0eba1266d2da1c52e8759d9c784fe80',
'bad_revision': 'fcf8643d31301eea990a4c42d7d8c9fc30cc33ec',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
For Windows, if you're calling a python script you will need to add "python"
to the command, so the command would be changed to:
'python tools/perf/run_benchmark -v --browser=release sunspider',
"""
config = {
'command': '',
'good_revision': '',
'bad_revision': '',
'metric': '',
'repeat_count': '',
'max_time_minutes': '',
'truncate_percent': '',
}
# Workaround git try issue, see crbug.com/257689
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import bisect_utils
import source_control
def Get(bisect_results, opts, depot_registry):
"""Returns the results as a jsonable object."""
if opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE:
# Return-code bisects have no perf metric; use a placeholder value.
metric = 'N/A'
change = '0'
else:
metric = '/'.join(opts.metric)
change = '%.02f%%' % bisect_results.regression_size
status = 'completed'
return {
'try_job_id': opts.try_job_id,
'bug_id': opts.bug_id,
'status': status,
'buildbot_log_url': _GetBuildBotLogUrl(),
'bisect_bot': os.environ.get('BUILDBOT_BUILDERNAME', ''),
'command': opts.command,
'metric': metric,
'change': change,
'score': bisect_results.confidence,
'good_revision': opts.good_revision,
'bad_revision': opts.bad_revision,
'warnings': bisect_results.warnings,
'abort_reason': bisect_results.abort_reason,
'culprit_data': _CulpritData(bisect_results),
'revision_data': _RevisionData(bisect_results, depot_registry),
}
def _CulpritData(bisect_results):
if not bisect_results.culprit_revisions:
return None
cl, culprit_info, depot = bisect_results.culprit_revisions[0]
commit_link = _GetViewVCLinkFromDepotAndHash(cl, depot)
if commit_link:
commit_link = '\nLink : %s' % commit_link
else:
commit_link = ('\nDescription:\n%s' % culprit_info['body'])
return {
'subject': culprit_info['subject'],
'author': culprit_info['email'],
'email': culprit_info['email'],
'cl_date': culprit_info['date'],
'commit_info': commit_link,
'revisions_links': [],
'cl': cl
}
def _RevisionData(bisect_results, depot_registry):
revision_rows = []
for state in bisect_results.state.GetRevisionStates():
commit_position = source_control.GetCommitPosition(
state.revision, depot_registry.GetDepotDir(state.depot))
revision_rows.append({
'depot_name': state.depot,
'deps_revision': state.revision,
'commit_pos': commit_position,
'result': 'good' if state.passed else 'bad',
})
return revision_rows
def _GetViewVCLinkFromDepotAndHash(git_revision, depot):
"""Gets link to the repository browser."""
if depot and 'viewvc' in bisect_utils.DEPOT_DEPS_NAME[depot]:
return bisect_utils.DEPOT_DEPS_NAME[depot]['viewvc'] + git_revision
return ''
def _GetBuildBotLogUrl():
master_url = os.environ.get('BUILDBOT_BUILDBOTURL')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
builder_number = os.environ.get('BUILDBOT_BUILDNUMBER')
if master_url and builder_name and builder_number:
return '%s%s/%s' % (master_url, builder_name, builder_number)
return ''
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class RevisionState(object):
"""Contains bisect state for a given revision.
Properties:
depot: The depot that this revision is from (e.g. WebKit).
revision: Revision number (Git hash or SVN number).
index: Position of the state in the list of all revisions.
value: Value(s) returned from the test.
perf_time: Time that a test took.
build_time: Time that a build took.
passed: Represents whether the performance test was successful at that
revision. Possible values include: 1 (passed), 0 (failed),
'?' (skipped), 'F' (build failed).
external: If the revision is a 'src' revision, 'external' contains the
revisions of each of the external libraries.
"""
def __init__(self, depot, revision, index):
self.depot = depot
self.revision = revision
self.index = index
self.value = None
self.perf_time = 0
self.build_time = 0
self.passed = '?'
self.external = None
# TODO(sergiyb): Update() to parse run_results from the RunTest.
class BisectState(object):
"""Represents a state of the bisect as a collection of revision states."""
def __init__(self, depot, revisions):
"""Initializes a new BisectState object with a set of revision states.
Args:
depot: Name of the depot used for initial set of revision states.
revisions: List of revisions used for initial set of revision states.
"""
self.revision_states = []
self.revision_index = {}
index = 0
for revision in revisions:
new_state = self._InitRevisionState(depot, revision, index)
self.revision_states.append(new_state)
index += 1
@staticmethod
def _RevisionKey(depot, revision):
return "%s:%s" % (depot, revision)
def _InitRevisionState(self, depot, revision, index):
key = self._RevisionKey(depot, revision)
self.revision_index[key] = index
return RevisionState(depot, revision, index)
def GetRevisionState(self, depot, revision):
"""Returns a mutable revision state."""
key = self._RevisionKey(depot, revision)
index = self.revision_index.get(key)
return self.revision_states[index] if index is not None else None
def CreateRevisionStatesAfter(self, depot, revisions, reference_depot,
reference_revision):
"""Creates a set of new revision states after a specified reference state.
Args:
depot: Name of the depot for the new revision states.
revisions: List of revisions for the new revision states.
reference_depot: Name of the depot for the reference revision state.
reference_revision: Revision for the reference revision state.
Returns:
A list containing all created revision states in order as they were added.
"""
ref_key = self._RevisionKey(reference_depot, reference_revision)
ref_index = self.revision_index[ref_key]
num_new_revisions = len(revisions)
for entry in self.revision_states:
if entry.index > ref_index:
entry.index += num_new_revisions
first_index = ref_index + 1
for index, revision in enumerate(revisions, start=first_index):
new_state = self._InitRevisionState(depot, revision, index)
self.revision_states.insert(index, new_state)
return self.revision_states[first_index:first_index + num_new_revisions]
def GetRevisionStates(self):
"""Returns a copy of the list of the revision states."""
return list(self.revision_states)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from bisect_state import BisectState
class BisectStateTest(unittest.TestCase):
def testCreatesRevisionsStateAfterAReferenceRevision(self):
bisect_state = BisectState('chromium', ['a', 'b', 'c', 'd'])
bisect_state.CreateRevisionStatesAfter('webkit', [1, 2, 3], 'chromium', 'b')
bisect_state.CreateRevisionStatesAfter('v8', [100, 200], 'webkit', 2)
actual_revisions = bisect_state.GetRevisionStates()
expected_revisions = [('chromium', 'a'), ('chromium', 'b'), ('webkit', 1),
('webkit', 2), ('v8', 100), ('v8', 200),
('webkit', 3), ('chromium', 'c'), ('chromium', 'd')]
self.assertEqual(len(expected_revisions), len(actual_revisions))
for i in xrange(len(actual_revisions)):
self.assertEqual(i, actual_revisions[i].index)
self.assertEqual(expected_revisions[i][0], actual_revisions[i].depot)
self.assertEqual(expected_revisions[i][1], actual_revisions[i].revision)
# TODO(sergiyb): More tests for the remaining functions.
if __name__ == '__main__':
unittest.main()
# This config just runs the sunspider command once.
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/689
config = {
'command': 'tools/perf/run_benchmark -v --browser=android-chromium sunspider',
"max_time_minutes": "10",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# This should reproduce the regression in http://crbug.com/425582.
# It was based on:
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/704
config = {
'command': 'out/Release/content_unittests --single-process-tests --gtest_filter=DOMStorageAreaTest',
'good_revision': '311607',
'bad_revision': '311608',
'bisect_mode': 'return_code',
'builder_type': 'full',
}
# Workaround git try issue, see crbug.com/257689
# This should reproduce the regression in http://crbug.com/425582.
# It was based on:
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/704
config = {
'command': 'tools/perf/run_benchmark -v --browser=release page_cycler.intl_ar_fa_he',
'good_revision': '300138',
'bad_revision': '300149',
'metric': 'warm_times/page_load_time',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '25',
# Default is "perf".
# 'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the tab-switching command once.
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/689
config = {
"command": "./tools/perf/run_benchmark -v tab_switching.typical_25 --browser=release",
"max_time_minutes": "30",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Based on http://crbug.com/420120.
config = {
'command': 'tools/perf/run_benchmark -v --browser=release page_cycler.bloat',
'good_revision': '297905',
'bad_revision': '297940',
'metric': 'warm_times/page_load_time',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config is based on http://crbug.com/435291.
config = {
'command': 'tools/perf/run_benchmark -v --browser=release tab_switching.five_blank_pages',
'good_revision': '304855',
'bad_revision': '304881',
'metric': 'idle_wakeups_total/idle_wakeups_total',
'repeat_count': '5',
'max_time_minutes': '10',
'truncate_percent': '25',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Starts bisect try jobs on multiple platforms using known-good configs.
The purpose of this script is to serve as an integration test for the
auto-bisect project by starting try jobs for various config types and
various platforms.
The known-good configs are in the same directory as this script. They are
all expected to end in ".cfg" and to start with the name of the platform
followed by a dot.
You can specify --full to try running each config on all applicable bots;
the default behavior is to try each config on only one bot.
"""
import argparse
import logging
import os
import subprocess
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
BISECT_CONFIG = os.path.join(SCRIPT_DIR, os.path.pardir, 'bisect.cfg')
PERF_TEST_CONFIG = os.path.join(
SCRIPT_DIR, os.path.pardir, os.path.pardir, 'run-perf-test.cfg')
PLATFORM_BOT_MAP = {
'linux': ['linux_perf_bisect'],
'mac': ['mac_10_9_perf_bisect', 'mac_10_10_perf_bisect'],
'win': ['win_perf_bisect', 'win_8_perf_bisect', 'win_xp_perf_bisect'],
'winx64': ['win_x64_perf_bisect'],
'android': [
'android_nexus4_perf_bisect',
'android_nexus5_perf_bisect',
'android_nexus7_perf_bisect',
],
}
SVN_URL = 'svn://svn.chromium.org/chrome-try/try-perf'
AUTO_COMMIT_MESSAGE = 'Automatic commit for bisect try job.'
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--full', action='store_true',
help='Run each config on all applicable bots.')
parser.add_argument('configs', nargs='+',
help='One or more sample config files.')
parser.add_argument('--verbose', '-v', action='store_true',
help='Output additional debugging information.')
parser.add_argument('--dry-run', action='store_true',
help='Don\'t execute "git try" while running.')
args = parser.parse_args(argv[1:])
_SetupLogging(args.verbose)
logging.debug('Source configs: %s', args.configs)
try:
_StartTryJobs(args.configs, args.full, args.dry_run)
except subprocess.CalledProcessError as error:
print str(error)
print error.output
def _SetupLogging(verbose):
level = logging.INFO
if verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
def _StartTryJobs(source_configs, full_mode=False, dry_run=False):
"""Tries each of the given sample configs on one or more try bots."""
for source_config in source_configs:
dest_config = _DestConfig(source_config)
bot_names = _BotNames(source_config, full_mode=full_mode)
_StartTry(source_config, dest_config, bot_names, dry_run=dry_run)
def _DestConfig(source_config):
"""Returns the path that a sample config should be copied to."""
if 'bisect' in source_config:
return BISECT_CONFIG
assert 'perf_test' in source_config, source_config
return PERF_TEST_CONFIG
def _BotNames(source_config, full_mode=False):
"""Returns try bot names to use for the given config file name."""
platform = os.path.basename(source_config).split('.')[0]
assert platform in PLATFORM_BOT_MAP
bot_names = PLATFORM_BOT_MAP[platform]
if full_mode:
return bot_names
return [bot_names[0]]
def _StartTry(source_config, dest_config, bot_names, dry_run=False):
"""Sends a try job with the given config to the given try bots.
Args:
source_config: Path of the sample config to copy over.
dest_config: Destination path to copy sample to, e.g. "./bisect.cfg".
bot_names: List of try bot builder names.
dry_run: If True, the commands are not actually executed.
"""
assert os.path.exists(source_config)
assert os.path.exists(dest_config)
assert _LastCommitMessage() != AUTO_COMMIT_MESSAGE
# Copy the sample config over and commit it.
_Run(['cp', source_config, dest_config])
_Run(['git', 'commit', '--all', '-m', AUTO_COMMIT_MESSAGE])
try:
# Start the try job.
job_name = 'Automatically-started (%s)' % os.path.basename(source_config)
try_command = ['git', 'try', '--svn_repo', SVN_URL, '--name', job_name]
for bot_name in bot_names:
try_command.extend(['--bot', bot_name])
print _Run(try_command, dry_run=dry_run)
finally:
# Revert the immediately-previous commit which was made just above.
assert _LastCommitMessage() == AUTO_COMMIT_MESSAGE
_Run(['git', 'reset', '--hard', 'HEAD~1'])
def _LastCommitMessage():
return _Run(['git', 'log', '--format=%s', '-1']).strip()
def _Run(command, dry_run=False):
"""Runs a command in a subprocess.
Args:
command: The command given as an args list.
dry_run: If True, the command is not actually run.
Returns:
The output of the command.
Raises:
subprocess.CalledProcessError: The return-code was non-zero.
"""
logging.debug('Running %s', command)
if dry_run:
return 'Did not run command because this is a dry run.'
return subprocess.check_output(command)
if __name__ == '__main__':
sys.exit(main(sys.argv))
# Config based on http://crbug.com/444762.
config = {
'command': 'python tools/perf/run_benchmark -v --browser=release dromaeo.domcorequery',
'good_revision': '309431',
'bad_revision': '309442',
'metric': 'dom/dom',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the kraken test once.
config = {
"command": "python tools/perf/run_benchmark -v --browser=release kraken",
"max_time_minutes": "10",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Config based on http://crbug.com/444762.
config = {
'command': 'python tools/perf/run_benchmark -v --browser=release dromaeo.domcorequery',
'good_revision': '309431',
'bad_revision': '309442',
'metric': 'dom/dom',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
'target_arch': 'x64',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the kraken test once.
config = {
"command": "python tools/perf/run_benchmark -v --browser=release kraken",
"max_time_minutes": "10",
"repeat_count": "1",
"target_arch": "x64",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""General statistical or mathematical functions."""
import math
def TruncatedMean(data_set, truncate_proportion):
"""Calculates the truncated mean of a set of values.
Note that this isn't just the mean of the set of values with the highest
and lowest values discarded; the non-discarded values are also weighted
differently depending on how many values are discarded.
NOTE: If there's not much benefit from keeping and weighting partial
values, it might be better to use a simplified truncated mean function
without weighting.
Args:
data_set: Non-empty list of values.
truncate_proportion: How much of the upper and lower portions of the data
set to discard, expressed as a value in the range [0, 1].
Note: a value of 0.5 or greater would be meaningless.
Returns:
The truncated mean as a float.
Raises:
TypeError: The data set was empty after discarding values.
"""
if len(data_set) > 2:
data_set = sorted(data_set)
discard_num_float = len(data_set) * truncate_proportion
discard_num_int = int(math.floor(discard_num_float))
kept_weight = len(data_set) - (discard_num_float * 2)
data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
weight_left = 1.0 - (discard_num_float - discard_num_int)
if weight_left < 1:
# If the % to discard leaves a fractional portion, need to weight those
# values.
unweighted_vals = data_set[1:len(data_set)-1]
weighted_vals = [data_set[0], data_set[len(data_set)-1]]
weighted_vals = [w * weight_left for w in weighted_vals]
data_set = weighted_vals + unweighted_vals
else:
kept_weight = len(data_set)
data_sum = reduce(lambda x, y: float(x) + float(y), data_set)
truncated_mean = data_sum / kept_weight
return truncated_mean
def Mean(values):
"""Calculates the arithmetic mean of a list of values."""
return TruncatedMean(values, 0.0)
def Variance(values):
"""Calculates the sample variance."""
if len(values) == 1:
return 0.0
mean = Mean(values)
differences_from_mean = [float(x) - mean for x in values]
squared_differences = [float(x * x) for x in differences_from_mean]
variance = sum(squared_differences) / (len(values) - 1)
return variance
def StandardDeviation(values):
"""Calculates the sample standard deviation of the given list of values."""
return math.sqrt(Variance(values))
def RelativeChange(before, after):
"""Returns the relative change of before and after, relative to before.
There are several different ways to define relative difference between
two numbers; sometimes it is defined as relative to the smaller number,
or to the mean of the two numbers. This version returns the difference
relative to the first of the two numbers.
Args:
before: A number representing an earlier value.
after: Another number, representing a later value.
Returns:
A non-negative floating point number; 0.1 represents a 10% change.
"""
if before == after:
return 0.0
if before == 0:
return float('nan')
difference = after - before
return math.fabs(difference / before)
def PooledStandardError(work_sets):
"""Calculates the pooled sample standard error for a set of samples.
Args:
work_sets: A collection of collections of numbers.
Returns:
Pooled sample standard error.
"""
numerator = 0.0
denominator1 = 0.0
denominator2 = 0.0
for current_set in work_sets:
std_dev = StandardDeviation(current_set)
numerator += (len(current_set) - 1) * std_dev ** 2
denominator1 += len(current_set) - 1
if len(current_set) > 0:
denominator2 += 1.0 / len(current_set)
if denominator1 == 0:
return 0.0
return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
# Redefining built-in 'StandardError'
# pylint: disable=W0622
def StandardError(values):
"""Calculates the standard error of a list of values."""
# NOTE: This behavior of returning 0.0 in the case of an empty list is
# inconsistent with Variance and StandardDeviation above.
if len(values) <= 1:
return 0.0
std_dev = StandardDeviation(values)
return std_dev / math.sqrt(len(values))
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
import math_utils
class MathUtilsTest(unittest.TestCase):
"""Tests for mathematical utility functions."""
def testTruncatedMean_EmptyList(self):
# TruncatedMean raises an error when passed an empty list.
self.assertRaises(TypeError, math_utils.TruncatedMean, [], 0)
def testTruncatedMean_TruncateTooMuch(self):
# An exception is raised if 50% or more is truncated from both sides.
self.assertRaises(TypeError, math_utils.TruncatedMean, [1, 2, 3], 1.0)
self.assertRaises(
ZeroDivisionError, math_utils.TruncatedMean, [1, 2, 3], 0.5)
def testTruncatedMean_AlwaysKeepsAtLeastTwoValues(self):
# If the length of the input is 1 or 2, nothing is truncated and
# the average is returned.
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.0))
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.25))
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.5))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.0))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.25))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.5))
def testTruncatedMean_Interquartile_NumValuesDivisibleByFour(self):
self.assertEqual(5.0, math_utils.TruncatedMean([1, 4, 6, 100], 0.25))
self.assertEqual(
6.5, math_utils.TruncatedMean([1, 2, 5, 6, 7, 8, 40, 50], 0.25))
def testTruncatedMean_Weighting(self):
# In the list [0, 1, 4, 5, 20, 100], when 25% of the list at the start
# and end are discarded, the part that's left is [1, 4, 5, 20], but
# first and last values are weighted so that they only count for half
# as much. So the truncated mean is (1/2 + 4 + 5 + 20/2) / 3.0.
self.assertEqual(6.5, (0.5 + 4 + 5 + 10) / 3.0)
self.assertEqual(6.5, math_utils.TruncatedMean([0, 1, 4, 5, 20, 100], 0.25))
def testMean_OneValue(self):
self.assertEqual(3.0, math_utils.Mean([3]))
def testMean_ShortList(self):
self.assertEqual(0.5, math_utils.Mean([-3, 0, 1, 4]))
def testMean_CompareAlternateImplementation(self):
"""Tests Mean by comparing against an alternate implementation."""
def AlternateMean(values):
return sum(values) / float(len(values))
test_value_lists = [
[1],
[5, 6.5, 1.2, 3],
[-3, 0, 1, 4],
[-3, -1, 0.12, 0.752, 3.33, 8, 16, 32, 439],
]
for value_list in test_value_lists:
self.assertEqual(AlternateMean(value_list), math_utils.Mean(value_list))
def testRelativeChange_NonZero(self):
# The change is relative to the first value, regardless of which is bigger.
self.assertEqual(0.5, math_utils.RelativeChange(1.0, 1.5))
self.assertEqual(0.5, math_utils.RelativeChange(2.0, 1.0))
def testRelativeChange_FromZero(self):
# If the first number is zero, then the result is not a number.
self.assertEqual(0, math_utils.RelativeChange(0, 0))
self.assertTrue(math.isnan(math_utils.RelativeChange(0, 1)))
self.assertTrue(math.isnan(math_utils.RelativeChange(0, -1)))
def testRelativeChange_Negative(self):
# Note that the return value of RelativeChange is always positive.
self.assertEqual(3.0, math_utils.RelativeChange(-1, 2))
self.assertEqual(3.0, math_utils.RelativeChange(1, -2))
self.assertEqual(1.0, math_utils.RelativeChange(-1, -2))
def testVariance_EmptyList(self):
self.assertRaises(TypeError, math_utils.Variance, [])
def testVariance_OneValue(self):
self.assertEqual(0, math_utils.Variance([0]))
self.assertEqual(0, math_utils.Variance([4.3]))
def testVariance_ShortList(self):
# Population variance is the average of squared deviations from the mean.
# The deviations from the mean in this example are [3.5, 0.5, -0.5, -3.5],
# and the squared deviations are [12.25, 0.25, 0.25, 12.25].
# With sample variance, however, 1 is subtracted from the sample size.
# So the sample variance is sum([12.25, 0.25, 0.25, 12.25]) / 3.0.
self.assertAlmostEqual(8.333333334, sum([12.25, 0.25, 0.25, 12.25]) / 3.0)
self.assertAlmostEqual(8.333333334, math_utils.Variance([-3, 0, 1, 4]))
def testStandardDeviation(self):
# Standard deviation is the square root of variance.
self.assertRaises(TypeError, math_utils.StandardDeviation, [])
self.assertEqual(0.0, math_utils.StandardDeviation([4.3]))
self.assertAlmostEqual(2.88675135, math.sqrt(8.33333333333333))
self.assertAlmostEqual(2.88675135,
math_utils.StandardDeviation([-3, 0, 1, 4]))
def testStandardError(self):
# Standard error is std. dev. divided by square root of sample size.
self.assertEqual(0.0, math_utils.StandardError([]))
self.assertEqual(0.0, math_utils.StandardError([4.3]))
self.assertAlmostEqual(1.44337567, 2.88675135 / math.sqrt(4))
self.assertAlmostEqual(1.44337567, math_utils.StandardError([-3, 0, 1, 4]))
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to query the chromium issue tracker.
Note that the documentation for the Issue Tracker API says it's DEPRECATED;
however, it still seems to be in use in other places, such as the performance
dashboard. Also, this module attempts to handle most exceptions thrown while
querying the tracker, so that if and when this API is turned off, the bisection
process is not affected."""
import json
import urllib2
SINGLE_ISSUE_URL = ('https://code.google.com/feeds/issues/p/chromium/issues'
'/full?id=%s&alt=json')
class IssueTrackerQueryException(Exception):
pass
def QuerySingleIssue(issue_id, url_template=SINGLE_ISSUE_URL):
"""Queries the tracker for a specific issue. Returns a dict.
This uses the deprecated Issue Tracker API to fetch a JSON representation of
the issue details.
Args:
issue_id: An int or string representing the issue id.
url_template: URL to query the tracker with '%s' instead of the bug id.
Returns:
A dictionary as parsed by the JSON library from the tracker response.
Raises:
urllib2.HTTPError when appropriate.
"""
assert str(issue_id).isdigit()
response = urllib2.urlopen(url_template % issue_id).read()
return json.loads(response)
def GetIssueState(issue_id):
"""Returns either 'closed' or 'open' for the given bug ID.
Args:
issue_id: string or string-castable object containing a numeric bug ID.
Returns:
'open' or 'closed' depending on the state of the bug.
Raises:
IssueTrackerQueryException if the data cannot be retrieved or parsed.
"""
try:
query_response = QuerySingleIssue(issue_id)
# We assume the query returns a single result hence the [0]
issue_detail = query_response['feed']['entry'][0]
state = issue_detail['issues$state']['$t']
return state
except urllib2.URLError:
raise IssueTrackerQueryException(
'Could not fetch the details from the issue tracker.')
except ValueError:
raise IssueTrackerQueryException(
'Could not parse the issue tracker\'s response as a json doc.')
except KeyError:
raise IssueTrackerQueryException(
'The data from the issue tracker is not in the expected format.')
def CheckIssueClosed(issue_id):
"""Checks if a given issue is closed. Returns False when in doubt."""
# We only check when issue_id appears to be valid
if str(issue_id).isdigit():
try:
return GetIssueState(issue_id) == 'closed'
except IssueTrackerQueryException:
# We let this fall through to the return False
pass
# We return False for anything other than a positive number
return False
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import urllib2
from query_crbug import CheckIssueClosed
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
_current_directory = os.path.dirname(__file__)
_test_data_directory = os.path.join(_current_directory, 'test_data')
# These strings are simulated responses to various conditions when querying
# the chromium issue tracker.
CLOSED_ISSUE_DATA = open(os.path.join(_test_data_directory,
'closed.json')).read()
OPEN_ISSUE_DATA = open(os.path.join(_test_data_directory,
'open.json')).read()
UNEXPECTED_FORMAT_DATA = CLOSED_ISSUE_DATA.replace('issues$state', 'gibberish')
BROKEN_ISSUE_DATA = "\n<HTML><HEAD><TITLE>Not a JSON Doc</TITLE></HEAD></HTML>"
class MockResponse(object):
def __init__(self, result):
self._result = result
def read(self):
return self._result
def MockUrlOpen(url):
# Note that these strings DO NOT represent http responses. They are just
# memorable numeric bug ids to use.
if '200' in url:
return MockResponse(CLOSED_ISSUE_DATA)
elif '201' in url:
return MockResponse(OPEN_ISSUE_DATA)
elif '300' in url:
return MockResponse(UNEXPECTED_FORMAT_DATA)
elif '403' in url:
raise urllib2.URLError('')
elif '404' in url:
return MockResponse('')
elif '500' in url:
return MockResponse(BROKEN_ISSUE_DATA)
class crbugQueryTest(unittest.TestCase):
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testClosedIssueIsClosed(self):
self.assertTrue(CheckIssueClosed(200))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testOpenIssueIsNotClosed(self):
self.assertFalse(CheckIssueClosed(201))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testUnexpectedFormat(self):
self.assertFalse(CheckIssueClosed(300))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testUrlError(self):
self.assertFalse(CheckIssueClosed(403))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testEmptyResponse(self):
self.assertFalse(CheckIssueClosed(404))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testBrokenResponse(self):
self.assertFalse(CheckIssueClosed(500))
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functionality for starting build try jobs via HTTP.
This includes both sending a request to start a job, and also related code
for querying the status of the job.
This module can be either run as a stand-alone script to send a request to a
builder, or imported and used by calling the public functions below.
"""
import json
import urllib2
# URL template for fetching JSON data about builds.
BUILDER_JSON_URL = ('%(server_url)s/json/builders/%(bot_name)s/builds/'
'%(build_num)s?as_text=1&filter=0')
# URL template for displaying build steps.
BUILDER_HTML_URL = '%(server_url)s/builders/%(bot_name)s/builds/%(build_num)s'
# Status codes that can be returned by the GetBuildStatus method
# From buildbot.status.builder.
# See: http://docs.buildbot.net/current/developer/results.html
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7)
OK = (SUCCESS, WARNINGS) # These indicate build is complete.
FAILED = (FAILURE, EXCEPTION, SKIPPED) # These indicate build failure.
PENDING = (RETRY, TRYPENDING) # These indicate in progress or in pending queue.
class ServerAccessError(Exception):
def __str__(self):
return '%s\nSorry, cannot connect to server.' % self.args[0]
def _IsBuildRunning(build_data):
"""Checks whether the build is in progress on buildbot.
Presence of currentStep element in build JSON indicates build is in progress.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if build is in progress, otherwise False.
"""
current_step = build_data.get('currentStep')
if (current_step and current_step.get('isStarted') and
current_step.get('results') is None):
return True
return False
def _IsBuildFailed(build_data):
"""Checks whether the build failed on buildbot.
Sometimes the build status is marked as failed even though the compile and
packaging steps were successful. This may happen when intermediate steps of
lesser importance, such as gclient revert or generate_telemetry_profile, fail.
Therefore we do an additional check to confirm that the build was successful
by calling _IsBuildSuccessful.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if the build failed, otherwise False.
"""
if (build_data.get('results') in FAILED and
not _IsBuildSuccessful(build_data)):
return True
return False
def _IsBuildSuccessful(build_data):
"""Checks whether the build succeeded on buildbot.
We treat a build as successful if the package_build step completed without
any error, i.e., when the 'results' attribute of this step has the value 0 or 1
in its first element.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if the build succeeded, otherwise False.
"""
if build_data.get('steps'):
for item in build_data.get('steps'):
# The 'results' attribute of each step consists of two elements,
# results[0]: This represents the status of build step.
# See: http://docs.buildbot.net/current/developer/results.html
# results[1]: List of items, contains text if step fails, otherwise empty.
if (item.get('name') == 'package_build' and
item.get('isFinished') and
item.get('results')[0] in OK):
return True
return False
def _FetchBuilderData(builder_url):
"""Fetches JSON data for the all the builds from the try server.
Args:
builder_url: A try server URL to fetch builds information.
Returns:
The JSON data (as a string) with information about all builds on the try
server, or None if it could not be fetched.
"""
data = None
try:
url = urllib2.urlopen(builder_url)
except urllib2.URLError as e:
print ('urllib2.urlopen error %s, waterfall status page down.[%s]' % (
builder_url, str(e)))
return None
if url is not None:
try:
data = url.read()
except IOError as e:
print 'urllib2 file object read error %s, [%s].' % (builder_url, str(e))
return data
def _GetBuildData(buildbot_url):
"""Gets build information for the given build id from the try server.
Args:
buildbot_url: A try server URL to fetch build information.
Returns:
A dictionary with build information if build exists, otherwise None.
"""
builds_json = _FetchBuilderData(buildbot_url)
if builds_json:
return json.loads(builds_json)
return None
def GetBuildStatus(build_num, bot_name, server_url):
"""Gets build status from the buildbot status page for a given build number.
Args:
build_num: A build number on try server to determine its status.
bot_name: Name of the bot where the build information is scanned.
server_url: URL of the buildbot.
Returns:
A pair which consists of the build status (OK, FAILED or PENDING) and a
link to the build status page on the waterfall.
"""
results_url = None
if build_num:
# Get the URL for requesting JSON data with status information.
buildbot_url = BUILDER_JSON_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': build_num,
}
build_data = _GetBuildData(buildbot_url)
if build_data:
# Link to build on the buildbot showing status of build steps.
results_url = BUILDER_HTML_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': build_num,
}
if _IsBuildFailed(build_data):
return (FAILED, results_url)
elif _IsBuildSuccessful(build_data):
return (OK, results_url)
return (PENDING, results_url)
def GetBuildNumFromBuilder(build_reason, bot_name, server_url):
"""Gets build number on build status page for a given 'build reason'.
This function parses the JSON data from the buildbot page and collects basic
information about all the builds, then uniquely identifies the build based on
the 'reason' attribute in the JSON data about the build.
The 'reason' attribute is set when a build request is posted, and it is used
to identify the build on the status page.
Args:
build_reason: A unique build reason (job name) that was set when the build
was posted to the try server.
bot_name: Name of the bot where the build information is scanned.
server_url: URL of the buildbot.
Returns:
A build number as a string if found, otherwise None.
"""
buildbot_url = BUILDER_JSON_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': '_all',
}
builds_json = _FetchBuilderData(buildbot_url)
if builds_json:
builds_data = json.loads(builds_json)
for current_build in builds_data:
if builds_data[current_build].get('reason') == build_reason:
return builds_data[current_build].get('number')
return None
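As a rough sketch (not part of the original module; the polling interval and
helper name are illustrative), the public functions above might be combined by
a caller like this:

import time

def _WaitForBuild(build_reason, bot_name, server_url, poll_seconds=60):
  """Polls the try server until the named build finishes; returns (ok, url)."""
  # Find the build number for the job, identified by its unique 'reason'.
  build_num = GetBuildNumFromBuilder(build_reason, bot_name, server_url)
  while build_num is None:
    time.sleep(poll_seconds)
    build_num = GetBuildNumFromBuilder(build_reason, bot_name, server_url)
  # Wait until the build is no longer pending or in progress.
  status, results_url = GetBuildStatus(build_num, bot_name, server_url)
  while status == PENDING:
    time.sleep(poll_seconds)
    status, results_url = GetBuildStatus(build_num, bot_name, server_url)
  return status == OK, results_url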
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all tests in all unit test modules in this directory."""
import os
import sys
import unittest
import logging
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
def main():
if 'full-log' in sys.argv:
# Configure logging to show line numbers and logging level
fmt = '%(module)s:%(lineno)d - %(levelname)s: %(message)s'
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout, format=fmt)
elif 'no-log' in sys.argv:
# Only WARN and above are shown, to standard error. (This is the logging
# module default config, hence we do nothing here)
pass
else:
# Behave as before. Make logging.info mimic print behavior
fmt = '%(message)s'
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=fmt)
# Running the tests depends on having the below modules in PYTHONPATH.
sys.path.append(os.path.join(SRC, 'third_party', 'catapult', 'telemetry'))
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
suite = unittest.TestSuite()
loader = unittest.TestLoader()
script_dir = os.path.dirname(__file__)
suite.addTests(loader.discover(start_dir=script_dir, pattern='*_test.py'))
print 'Running unit tests in %s...' % os.path.abspath(script_dir)
result = unittest.TextTestRunner(verbosity=1).run(suite)
return 0 if result.wasSuccessful() else 1
if __name__ == '__main__':
sys.exit(main())
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functions for performing source control operations."""
import bisect_utils
def IsInGitRepository():
output, _ = bisect_utils.RunGit(['rev-parse', '--is-inside-work-tree'])
return output.strip() == 'true'
def GetRevisionList(end_revision_hash, start_revision_hash, cwd=None):
"""Retrieves a list of git commit hashes in a range.
Args:
end_revision_hash: The SHA1 for the end of the range, inclusive.
start_revision_hash: The SHA1 for the beginning of the range, inclusive.
Returns:
A list of the git commit hashes in the range, in reverse time order --
that is, starting with |end_revision_hash|.
"""
revision_range = '%s..%s' % (start_revision_hash, end_revision_hash)
cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
log_output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
revision_hash_list = log_output.split()
revision_hash_list.append(start_revision_hash)
return revision_hash_list
def SyncToRevision(revision, sync_client=None):
"""Syncs or checks out a revision based on sync_client argument.
Args:
revision: Git hash for the solutions with the format <repo>@rev.
E.g., "src@2ae43f...", "src/third_party/webkit@asr1234" etc.
sync_client: Name of the sync client to use, e.g. 'gclient' to sync with
gclient; if None or empty, the revision is checked out with git.
Returns:
True if sync or checkout is successful, False otherwise.
"""
if not sync_client:
_, return_code = bisect_utils.RunGit(['checkout', revision])
elif sync_client == 'gclient':
return_code = bisect_utils.RunGClientAndSync([revision])
else:
raise NotImplementedError('Unsupported sync_client: "%s"' % sync_client)
return not return_code
def GetCurrentRevision(cwd=None):
"""Gets current revision of the given repository."""
return bisect_utils.CheckRunGit(['rev-parse', 'HEAD'], cwd=cwd).strip()
def ResolveToRevision(revision_to_check, depot, depot_deps_dict,
search, cwd=None):
"""Tries to resolve an SVN revision or commit position to a git SHA1.
Args:
revision_to_check: The user supplied revision string that may need to be
resolved to a git commit hash. This may be an SVN revision, git commit
position, or a git commit hash.
depot: The depot (dependency repository) that |revision_to_check| is from.
depot_deps_dict: A dictionary with information about different depots.
search: How many revisions forward or backward to search. If the value is
negative, the function will search backwards chronologically, otherwise
it will search forward.
Returns:
A string containing a git SHA1 hash, otherwise None.
"""
# Android-chrome is git only, so no need to resolve this to anything else.
if depot == 'android-chrome':
return revision_to_check
# If the given revision can't be parsed as an integer, then it may already
# be a git commit hash.
if not bisect_utils.IsStringInt(revision_to_check):
return revision_to_check
depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
if depot != 'chromium':
depot_svn = depot_deps_dict[depot]['svn']
svn_revision = int(revision_to_check)
git_revision = None
if search > 0:
search_range = xrange(svn_revision, svn_revision + search, 1)
else:
search_range = xrange(svn_revision, svn_revision + search, -1)
for i in search_range:
# NOTE: Checking for the git-svn-id footer is for backwards compatibility.
# When we can assume that all the revisions we care about are from after
# git commit positions started getting added, we don't need to check this.
svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
commit_position_pattern = '^Cr-Commit-Position: .*@{#%d}' % i
cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
'--grep', commit_position_pattern, 'origin/master']
log_output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
log_output = log_output.strip()
if log_output:
git_revision = log_output
break
return git_revision
def IsInProperBranch():
"""Checks whether the current branch is "master"."""
cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
log_output = bisect_utils.CheckRunGit(cmd)
log_output = log_output.strip()
return log_output == 'master'
def GetCommitPosition(git_revision, cwd=None):
"""Finds git commit position for the given git hash.
This function executes "git footer --position-num <git hash>" command to get
commit position the given revision.
Args:
git_revision: The git SHA1 to use.
cwd: Working directory to run the command from.
Returns:
Git commit position as integer or None.
"""
# Some of the repositories (e.g., skia, angle) are pure git and, unlike the
# other repositories, don't have commit positions.
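# For example, a commit whose message ends with the footer
# "Cr-Commit-Position: refs/heads/master@{#488304}" yields the integer 488304.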
cmd = ['footers', '--position-num', git_revision]
output, return_code = bisect_utils.RunGit(cmd, cwd)
if not return_code:
commit_position = output.strip()
if bisect_utils.IsStringInt(commit_position):
return int(commit_position)
return None
def GetCommitTime(git_revision, cwd=None):
"""Returns commit time for the given revision in UNIX timestamp."""
cmd = ['log', '--format=%ct', '-1', git_revision]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
return int(output)
def QueryRevisionInfo(revision, cwd=None):
"""Gathers information on a particular revision, such as author's name,
email, subject, and date.
Args:
revision: Revision you want to gather information on; a git commit hash.
Returns:
A dict in the following format:
{
'author': %s,
'email': %s,
'date': %s,
'subject': %s,
'body': %s,
}
"""
commit_info = {}
formats = ['%aN', '%aE', '%s', '%cD', '%b']
targets = ['author', 'email', 'subject', 'date', 'body']
for i in xrange(len(formats)):
cmd = ['log', '--format=%s' % formats[i], '-1', revision]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
commit_info[targets[i]] = output.rstrip()
return commit_info
def CheckoutFileAtRevision(file_name, revision, cwd=None):
"""Performs a checkout on a file at the given revision.
Returns:
True if successful.
"""
command = ['checkout', revision, file_name]
_, return_code = bisect_utils.RunGit(command, cwd=cwd)
return not return_code
def RevertFileToHead(file_name):
"""Un-stages a file and resets the file's state to HEAD.
Returns:
True if successful.
"""
# Reset doesn't seem to return 0 on success.
bisect_utils.RunGit(['reset', 'HEAD', file_name])
_, return_code = bisect_utils.RunGit(
['checkout', bisect_utils.FILE_DEPS_GIT])
return not return_code
def QueryFileRevisionHistory(filename, revision_start, revision_end):
"""Returns a list of commits that modified this file.
Args:
filename: Name of file.
revision_start: Start of revision range (inclusive).
revision_end: End of revision range.
Returns:
A list of commits that touched this file.
"""
cmd = [
'log',
'--format=%H',
'%s~1..%s' % (revision_start, revision_end),
'--',
filename,
]
output = bisect_utils.CheckRunGit(cmd)
lines = output.split('\n')
return [o for o in lines if o]
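As a rough illustration (not part of the original module; the revision hashes
would be real git SHA1s), the helpers above could be combined to summarize the
commits in a suspected regression range:

def _PrintSuspectRange(good_revision_hash, bad_revision_hash, cwd=None):
  """Prints a one-line summary of every commit in the given range."""
  for git_hash in GetRevisionList(bad_revision_hash, good_revision_hash, cwd=cwd):
    info = QueryRevisionInfo(git_hash, cwd=cwd)
    print '%s %s <%s>' % (git_hash[:8], info['subject'], info['email'])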