Commit 2a1c24fe authored by Nico Weber, committed by Commit Bot

Remove tools/auto_bisect.

It looks like this was replaced with something else a while ago,
so remove the old thing.  It still references a script I'm hoping
to delete.

Bug: 330631
Change-Id: Ic0834dfab0a635d6926682299d9cdf49e4a24d89
Reviewed-on: https://chromium-review.googlesource.com/579937
Reviewed-by: Simon Hatch <simonhatch@chromium.org>
Commit-Queue: Nico Weber <thakis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#488304}
parent 638a36ea
prasadv@chromium.org
qyearsley@chromium.org
robertocn@chromium.org
sergiyb@chromium.org
simonhatch@chromium.org
tonyg@chromium.org
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for auto-bisect.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API.
"""
import imp
import subprocess
import os
# Paths to bisect config files relative to this script.
CONFIG_FILES = [
'bisect.cfg',
os.path.join('..', 'run-perf-test.cfg'),
]
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Does all presubmit checks for auto-bisect."""
results = []
results.extend(_CheckAllConfigFiles(input_api, output_api))
results.extend(_RunUnitTests(input_api, output_api))
results.extend(_RunPyLint(input_api, output_api))
return results
def _CheckAllConfigFiles(input_api, output_api):
"""Checks all bisect config files and returns a list of presubmit results."""
results = []
script_path = input_api.PresubmitLocalPath()
for config_file in CONFIG_FILES:
file_path = os.path.join(script_path, config_file)
results.extend(_CheckConfigFile(file_path, output_api))
return results
def _CheckConfigFile(file_path, output_api):
"""Checks one bisect config file and returns a list of presubmit results."""
try:
config_file = imp.load_source('config', file_path)
except IOError as e:
warning = 'Failed to read config file %s: %s' % (file_path, str(e))
return [output_api.PresubmitError(warning, items=[file_path])]
if not hasattr(config_file, 'config'):
warning = 'Config file has no "config" global variable: %s' % file_path
return [output_api.PresubmitError(warning, items=[file_path])]
if type(config_file.config) is not dict:
warning = 'Config file "config" global variable is not a dict: %s' % file_path
return [output_api.PresubmitError(warning, items=[file_path])]
for k, v in config_file.config.iteritems():
if v != '':
warning = 'Non-empty value in config dict: %s: %s' % (repr(k), repr(v))
warning += ('\nThe bisection config file should only contain a config '
'dict with empty fields. Changes to this file should not '
'be submitted.')
return [output_api.PresubmitError(warning, items=[file_path])]
return []
def _RunUnitTests(input_api, output_api):
"""Runs unit tests for auto-bisect."""
repo_root = input_api.change.RepositoryRoot()
auto_bisect_dir = os.path.join(repo_root, 'tools', 'auto_bisect')
test_runner = os.path.join(auto_bisect_dir, 'run_tests')
return_code = subprocess.call(['python', test_runner])
if return_code:
message = 'Auto-bisect unit tests did not all pass.'
return [output_api.PresubmitError(message)]
return []
def _RunPyLint(input_api, output_api):
"""Runs unit tests for auto-bisect."""
telemetry_path = os.path.join(
input_api.PresubmitLocalPath(), '..', '..', 'third_party', 'telemetry')
mock_path = os.path.join(
input_api.PresubmitLocalPath(), '..', '..', 'third_party', 'pymock')
disabled_warnings = [
'relative-import',
]
tests = input_api.canned_checks.GetPylint(
input_api, output_api, disabled_warnings=disabled_warnings,
extra_paths_list=[telemetry_path, mock_path])
return input_api.RunTests(tests)
This directory contains modules related to tools for bisecting regressions.
There are several different tools for bisecting regressions; the main use
of these tools is to find revisions where a performance regression occurred.
These tools are generally run by trybots but can also be run locally.
Documentation:
http://www.chromium.org/developers/bisecting-bugs
http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs/bisecting-performance-regressions
Overview of bisect-related files in src/tools:
run-bisect-perf-regression.py -- used to kick off a bisect job
prepare-bisect-perf-regression.py -- run before the above to prepare the repo
run-bisect-manual-test.py -- used to manually bisect
bisect-manual-test.py -- helper module used by run-bisect-manual-test.py
auto_bisect/bisect.cfg -- config parameters for a bisect job
run-perf-test.cfg -- config parameters for running a perf test once
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Config file read by run-bisect-perf-regression.py.
This script is intended for use by anyone who wants to run a remote bisection
on a range of revisions to look for a performance regression. Modify the config
below and add the revision range, performance command, and metric. You can then
run a try job with git try <bot>.
Changes to this file should never be submitted.
Args:
'command': This is the full command to execute the test.
'good_revision': An svn or git revision where the metric hadn't regressed yet.
'bad_revision': An svn or git revision sometime after the metric regressed.
'metric': The name of the metric to parse out from the results of the
performance test. You can retrieve the metric by looking at the stdio of
the performance test. Look for lines of the format:
RESULT <graph>: <trace>= <value> <units>
The metric name is "<graph>/<trace>".
'repeat_count': The number of times to repeat the performance test.
'max_time_minutes': The script will attempt to run the performance test
"repeat_count" times, unless it exceeds "max_time_minutes".
'truncate_percent': The highest/lowest % values will be discarded before
computing the mean result for each revision.
Sample config:
config = {
'command': './tools/perf/run_benchmark --browser=release sunspider',
'metric': 'Total/Total',
'good_revision': '14ac2486c0eba1266d2da1c52e8759d9c784fe80',
'bad_revision': 'fcf8643d31301eea990a4c42d7d8c9fc30cc33ec',
'repeat_count': '20',
'max_time_minutes': '20',
'truncate_percent': '25',
}
For Windows, if you're calling a python script you will need to add "python"
to the command, so the command would be changed to:
'python tools/perf/run_benchmark -v --browser=release sunspider',
"""
config = {
'command': '',
'good_revision': '',
'bad_revision': '',
'metric': '',
'repeat_count': '',
'max_time_minutes': '',
'truncate_percent': '',
}
# Workaround git try issue, see crbug.com/257689
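A minimal sketch of how a RESULT line maps to the metric above; the regex and
helper below are illustrative, not the bisect script's actual parser, and they
handle only the single-value form:

import re

# Matches lines like: RESULT Total: Total= 123.4 ms
_RESULT_RE = re.compile(r'^RESULT (?P<graph>[^:]+): (?P<trace>\S+)= '
                        r'(?P<value>\S+) (?P<units>\S+)')

def ParseResultLine(line):
  """Returns (metric, value, units) for a single-value RESULT line."""
  match = _RESULT_RE.match(line)
  if not match:
    return None
  metric = '%s/%s' % (match.group('graph'), match.group('trace'))
  return metric, float(match.group('value')), match.group('units')

# ParseResultLine('RESULT Total: Total= 123.4 ms')
# -> ('Total/Total', 123.4, 'ms')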
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import shutil
import sys
import urlparse
import unittest
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import bisect_perf_regression
import bisect_results
import bisect_state
import bisect_utils
import fetch_build
import mock
import source_control
# Regression confidence: 0%
CLEAR_NON_REGRESSION = [
# Mean: 30.223 Std. Dev.: 11.383
[[16.886], [16.909], [16.99], [17.723], [17.952], [18.118], [19.028],
[19.552], [21.954], [38.573], [38.839], [38.965], [40.007], [40.572],
[41.491], [42.002], [42.33], [43.109], [43.238]],
# Mean: 34.76 Std. Dev.: 11.516
[[16.426], [17.347], [20.593], [21.177], [22.791], [27.843], [28.383],
[28.46], [29.143], [40.058], [40.303], [40.558], [41.918], [42.44],
[45.223], [46.494], [50.002], [50.625], [50.839]]
]
# Regression confidence: ~ 90%
ALMOST_REGRESSION = [
# Mean: 30.042 Std. Dev.: 2.002
[[26.146], [28.04], [28.053], [28.074], [28.168], [28.209], [28.471],
[28.652], [28.664], [30.862], [30.973], [31.002], [31.897], [31.929],
[31.99], [32.214], [32.323], [32.452], [32.696]],
# Mean: 33.008 Std. Dev.: 4.265
[[34.963], [30.741], [39.677], [39.512], [34.314], [31.39], [34.361],
[25.2], [30.489], [29.434]]
]
# Regression confidence: ~ 98%
BARELY_REGRESSION = [
# Mean: 28.828 Std. Dev.: 1.993
[[26.96], [27.605], [27.768], [27.829], [28.006], [28.206], [28.393],
[28.911], [28.933], [30.38], [30.462], [30.808], [31.74], [31.805],
[31.899], [32.077], [32.454], [32.597], [33.155]],
# Mean: 31.156 Std. Dev.: 1.980
[[28.729], [29.112], [29.258], [29.454], [29.789], [30.036], [30.098],
[30.174], [30.534], [32.285], [32.295], [32.552], [32.572], [32.967],
[33.165], [33.403], [33.588], [33.744], [34.147], [35.84]]
]
# Regression confidence: 99.5%
CLEAR_REGRESSION = [
# Mean: 30.254 Std. Dev.: 2.987
[[26.494], [26.621], [26.701], [26.997], [26.997], [27.05], [27.37],
[27.488], [27.556], [31.846], [32.192], [32.21], [32.586], [32.596],
[32.618], [32.95], [32.979], [33.421], [33.457], [34.97]],
# Mean: 33.190 Std. Dev.: 2.972
[[29.547], [29.713], [29.835], [30.132], [30.132], [30.33], [30.406],
[30.592], [30.72], [34.486], [35.247], [35.253], [35.335], [35.378],
[35.934], [36.233], [36.41], [36.947], [37.982]]
]
# Regression confidence > 95%, taken from: crbug.com/434318
# Specifically from Builder android_nexus10_perf_bisect Build #1198
MULTIPLE_VALUES = [
[
[18.916, 22.371, 8.527, 5.877, 5.407, 9.476, 8.100, 5.334,
4.507, 4.842, 8.485, 8.308, 27.490, 4.560, 4.804, 23.068, 17.577,
17.346, 26.738, 60.330, 32.307, 5.468, 27.803, 27.373, 17.823,
5.158, 27.439, 5.236, 11.413],
[18.999, 22.642, 8.158, 5.995, 5.495, 9.499, 8.092, 5.324,
4.468, 4.788, 8.248, 7.853, 27.533, 4.410, 4.622, 22.341, 22.313,
17.072, 26.731, 57.513, 33.001, 5.500, 28.297, 27.277, 26.462,
5.009, 27.361, 5.130, 10.955]
],
[
[18.238, 22.365, 8.555, 5.939, 5.437, 9.463, 7.047, 5.345, 4.517,
4.796, 8.593, 7.901, 27.499, 4.378, 5.040, 4.904, 4.816, 4.828,
4.853, 57.363, 34.184, 5.482, 28.190, 27.290, 26.694, 5.099,
4.905, 5.290, 4.813],
[18.301, 22.522, 8.035, 6.021, 5.565, 9.037, 6.998, 5.321, 4.485,
4.768, 8.397, 7.865, 27.636, 4.640, 5.015, 4.962, 4.933, 4.977,
4.961, 60.648, 34.593, 5.538, 28.454, 27.297, 26.490, 5.099, 5,
5.247, 4.945],
[18.907, 23.368, 8.100, 6.169, 5.621, 9.971, 8.161, 5.331, 4.513,
4.837, 8.255, 7.852, 26.209, 4.388, 5.045, 5.029, 5.032, 4.946,
4.973, 60.334, 33.377, 5.499, 28.275, 27.550, 26.103, 5.108,
4.951, 5.285, 4.910],
[18.715, 23.748, 8.128, 6.148, 5.691, 9.361, 8.106, 5.334, 4.528,
4.965, 8.261, 7.851, 27.282, 4.391, 4.949, 4.981, 4.964, 4.935,
4.933, 60.231, 33.361, 5.489, 28.106, 27.457, 26.648, 5.108,
4.963, 5.272, 4.954]
]
]
# Default options for the dry run
DEFAULT_OPTIONS = {
'debug_ignore_build': True,
'debug_ignore_sync': True,
'debug_ignore_perf_test': True,
'debug_ignore_regression_confidence': True,
'command': 'fake_command',
'metric': 'fake/metric',
'good_revision': 280000,
'bad_revision': 280005,
}
# This global is a placeholder for a generator to be defined by the test cases
# that use _MockRunTests.
_MockResultsGenerator = (x for x in [])
def _MakeMockRunTests(bisect_mode_is_return_code=False):
def _MockRunTests(*args, **kwargs): # pylint: disable=unused-argument
return _FakeTestResult(
_MockResultsGenerator.next(), bisect_mode_is_return_code)
return _MockRunTests
def _FakeTestResult(values, bisect_mode_is_return_code):
mean = 0.0
if bisect_mode_is_return_code:
mean = 0 if (all(v == 0 for v in values)) else 1
result_dict = {'mean': mean, 'std_err': 0.0, 'std_dev': 0.0, 'values': values}
success_code = 0
return (result_dict, success_code)
def _SampleBisectResult(opts):
revisions = [
'ae7ef14ba2d9b5ef0d2c1c092ec98a417e44740d',
'ab55ead638496b061c9de61685b982f7cea38ca7',
'89aa0c99e4b977b9a4f992ac14da0d6624f7316e']
state = bisect_state.BisectState(depot='chromium', revisions=revisions)
depot_registry = bisect_perf_regression.DepotDirectoryRegistry('/mock/src')
results = bisect_results.BisectResults(
bisect_state=state, depot_registry=depot_registry, opts=opts,
runtime_warnings=[])
results.confidence = 99.9
results.culprit_revisions = [(
'ab55ead638496b061c9de61685b982f7cea38ca7',
{
'date': 'Thu, 26 Jun 2014 14:29:49 +0000',
'body': 'Fix',
'author': 'author@chromium.org',
'subject': 'Fix',
'email': 'author@chromium.org',
},
'chromium')]
return results
def _GetMockCallArg(function_mock, call_index):
"""Gets the list of called arguments for call at |call_index|.
Args:
function_mock: A Mock object.
call_index: The index at which the mocked function was called.
Returns:
The called argument list.
"""
call_args_list = function_mock.call_args_list
if not call_args_list or len(call_args_list) <= call_index:
return None
args, _ = call_args_list[call_index]
return args
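# Hypothetical usage sketch for _GetMockCallArg: mock.call_args_list stores
# one (args, kwargs) pair per call, in call order.
_example_mock = mock.Mock()
_example_mock('first', 1)
_example_mock('second', 2)
assert _GetMockCallArg(_example_mock, 1) == ('second', 2)
assert _GetMockCallArg(_example_mock, 5) is None  # Index past the last call.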
def _GetBisectPerformanceMetricsInstance(options_dict):
"""Returns an instance of the BisectPerformanceMetrics class."""
opts = bisect_perf_regression.BisectOptions.FromDict(options_dict)
return bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
def _GetExtendedOptions(improvement_dir, fake_first, ignore_confidence=True,
**extra_opts):
"""Returns the a copy of the default options dict plus some options."""
result = dict(DEFAULT_OPTIONS)
result.update({
'improvement_direction': improvement_dir,
'debug_fake_first_test_mean': fake_first,
'debug_ignore_regression_confidence': ignore_confidence
})
result.update(extra_opts)
return result
def _GenericDryRun(options, print_results=False):
"""Performs a dry run of the bisector.
Args:
options: Dictionary containing the options for the bisect instance.
print_results: Boolean telling whether to call FormatAndPrintResults.
Returns:
The results dictionary as returned by the bisect Run method.
"""
_AbortIfThereAreStagedChanges()
# Disable rmtree to avoid deleting local trees.
old_rmtree = shutil.rmtree
shutil.rmtree = lambda path, on_error: None
# git reset HEAD may be run during the dry run, which removes staged changes.
try:
bisect_instance = _GetBisectPerformanceMetricsInstance(options)
results = bisect_instance.Run(
bisect_instance.opts.command, bisect_instance.opts.bad_revision,
bisect_instance.opts.good_revision, bisect_instance.opts.metric)
if print_results:
bisect_instance.printer.FormatAndPrintResults(results)
return results
finally:
shutil.rmtree = old_rmtree
def _AbortIfThereAreStagedChanges():
"""Exits the test prematurely if there are staged changes."""
# The output of "git status --short" will be an empty string if there are
# no staged changes in the current branch. Untracked files are ignored
# because when running the presubmit on the trybot there are sometimes
# untracked changes to the run-perf-test.cfg and bisect.cfg files.
status_output = bisect_utils.CheckRunGit(
['status', '--short', '--untracked-files=no'])
if status_output:
print 'There are un-committed changes in the current branch.'
print 'Aborting the tests to avoid destroying local changes. Changes:'
print status_output
sys.exit(1)
class BisectPerfRegressionTest(unittest.TestCase):
"""Test case for other functions and classes in bisect-perf-regression.py."""
def setUp(self):
self.cwd = os.getcwd()
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir, os.path.pardir)))
def tearDown(self):
os.chdir(self.cwd)
def testBisectOptionsCanPrintHelp(self):
"""Tests that the argument parser can be made and can print help."""
bisect_options = bisect_perf_regression.BisectOptions()
parser = bisect_options._CreateCommandLineParser()
parser.format_help()
def testParseDEPSStringManually(self):
"""Tests DEPS parsing."""
deps_file_contents = """
vars = {
'ffmpeg_hash':
'@ac4a9f31fe2610bd146857bbd55d7a260003a888',
'webkit_url':
'https://chromium.googlesource.com/chromium/blink.git',
'git_url':
'https://chromium.googlesource.com',
'webkit_rev':
'@e01ac0a267d1017288bc67fa3c366b10469d8a24',
'angle_revision':
'74697cf2064c0a2c0d7e1b1b28db439286766a05'
}"""
# Should only expect SVN/git revisions to come through, and URLs should be
# filtered out.
expected_vars_dict = {
'ffmpeg_hash': '@ac4a9f31fe2610bd146857bbd55d7a260003a888',
'webkit_rev': '@e01ac0a267d1017288bc67fa3c366b10469d8a24',
'angle_revision': '74697cf2064c0a2c0d7e1b1b28db439286766a05'
}
# Testing private function.
# pylint: disable=W0212
vars_dict = bisect_perf_regression._ParseRevisionsFromDEPSFileManually(
deps_file_contents)
self.assertEqual(vars_dict, expected_vars_dict)
def _AssertParseResult(self, expected_values, result_string):
"""Asserts some values are parsed from a RESULT line."""
results_template = ('RESULT other_chart: other_trace= 123 count\n'
'RESULT my_chart: my_trace= %(value)s\n')
results = results_template % {'value': result_string}
metric = ['my_chart', 'my_trace']
# Testing private function.
# pylint: disable=W0212
values = bisect_perf_regression._TryParseResultValuesFromOutput(
metric, results)
self.assertEqual(expected_values, values)
def testTryParseResultValuesFromOutput_WithSingleValue(self):
"""Tests result pattern <*>RESULT <graph>: <trace>= <value>"""
self._AssertParseResult([66.88], '66.88 kb')
self._AssertParseResult([66.88], '66.88 ')
self._AssertParseResult([-66.88], '-66.88 kb')
self._AssertParseResult([66], '66 kb')
self._AssertParseResult([0.66], '.66 kb')
self._AssertParseResult([], '. kb')
self._AssertParseResult([], 'aaa kb')
def testTryParseResultValuesFromOutput_WithMultiValue(self):
"""Tests result pattern <*>RESULT <graph>: <trace>= [<value>,<value>, ..]"""
self._AssertParseResult([66.88], '[66.88] kb')
self._AssertParseResult([66.88, 99.44], '[66.88, 99.44]kb')
self._AssertParseResult([66.88, 99.44], '[ 66.88, 99.44 ]')
self._AssertParseResult([-66.88, 99.44], '[-66.88, 99.44] kb')
self._AssertParseResult([-66, 99], '[-66,99] kb')
self._AssertParseResult([-66, 99], '[-66,99,] kb')
self._AssertParseResult([-66, 0.99], '[-66,.99] kb')
self._AssertParseResult([], '[] kb')
self._AssertParseResult([], '[-66,abc] kb')
def testTryParseResultValuesFromOutputWithMeanStd(self):
"""Tests result pattern <*>RESULT <graph>: <trace>= {<mean, std}"""
self._AssertParseResult([33.22], '{33.22, 3.6} kb')
self._AssertParseResult([33.22], '{33.22, 3.6} kb')
self._AssertParseResult([33.22], '{33.22,3.6}kb')
self._AssertParseResult([33.22], '{33.22,3.6} kb')
self._AssertParseResult([33.22], '{ 33.22,3.6 }kb')
self._AssertParseResult([-33.22], '{-33.22,3.6}kb')
self._AssertParseResult([22], '{22,6}kb')
self._AssertParseResult([.22], '{.22,6}kb')
self._AssertParseResult([], '{.22,6, 44}kb')
self._AssertParseResult([], '{}kb')
self._AssertParseResult([], '{XYZ}kb')
# This method doesn't reference self; it fails if an error is thrown.
# pylint: disable=R0201
def testDryRun(self):
"""Does a dry run of the bisect script.
This serves as a smoke test to catch errors in the basic execution of the
script.
"""
_GenericDryRun(DEFAULT_OPTIONS, True)
def testBisectImprovementDirectionFails(self):
"""Dry run of a bisect with an improvement instead of regression."""
# Test result goes from 0 to 100 where higher is better
results = _GenericDryRun(_GetExtendedOptions(1, 100))
self.assertIsNotNone(results.error)
self.assertIn('not a regression', results.error)
# Test result goes from 0 to -100 where lower is better
results = _GenericDryRun(_GetExtendedOptions(-1, -100))
self.assertIsNotNone(results.error)
self.assertIn('not a regression', results.error)
def testBisectImprovementDirectionSucceeds(self):
"""Bisects with improvement direction matching regression range."""
# Test result goes from 0 to 100 where lower is better
results = _GenericDryRun(_GetExtendedOptions(-1, 100))
self.assertIsNone(results.error)
# Test result goes from 0 to -100 where higher is better
results = _GenericDryRun(_GetExtendedOptions(1, -100))
self.assertIsNone(results.error)
@mock.patch('urllib2.urlopen')
def testBisectResultsPosted(self, mock_urlopen):
options_dict = dict(DEFAULT_OPTIONS)
options_dict.update({
'bisect_mode': bisect_utils.BISECT_MODE_MEAN,
'try_job_id': 1234,
})
opts = bisect_perf_regression.BisectOptions.FromDict(options_dict)
bisect_instance = _GetBisectPerformanceMetricsInstance(options_dict)
results = _SampleBisectResult(opts)
bisect_instance.PostBisectResults(results)
call_args = _GetMockCallArg(mock_urlopen, 0)
self.assertIsNotNone(call_args)
called_data = urlparse.parse_qs(call_args[1])
results_data = json.loads(called_data['data'][0])
self.assertEqual(1234, results_data['try_job_id'])
def _CheckAbortsEarly(self, results, **extra_opts):
"""Returns True if the bisect job would abort early."""
global _MockResultsGenerator
_MockResultsGenerator = (r for r in results)
bisect_class = bisect_perf_regression.BisectPerformanceMetrics
original_run_tests = bisect_class.RunPerformanceTestAndParseResults
bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests()
try:
dry_run_results = _GenericDryRun(_GetExtendedOptions(
improvement_dir=0, fake_first=0, ignore_confidence=False,
**extra_opts))
except StopIteration:
# If StopIteration was raised, that means that the next value after
# the first two values was requested, so the job was not aborted.
return False
finally:
bisect_class.RunPerformanceTestAndParseResults = original_run_tests
# If the job was aborted, there should be a warning about it.
self.assertTrue(
any('did not clearly reproduce a regression' in w
for w in dry_run_results.warnings))
return True
def testBisectAbortedOnClearNonRegression(self):
self.assertTrue(self._CheckAbortsEarly(CLEAR_NON_REGRESSION))
def testBisectNotAborted_AlmostRegression(self):
self.assertFalse(self._CheckAbortsEarly(ALMOST_REGRESSION))
def testBisectNotAborted_ClearRegression(self):
self.assertFalse(self._CheckAbortsEarly(CLEAR_REGRESSION))
def testBisectNotAborted_BarelyRegression(self):
self.assertFalse(self._CheckAbortsEarly(BARELY_REGRESSION))
def testBisectNotAborted_MultipleValues(self):
self.assertFalse(self._CheckAbortsEarly(MULTIPLE_VALUES))
def testBisectNotAbortedWhenRequiredConfidenceIsZero(self):
self.assertFalse(self._CheckAbortsEarly(
CLEAR_NON_REGRESSION, required_initial_confidence=0))
def _CheckAbortsEarlyForReturnCode(self, results):
"""Returns True if the bisect job would abort early in return code mode."""
global _MockResultsGenerator
_MockResultsGenerator = (r for r in results)
bisect_class = bisect_perf_regression.BisectPerformanceMetrics
original_run_tests = bisect_class.RunPerformanceTestAndParseResults
bisect_class.RunPerformanceTestAndParseResults = _MakeMockRunTests(True)
options = dict(DEFAULT_OPTIONS)
options.update({'bisect_mode': 'return_code'})
try:
dry_run_results = _GenericDryRun(options)
except StopIteration:
# If StopIteration was raised, that means that the next value after
# the first two values was requested, so the job was not aborted.
return False
finally:
bisect_class.RunPerformanceTestAndParseResults = original_run_tests
# If the job was aborted, there should be a warning about it.
if ('known good and known bad revisions returned same' in
dry_run_results.abort_reason):
return True
return False
def testBisectAbortOn_SameReturnCode(self):
self.assertTrue(self._CheckAbortsEarlyForReturnCode([[0,0,0], [0,0,0]]))
def testBisectNotAbortedOn_DifferentReturnCode(self):
self.assertFalse(self._CheckAbortsEarlyForReturnCode([[1,1,1], [0,0,0]]))
def testGetCommitPosition(self):
cp_git_rev = '7017a81991de983e12ab50dfc071c70e06979531'
self.assertEqual(291765, source_control.GetCommitPosition(cp_git_rev))
svn_git_rev = 'e6db23a037cad47299a94b155b95eebd1ee61a58'
self.assertEqual(291467, source_control.GetCommitPosition(svn_git_rev))
def testGetCommitPositionForV8(self):
bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
v8_rev = '818769db41cb3e83979aa16cc76c69b66045e369'
depot_path = os.path.join(bisect_instance.src_cwd, 'v8')
self.assertEqual(
43769, source_control.GetCommitPosition(v8_rev, depot_path))
def testGetCommitPositionForSkia(self):
bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
skia_rev = 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61'
depot_path = os.path.join(bisect_instance.src_cwd, 'third_party', 'skia')
# Skia doesn't use commit positions, and GetCommitPosition should
# return None for repos that don't use commit positions.
self.assertIsNone(source_control.GetCommitPosition(skia_rev, depot_path))
def testUpdateDepsContent(self):
bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
deps_file = 'DEPS'
# We intentionally read the real DEPS file contents here, rather than a string
# literal with a few lines from DEPS, to check that the format the bisect
# script expects to find has not changed in the DEPS content.
# TODO(prasadv): Add a separate test to validate the DEPS contents against the
# format that the bisect script expects.
deps_contents = bisect_perf_regression.ReadStringFromFile(deps_file)
deps_key = 'v8_revision'
depot = 'v8'
git_revision = 'a12345789a23456789a123456789a123456789'
updated_content = bisect_instance.UpdateDepsContents(
deps_contents, depot, git_revision, deps_key)
self.assertIsNotNone(updated_content)
ss = re.compile('["\']%s["\']: ["\']%s["\']' % (deps_key, git_revision))
self.assertIsNotNone(re.search(ss, updated_content))
@mock.patch('bisect_utils.RunGClient')
def testSyncToRevisionForChromium(self, mock_RunGClient):
bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
mock_RunGClient.return_value = 0
bisect_instance._SyncRevision(
'chromium', 'e6db23a037cad47299a94b155b95eebd1ee61a58', 'gclient')
expected_params = [
'sync',
'--verbose',
'--nohooks',
'--force',
'--delete_unversioned_trees',
'--revision',
'src@e6db23a037cad47299a94b155b95eebd1ee61a58',
]
mock_RunGClient.assert_called_with(expected_params, cwd=None)
@mock.patch('bisect_utils.RunGit')
def testSyncToRevisionForWebKit(self, mock_RunGit):
bisect_instance = _GetBisectPerformanceMetricsInstance(DEFAULT_OPTIONS)
mock_RunGit.return_value = None, None
bisect_instance._SyncRevision(
'webkit', 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61', None)
expected_params = ['checkout', 'a94d028e0f2c77f159b3dac95eb90c3b4cf48c61']
mock_RunGit.assert_called_with(expected_params)
def testTryJobSvnRepo_PerfBuilderType_ReturnsRepoUrl(self):
self.assertEqual(
bisect_perf_regression.PERF_SVN_REPO_URL,
bisect_perf_regression._TryJobSvnRepo(fetch_build.PERF_BUILDER))
def testTryJobSvnRepo_FullBuilderType_ReturnsRepoUrl(self):
self.assertEqual(
bisect_perf_regression.FULL_SVN_REPO_URL,
bisect_perf_regression._TryJobSvnRepo(fetch_build.FULL_BUILDER))
def testTryJobSvnRepo_WithUnknownBuilderType_ThrowsError(self):
with self.assertRaises(NotImplementedError):
bisect_perf_regression._TryJobSvnRepo('foo')
def _CheckIsDownloadable(self, depot, target_platform='chromium',
builder_type='perf'):
opts = dict(DEFAULT_OPTIONS)
opts.update({'target_platform': target_platform,
'builder_type': builder_type})
bisect_instance = _GetBisectPerformanceMetricsInstance(opts)
return bisect_instance.IsDownloadable(depot)
def testIsDownloadable_ChromiumDepot_ReturnsTrue(self):
self.assertTrue(self._CheckIsDownloadable(depot='chromium'))
def testIsDownloadable_DEPSDepot_ReturnsTrue(self):
self.assertTrue(self._CheckIsDownloadable(depot='v8'))
def testIsDownloadable_AndroidChromeDepot_ReturnsTrue(self):
self.assertTrue(self._CheckIsDownloadable(
depot='android-chrome', target_platform='android-chrome'))
def testIsDownloadable_AndroidChromeWithDEPSChromium_ReturnsFalse(self):
self.assertFalse(self._CheckIsDownloadable(
depot='chromium', target_platform='android-chrome'))
def testIsDownloadable_AndroidChromeWithDEPSV8_ReturnsFalse(self):
self.assertFalse(self._CheckIsDownloadable(
depot='v8', target_platform='android-chrome'))
def testIsDownloadable_NoBuilderType_ReturnsFalse(self):
self.assertFalse(
self._CheckIsDownloadable(depot='chromium', builder_type=''))
class DepotDirectoryRegistryTest(unittest.TestCase):
def setUp(self):
self.old_chdir = os.chdir
os.chdir = self.mockChdir
self.old_depot_names = bisect_utils.DEPOT_NAMES
bisect_utils.DEPOT_NAMES = ['mock_depot']
self.old_depot_deps_name = bisect_utils.DEPOT_DEPS_NAME
bisect_utils.DEPOT_DEPS_NAME = {'mock_depot': {'src': 'src/foo'}}
self.registry = bisect_perf_regression.DepotDirectoryRegistry('/mock/src')
self.cur_dir = None
def tearDown(self):
os.chdir = self.old_chdir
bisect_utils.DEPOT_NAMES = self.old_depot_names
bisect_utils.DEPOT_DEPS_NAME = self.old_depot_deps_name
def mockChdir(self, new_dir):
self.cur_dir = new_dir
def testReturnsCorrectResultForChrome(self):
self.assertEqual(self.registry.GetDepotDir('chromium'), '/mock/src')
def testUsesDepotSpecToInitializeRegistry(self):
self.assertEqual(self.registry.GetDepotDir('mock_depot'), '/mock/src/foo')
def testChangedTheDirectory(self):
self.registry.ChangeToDepotDir('mock_depot')
self.assertEqual(self.cur_dir, '/mock/src/foo')
# The tests below test private functions (W0212).
# pylint: disable=W0212
class GitTryJobTestCases(unittest.TestCase):
"""Test case for bisect try job."""
def setUp(self):
bisect_utils_patcher = mock.patch('bisect_perf_regression.bisect_utils')
self.mock_bisect_utils = bisect_utils_patcher.start()
self.addCleanup(bisect_utils_patcher.stop)
def _SetupRunGitMock(self, git_cmds):
"""Setup RunGit mock with expected output for given git command."""
def side_effect(git_cmd_args):
for val in git_cmds:
if set(val[0]) == set(git_cmd_args):
return val[1]
self.mock_bisect_utils.RunGit = mock.Mock(side_effect=side_effect)
def _AssertRunGitExceptions(self, git_cmds, func, *args):
"""Setup RunGit mock and tests RunGitException.
Args:
git_cmds: List of tuples with git command and expected output.
func: Callback function to be executed.
args: List of arguments to be passed to the function.
"""
self._SetupRunGitMock(git_cmds)
self.assertRaises(bisect_perf_regression.RunGitError,
func,
*args)
def testNotGitRepo(self):
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
cmds = [(['rev-parse', '--abbrev-ref', 'HEAD'], (None, 128))]
self._AssertRunGitExceptions(cmds,
bisect_perf_regression._PrepareBisectBranch,
parent_branch, new_branch)
def testFailedCheckoutMaster(self):
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
cmds = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (new_branch, 0)),
(['checkout', '-f', parent_branch], ('Checkout Failed', 1)),
]
self._AssertRunGitExceptions(cmds,
bisect_perf_regression._PrepareBisectBranch,
parent_branch, new_branch)
def testDeleteBisectBranchIfExists(self):
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
cmds = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (parent_branch, 0)),
(['branch', '--list'], ('bisect-tryjob\n*master\nsomebranch', 0)),
(['branch', '-D', new_branch], ('Failed to delete branch', 128)),
]
self._AssertRunGitExceptions(cmds,
bisect_perf_regression._PrepareBisectBranch,
parent_branch, new_branch)
def testCreateNewBranchFails(self):
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
cmds = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (parent_branch, 0)),
(['branch', '--list'], ('bisect-tryjob\n*master\nsomebranch', 0)),
(['branch', '-D', new_branch], ('None', 0)),
(['update-index', '--refresh', '-q'], (None, 0)),
(['diff-index', 'HEAD'], (None, 0)),
(['checkout', '-b', new_branch], ('Failed to create branch', 128)),
]
self._AssertRunGitExceptions(cmds,
bisect_perf_regression._PrepareBisectBranch,
parent_branch, new_branch)
def testSetUpstreamToFails(self):
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
cmds = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (parent_branch, 0)),
(['branch', '--list'], ('bisect-tryjob\n*master\nsomebranch', 0)),
(['branch', '-D', new_branch], ('None', 0)),
(['update-index', '--refresh', '-q'], (None, 0)),
(['diff-index', 'HEAD'], (None, 0)),
(['checkout', '-b', new_branch], ('None', 0)),
(['branch', '--set-upstream-to', parent_branch],
('Set upstream fails', 1)),
]
self._AssertRunGitExceptions(cmds,
bisect_perf_regression._PrepareBisectBranch,
parent_branch, new_branch)
def testStartBuilderTryJobForException(self):
git_revision = 'ac4a9f31fe2610bd146857bbd55d7a260003a888'
bot_name = 'linux_perf_bisect_builder'
bisect_job_name = 'testBisectJobname'
patch = None
patch_content = '/dev/null'
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
try_cmd = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (parent_branch, 0)),
(['branch', '--list'], ('bisect-tryjob\n*master\nsomebranch', 0)),
(['branch', '-D', new_branch], ('None', 0)),
(['update-index', '--refresh', '-q'], (None, 0)),
(['diff-index', 'HEAD'], (None, 0)),
(['checkout', '-b', new_branch], ('None', 0)),
(['branch', '--set-upstream-to', parent_branch],
('Set upstream fails', 0)),
(['try',
'--bot=%s' % bot_name,
'--revision=%s' % git_revision,
'--name=%s' % bisect_job_name,
'--svn_repo=%s' % bisect_perf_regression.PERF_SVN_REPO_URL,
'--diff=%s' % patch_content],
(None, 1)),
]
self._AssertRunGitExceptions(
try_cmd, bisect_perf_regression._StartBuilderTryJob,
fetch_build.PERF_BUILDER, git_revision, bot_name, bisect_job_name,
patch)
def testBuilderTryJob(self):
git_revision = 'ac4a9f31fe2610bd146857bbd55d7a260003a888'
bot_name = 'linux_perf_bisect_builder'
bisect_job_name = 'testBisectJobname'
patch = None
patch_content = '/dev/null'
new_branch = bisect_perf_regression.BISECT_TRYJOB_BRANCH
parent_branch = bisect_perf_regression.BISECT_MASTER_BRANCH
try_cmd = [
(['rev-parse', '--abbrev-ref', 'HEAD'], (parent_branch, 0)),
(['branch', '--list'], ('bisect-tryjob\n*master\nsomebranch', 0)),
(['branch', '-D', new_branch], ('None', 0)),
(['update-index', '--refresh', '-q'], (None, 0)),
(['diff-index', 'HEAD'], (None, 0)),
(['checkout', '-b', new_branch], ('None', 0)),
(['branch', '--set-upstream-to', parent_branch],
('Set upstream fails', 0)),
(['try',
'--bot=%s' % bot_name,
'--revision=%s' % git_revision,
'--name=%s' % bisect_job_name,
'--svn_repo=%s' % bisect_perf_regression.PERF_SVN_REPO_URL,
'--diff=%s' % patch_content],
(None, 0)),
]
self._SetupRunGitMock(try_cmd)
bisect_perf_regression._StartBuilderTryJob(
fetch_build.PERF_BUILDER, git_revision, bot_name, bisect_job_name,
patch)
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This file contains printing-related functionality of the bisect."""
import datetime
import re
from bisect_results import BisectResults
import bisect_utils
import source_control
# The perf dashboard looks for a string like "Estimated Confidence: 95%"
# to decide whether or not to cc the author(s). If you change this, please
# update the perf dashboard as well.
RESULTS_BANNER = """
===== BISECT JOB RESULTS =====
Status: %(status)s
Test Command: %(command)s
Test Metric: %(metric)s
Relative Change: %(change)s
Estimated Confidence: %(confidence).02f%%
Retested CL with revert: %(retest)s"""
# When the bisect is aborted without a bisect failure, the following template
# is used.
ABORT_REASON_TEMPLATE = """
===== BISECTION ABORTED =====
The bisect was aborted because %(abort_reason)s
Please contact the team (see below) if you believe this is in error.
Bug ID: %(bug_id)s
Test Command: %(command)s
Test Metric: %(metric)s
Good revision: %(good_revision)s
Bad revision: %(bad_revision)s """
# The perf dashboard specifically looks for the string
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s%(commit_info)s
Commit : %(cl)s
Date : %(cl_date)s"""
RESULTS_THANKYOU = """
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
| X | for more information on addressing perf regression bugs. For feedback,
| / \\ | file a bug with label Cr-Tests-AutoBisect. Thank you!"""
class BisectPrinter(object):
def __init__(self, opts, depot_registry=None):
self.opts = opts
self.depot_registry = depot_registry
def FormatAndPrintResults(self, bisect_results):
"""Prints the results from a bisection run in a readable format.
Also prints annotations creating buildbot step "Results".
Args:
bisect_results: BisectResult object containing results to be printed.
"""
if bisect_results.abort_reason:
self._PrintAbortResults(bisect_results.abort_reason)
return
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
print
print 'Full results of bisection:'
for revision_state in bisect_results.state.GetRevisionStates():
build_status = revision_state.passed
if type(build_status) is bool:
if build_status:
build_status = 'Good'
else:
build_status = 'Bad'
print ' %20s %40s %s' % (revision_state.depot,
revision_state.revision,
build_status)
print
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
# The perf dashboard scrapes the "results" step in order to comment on
# bugs. If you change this, please update the perf dashboard as well.
bisect_utils.OutputAnnotationStepStart('Results')
self._PrintBanner(bisect_results)
self._PrintWarnings(bisect_results.warnings)
if bisect_results.culprit_revisions and bisect_results.confidence:
for culprit in bisect_results.culprit_revisions:
cl, info, depot = culprit
self._PrintRevisionInfo(cl, info, depot)
self._PrintRetestResults(bisect_results)
self._PrintTestedCommitsTable(bisect_results.state.GetRevisionStates(),
bisect_results.first_working_revision,
bisect_results.last_broken_revision,
bisect_results.confidence,
final_step=True)
self._PrintStepTime(bisect_results.state.GetRevisionStates())
self._PrintThankYou()
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
def PrintPartialResults(self, bisect_state):
revision_states = bisect_state.GetRevisionStates()
first_working_rev, last_broken_rev = BisectResults.FindBreakingRevRange(
revision_states)
self._PrintTestedCommitsTable(revision_states, first_working_rev,
last_broken_rev, 100, final_step=False)
def _PrintAbortResults(self, abort_reason):
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepStart('Results')
# Metric string in config is not split in case of return code mode.
if (self.opts.metric and
self.opts.bisect_mode != bisect_utils.BISECT_MODE_RETURN_CODE):
metric = '/'.join(self.opts.metric)
else:
metric = self.opts.metric
print ABORT_REASON_TEMPLATE % {
'abort_reason': abort_reason,
'bug_id': self.opts.bug_id or 'NOT SPECIFIED',
'command': self.opts.command,
'metric': metric,
'good_revision': self.opts.good_revision,
'bad_revision': self.opts.bad_revision,
}
self._PrintThankYou()
if self.opts.output_buildbot_annotations:
bisect_utils.OutputAnnotationStepClosed()
@staticmethod
def _PrintThankYou():
print RESULTS_THANKYOU
@staticmethod
def _PrintStepTime(revision_states):
"""Prints information about how long various steps took.
Args:
revision_states: Ordered list of revision states."""
step_perf_time_avg = 0.0
step_build_time_avg = 0.0
step_count = 0.0
for revision_state in revision_states:
if revision_state.value:
step_perf_time_avg += revision_state.perf_time
step_build_time_avg += revision_state.build_time
step_count += 1
if step_count:
step_perf_time_avg = step_perf_time_avg / step_count
step_build_time_avg = step_build_time_avg / step_count
print
print 'Average build time : %s' % datetime.timedelta(
seconds=int(step_build_time_avg))
print 'Average test time : %s' % datetime.timedelta(
seconds=int(step_perf_time_avg))
@staticmethod
def _GetViewVCLinkFromDepotAndHash(git_revision, depot):
"""Gets link to the repository browser."""
if depot and 'viewvc' in bisect_utils.DEPOT_DEPS_NAME[depot]:
return bisect_utils.DEPOT_DEPS_NAME[depot]['viewvc'] + git_revision
return ''
def _PrintRevisionInfo(self, cl, info, depot=None):
commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
if commit_link:
commit_link = '\nLink : %s' % commit_link
else:
commit_link = ('\nDescription:\n%s' % info['body'])
print RESULTS_REVISION_INFO % {
'subject': info['subject'],
'author': info['email'],
'commit_info': commit_link,
'cl': cl,
'cl_date': info['date']
}
@staticmethod
def _PrintTableRow(column_widths, row_data):
"""Prints out a row in a formatted table that has columns aligned.
Args:
column_widths: A list of column width numbers.
row_data: A list of items for each column in this row.
"""
assert len(column_widths) == len(row_data)
text = ''
for i in xrange(len(column_widths)):
current_row_data = row_data[i].center(column_widths[i], ' ')
text += ('%%%ds' % column_widths[i]) % current_row_data
print text
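# Standalone illustration of the formatting above: each cell is centered
# within its column width, then the cells are concatenated, e.g.
#
#   _PrintTableRow([10, 12, 6], ['chromium', '488304', 'Good'])
#
# prints ' chromium    488304    Good ' with every column center-aligned.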
def _PrintTestedCommitsHeader(self):
if self.opts.bisect_mode == bisect_utils.BISECT_MODE_MEAN:
self._PrintTableRow(
[20, 12, 70, 14, 12, 13],
['Depot', 'Position', 'SHA', 'Mean', 'Std. Error', 'State'])
elif self.opts.bisect_mode == bisect_utils.BISECT_MODE_STD_DEV:
self._PrintTableRow(
[20, 12, 70, 14, 12, 13],
['Depot', 'Position', 'SHA', 'Std. Error', 'Mean', 'State'])
elif self.opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE:
self._PrintTableRow(
[20, 12, 70, 14, 13],
['Depot', 'Position', 'SHA', 'Return Code', 'State'])
else:
assert False, 'Invalid bisect_mode specified.'
def _PrintTestedCommitsEntry(self, revision_state, commit_position, cl_link,
state_str):
if self.opts.bisect_mode == bisect_utils.BISECT_MODE_MEAN:
std_error = '+-%.02f' % revision_state.value['std_err']
mean = '%.02f' % revision_state.value['mean']
self._PrintTableRow(
[20, 12, 70, 14, 12, 13],
[revision_state.depot, commit_position, cl_link, mean, std_error,
state_str])
elif self.opts.bisect_mode == bisect_utils.BISECT_MODE_STD_DEV:
std_error = '+-%.02f' % revision_state.value['std_err']
mean = '%.02f' % revision_state.value['mean']
self._PrintTableRow(
[20, 12, 70, 14, 12, 13],
[revision_state.depot, commit_position, cl_link, std_error, mean,
state_str])
elif self.opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE:
mean = '%d' % revision_state.value['mean']
self._PrintTableRow(
[20, 12, 70, 14, 13],
[revision_state.depot, commit_position, cl_link, mean,
state_str])
def _PrintTestedCommitsTable(
self, revision_states, first_working_revision, last_broken_revision,
confidence, final_step=True):
print
if final_step:
print '===== TESTED COMMITS ====='
else:
print '===== PARTIAL RESULTS ====='
self._PrintTestedCommitsHeader()
state = 0
for revision_state in revision_states:
if revision_state.value:
if (revision_state == last_broken_revision or
revision_state == first_working_revision):
# If confidence is too low, don't add this empty line since it's
# used to put focus on a suspected CL.
if confidence and final_step:
print
state += 1
if state == 2 and not final_step:
# Just want a separation between "bad" and "good" CLs.
print
state_str = 'Bad'
if state == 1 and final_step:
state_str = 'Suspected CL'
elif state == 2:
state_str = 'Good'
# If confidence is too low, don't bother outputting good/bad.
if not confidence:
state_str = ''
state_str = state_str.center(13, ' ')
commit_position = source_control.GetCommitPosition(
revision_state.revision,
self.depot_registry.GetDepotDir(revision_state.depot))
display_commit_pos = ''
if commit_position:
display_commit_pos = str(commit_position)
self._PrintTestedCommitsEntry(revision_state,
display_commit_pos,
revision_state.revision,
state_str)
def _PrintRetestResults(self, bisect_results):
if (not bisect_results.retest_results_tot or
not bisect_results.retest_results_reverted):
return
print
print '===== RETEST RESULTS ====='
self._PrintTestedCommitsEntry(
bisect_results.retest_results_tot, '', '', '')
self._PrintTestedCommitsEntry(
bisect_results.retest_results_reverted, '', '', '')
def _PrintBanner(self, bisect_results):
if self.opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE:
metric = 'N/A'
change = 'Yes'
else:
metric = '/'.join(self.opts.metric)
change = '%.02f%% (+/-%.02f%%)' % (
bisect_results.regression_size, bisect_results.regression_std_err)
if not bisect_results.culprit_revisions:
change = 'No significant change reproduced.'
print RESULTS_BANNER % {
'status': self._StatusMessage(bisect_results),
'command': self.opts.command,
'metric': metric,
'change': change,
'confidence': bisect_results.confidence,
'retest': 'Yes' if bisect_results.retest_results_tot else 'No',
}
@staticmethod
def _StatusMessage(bisect_results):
if bisect_results.confidence >= bisect_utils.HIGH_CONFIDENCE:
return 'Positive: Reproduced a change.'
elif bisect_results.culprit_revisions:
return 'Negative: Found possible suspect(s), but with low confidence.'
return 'Negative: Did not reproduce a change.'
@staticmethod
def _PrintWarnings(warnings):
"""Prints a list of warning strings if there are any."""
if not warnings:
return
print
print 'WARNINGS:'
for w in set(warnings):
print ' ! %s' % w
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import os
import bisect_utils
import math_utils
import source_control
import ttest
from bisect_state import RevisionState
class BisectResults(object):
"""Contains results of the completed bisect.
Properties:
error: Error message if the bisect failed.
If the error is None, the following properties are present:
warnings: List of warnings from the bisect run.
state: BisectState object from which these results were generated.
first_working_revision: First good revision.
last_broken_revision: Last bad revision.
If both of the above revisions are not None, the following properties are also
present:
culprit_revisions: A list of revisions which contain the bad change
introducing the failure.
regression_size: For performance bisects, this is a relative change of
the mean metric value. For other bisects this field always contains
'zero-to-nonzero'.
regression_std_err: For performance bisects, it is a pooled standard error
for groups of good and bad runs. Not used for other bisects.
confidence: For performance bisects, it is a confidence that the good and
bad runs are distinct groups. Not used for non-performance bisects.
"""
def __init__(self, bisect_state=None, depot_registry=None, opts=None,
runtime_warnings=None, error=None, abort_reason=None):
"""Computes final bisect results after a bisect run is complete.
This constructor should be called in one of the following ways:
BisectResults(state, depot_registry, opts, runtime_warnings)
BisectResults(error=error)
The first form creates an object representing successful bisect results, while
the second creates an error result.
Args:
bisect_state: BisectState object representing latest bisect state.
depot_registry: DepotDirectoryRegistry object with information on each
repository in the bisect_state.
opts: Options passed to the bisect run.
runtime_warnings: A list of warnings from the bisect run.
error: Error message. When error is not None, other arguments are ignored.
"""
# Set these attributes so that the bisect printer does not break when the
# regression cannot be reproduced (i.e. no broken revision was found).
self.regression_size = 0
self.regression_std_err = 0
self.confidence = 0
self.culprit_revisions = []
self.error = error
self.abort_reason = abort_reason
if error is not None or abort_reason is not None:
return
assert (bisect_state is not None and depot_registry is not None and
opts is not None and runtime_warnings is not None), (
'Incorrect use of the BisectResults constructor. '
'When error is None, all other arguments are required.')
self.state = bisect_state
rev_states = bisect_state.GetRevisionStates()
first_working_rev, last_broken_rev = self.FindBreakingRevRange(rev_states)
self.first_working_revision = first_working_rev
self.last_broken_revision = last_broken_rev
self.warnings = runtime_warnings
self.retest_results_tot = None
self.retest_results_reverted = None
if first_working_rev is not None and last_broken_rev is not None:
statistics = self._ComputeRegressionStatistics(
rev_states, first_working_rev, last_broken_rev)
self.regression_size = statistics['regression_size']
self.regression_std_err = statistics['regression_std_err']
self.confidence = statistics['confidence']
self.culprit_revisions = self._FindCulpritRevisions(
rev_states, depot_registry, first_working_rev, last_broken_rev)
self.warnings += self._GetResultBasedWarnings(
self.culprit_revisions, opts, self.confidence)
def AddRetestResults(self, results_tot, results_reverted):
if not results_tot or not results_reverted:
self.warnings.append(
'Failed to re-test reverted culprit CL against ToT.')
return
confidence = BisectResults.ConfidenceScore(
results_reverted[0]['values'],
results_tot[0]['values'])
self.retest_results_tot = RevisionState('ToT', 'n/a', 0)
self.retest_results_tot.value = results_tot[0]
self.retest_results_reverted = RevisionState('Reverted', 'n/a', 0)
self.retest_results_reverted.value = results_reverted[0]
if confidence <= bisect_utils.HIGH_CONFIDENCE:
self.warnings.append(
'Confidence of re-test with reverted CL is not high.'
' Check that the regression hasn\'t already recovered. '
' There\'s still a chance this is a regression, as performance of'
' local builds may not match official builds.')
@staticmethod
def _GetResultBasedWarnings(culprit_revisions, opts, confidence):
warnings = []
if len(culprit_revisions) > 1:
warnings.append('Due to build errors, regression range could '
'not be narrowed down to a single commit.')
if opts.repeat_test_count == 1:
warnings.append('Tests were only set to run once. This may '
'be insufficient to get meaningful results.')
if 0 < confidence < bisect_utils.HIGH_CONFIDENCE:
warnings.append('Confidence is not high. Try bisecting again '
'with increased repeat_count, larger range, or '
'on another metric.')
if not confidence:
warnings.append('Confidence score is 0%. Try bisecting again on '
'another platform or another metric.')
return warnings
@staticmethod
def ConfidenceScore(sample1, sample2, accept_single_bad_or_good=False):
"""Calculates a confidence score.
This score is based on a statistical hypothesis test. The null
hypothesis is that the two groups of results have no difference,
i.e. there is no performance regression. The alternative hypothesis
is that there is some difference between the groups that's unlikely
to occur by chance.
The score returned by this function represents our confidence in the
alternative hypothesis.
Note that if there's only one item in either sample, this means only
one revision was classified good or bad, so there's not much evidence
to make a decision.
Args:
sample1: A flat list of "good" result numbers.
sample2: A flat list of "bad" result numbers.
accept_single_bad_or_good: If True, compute a value even if
there is only one bad or good revision.
Returns:
A float between 0 and 100; 0 if the samples aren't large enough.
"""
if ((len(sample1) <= 1 or len(sample2) <= 1) and
not accept_single_bad_or_good):
return 0.0
if not sample1 or not sample2:
return 0.0
_, _, p_value = ttest.WelchsTTest(sample1, sample2)
return 100.0 * (1.0 - p_value)
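# Illustrative cross-check (not part of this module): scipy's Welch's t-test
# yields an equivalent p-value, so the score above is roughly
#
#   from scipy import stats
#   _, p_value = stats.ttest_ind(sample1, sample2, equal_var=False)
#   confidence = 100.0 * (1.0 - p_value)
#
# The local ttest module is table-based, so values differ slightly; the unit
# tests in bisect_results_test.py expect about 80 for the samples
# [0, 1, 1, 1, 2, 2] and [1, 1, 1, 3, 3, 4].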
@staticmethod
def FindBreakingRevRange(revision_states):
"""Finds the last known good and first known bad revisions.
Note that since revision_states is expected to be in reverse chronological
order, the last known good revision is the first revision in the list that
has the passed property set to 1, therefore the name
`first_working_revision`. The inverse applies to `last_broken_revision`.
Args:
revision_states: A list of RevisionState instances.
Returns:
A tuple containing the two revision states at the border. (Last
known good and first known bad.)
"""
first_working_revision = None
last_broken_revision = None
for revision_state in revision_states:
if revision_state.passed == 1 and not first_working_revision:
first_working_revision = revision_state
if not revision_state.passed:
last_broken_revision = revision_state
return first_working_revision, last_broken_revision
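# Illustration with hypothetical states (newest first; passed is 1 for good,
# 0 for bad):
#
#   states = [State('e', passed=0), State('d', passed=0),
#             State('c', passed=1), State('b', passed=1)]
#   BisectResults.FindBreakingRevRange(states)
#   # -> first_working_revision 'c', last_broken_revision 'd'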
@staticmethod
def _FindCulpritRevisions(revision_states, depot_registry, first_working_rev,
last_broken_rev):
cwd = os.getcwd()
culprit_revisions = []
for i in xrange(last_broken_rev.index, first_working_rev.index):
depot_registry.ChangeToDepotDir(revision_states[i].depot)
info = source_control.QueryRevisionInfo(revision_states[i].revision)
culprit_revisions.append((revision_states[i].revision, info,
revision_states[i].depot))
os.chdir(cwd)
return culprit_revisions
@classmethod
def _ComputeRegressionStatistics(cls, rev_states, first_working_rev,
last_broken_rev):
# TODO(sergiyb): We assume that value has "values" key, which may not be
# the case for failure-bisects, where there is a single value only.
broken_means = [state.value['values']
for state in rev_states[:last_broken_rev.index+1]
if state.value]
working_means = [state.value['values']
for state in rev_states[first_working_rev.index:]
if state.value]
# Flatten the lists to calculate mean of all values.
working_mean = sum(working_means, [])
broken_mean = sum(broken_means, [])
# Calculate the approximate size of the regression
mean_of_bad_runs = math_utils.Mean(broken_mean)
mean_of_good_runs = math_utils.Mean(working_mean)
regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
mean_of_bad_runs)
if math.isnan(regression_size):
regression_size = 'zero-to-nonzero'
regression_std_err = math.fabs(math_utils.PooledStandardError(
[working_mean, broken_mean]) /
max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
# Give a "confidence" in the bisect culprit by seeing whether the results
# of the culprit revision and the revision before that appear to be
# statistically significantly different.
confidence = cls.ConfidenceScore(
sum([first_working_rev.value['values']], []),
sum([last_broken_rev.value['values']], []))
bad_greater_than_good = mean_of_bad_runs > mean_of_good_runs
return {'regression_size': regression_size,
'regression_std_err': regression_std_err,
'confidence': confidence,
'bad_greater_than_good': bad_greater_than_good}
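# Worked example of the regression-size arithmetic above, assuming
# RelativeChange(before, after) is (after - before) / before (the helper's
# definition is not shown here):
#
#   mean_of_good_runs = 20.0, mean_of_bad_runs = 25.0
#   regression_size = 100 * (25.0 - 20.0) / 20.0 = 25.0   # A 25% regression.
#
# regression_std_err likewise normalizes the pooled standard error by the
# smaller of the two means and reports it as a percentage.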
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import bisect_utils
import source_control
def Get(bisect_results, opts, depot_registry):
"""Returns the results as a jsonable object."""
if opts.bisect_mode == bisect_utils.BISECT_MODE_RETURN_CODE:
metric = 'N/A'
change = '0'
else:
metric = '/'.join(opts.metric)
change = '%.02f%%' % bisect_results.regression_size
status = 'completed'
return {
'try_job_id': opts.try_job_id,
'bug_id': opts.bug_id,
'status': status,
'buildbot_log_url': _GetBuildBotLogUrl(),
'bisect_bot': os.environ.get('BUILDBOT_BUILDERNAME', ''),
'command': opts.command,
'metric': metric,
'change': change,
'score': bisect_results.confidence,
'good_revision': opts.good_revision,
'bad_revision': opts.bad_revision,
'warnings': bisect_results.warnings,
'abort_reason': bisect_results.abort_reason,
'culprit_data': _CulpritData(bisect_results),
'revision_data': _RevisionData(bisect_results, depot_registry),
}
def _CulpritData(bisect_results):
if not bisect_results.culprit_revisions:
return None
cl, culprit_info, depot = bisect_results.culprit_revisions[0]
commit_link = _GetViewVCLinkFromDepotAndHash(cl, depot)
if commit_link:
commit_link = '\nLink : %s' % commit_link
else:
commit_link = ('\nDescription:\n%s' % culprit_info['body'])
return {
'subject': culprit_info['subject'],
'author': culprit_info['email'],
'email': culprit_info['email'],
'cl_date': culprit_info['date'],
'commit_info': commit_link,
'revisions_links': [],
'cl': cl
}
def _RevisionData(bisect_results, depot_registry):
revision_rows = []
for state in bisect_results.state.GetRevisionStates():
commit_position = source_control.GetCommitPosition(
state.revision, depot_registry.GetDepotDir(state.depot))
revision_rows.append({
'depot_name': state.depot,
'deps_revision': state.revision,
'commit_pos': commit_position,
'result': 'good' if state.passed else 'bad',
})
return revision_rows
def _GetViewVCLinkFromDepotAndHash(git_revision, depot):
"""Gets link to the repository browser."""
if depot and 'viewvc' in bisect_utils.DEPOT_DEPS_NAME[depot]:
return bisect_utils.DEPOT_DEPS_NAME[depot]['viewvc'] + git_revision
return ''
def _GetBuildBotLogUrl():
master_url = os.environ.get('BUILDBOT_BUILDBOTURL')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
builder_number = os.environ.get('BUILDBOT_BUILDNUMBER')
if master_url and builder_name and builder_number:
return '%s%s/%s' % (master_url, builder_name, builder_number)
return ''
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from bisect_results import BisectResults
import source_control
class MockDepotRegistry(object):
def ChangeToDepotDir(self, depot):
pass
class MockRevisionState(object):
def __init__(self, revision, index, depot='chromium', value=None,
perf_time=0, build_time=0, passed='?', external=None):
self.depot = depot
self.revision = revision
self.index = index
self.value = value
self.perf_time = perf_time
self.build_time = build_time
self.passed = passed
self.external = external
class MockBisectState(object):
def __init__(self):
self.mock_revision_states = []
mock_bad_val = {'values': [100, 105, 95]}
for i, rev in enumerate(['a', 'b']):
mock_rev_state = MockRevisionState(rev, i, value=mock_bad_val, passed=0)
self.mock_revision_states.append(mock_rev_state)
mock_good_val = {'values': [1, 2, 3]}
for i, rev in enumerate(['c', 'd', 'e'], start=2):
mock_rev_state = MockRevisionState(rev, i, value=mock_good_val, passed=1)
self.mock_revision_states.append(mock_rev_state)
def GetRevisionStates(self):
return self.mock_revision_states
class MockBisectOptions(object):
def __init__(self):
self.repeat_test_count = 3
class BisectResultsTest(unittest.TestCase):
def setUp(self):
self.mock_bisect_state = MockBisectState()
self.mock_depot_registry = MockDepotRegistry()
self.mock_opts = MockBisectOptions()
self.mock_warnings = []
self.original_getcwd = os.getcwd
self.original_chdir = os.chdir
self.original_query_revision_info = source_control.QueryRevisionInfo
os.getcwd = lambda: '/path'
os.chdir = lambda _: None
revision_infos = {'b': {'test': 'b'}, 'c': {'test': 'c'}}
source_control.QueryRevisionInfo = lambda rev: revision_infos[rev]
def tearDown(self):
os.getcwd = self.original_getcwd
os.chdir = self.original_chdir
source_control.QueryRevisionInfo = self.original_query_revision_info
def _AssertConfidence(self, score, bad_values, good_values):
"""Checks whether the given sets of values have a given confidence score.
The score represents our confidence that the two sets of values wouldn't
be as different as they are just by chance; that is, that some real change
occurred between the two sets of values.
Args:
score: Expected confidence score.
bad_values: First list of numbers.
good_values: Second list of numbers.
"""
confidence = BisectResults.ConfidenceScore(bad_values, good_values)
self.assertEqual(score, confidence)
def testConfidenceScoreIsZeroOnTooFewLists(self):
self._AssertConfidence(0.0, [], [1, 2])
self._AssertConfidence(0.0, [1, 2], [])
self._AssertConfidence(0.0, [1], [1, 2])
self._AssertConfidence(0.0, [1, 2], [1])
def testConfidenceScore_ZeroConfidence(self):
# The good and bad sets contain the same values, so the confidence that
# they're different should be zero.
self._AssertConfidence(0.0, [4, 5, 7, 6, 8, 7], [8, 7, 6, 7, 5, 4])
def testConfidenceScore_MediumConfidence(self):
self._AssertConfidence(80.0, [0, 1, 1, 1, 2, 2], [1, 1, 1, 3, 3, 4])
def testConfidenceScore_HighConfidence(self):
self._AssertConfidence(95.0, [0, 1, 1, 1, 2, 2], [1, 2, 2, 3, 3, 4])
def testConfidenceScore_VeryHighConfidence(self):
# Confidence is high if the two sets of values have no internal variance.
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.2, 1.2, 1.2, 1.2])
self._AssertConfidence(99.9, [1, 1, 1, 1], [1.01, 1.01, 1.01, 1.01])
def testConfidenceScore_UnbalancedSampleSize(self):
# The second set of numbers only contains one number, so confidence is 0.
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2], [1.4])
def testConfidenceScore_EmptySample(self):
# Confidence is zero if either or both samples are empty.
self._AssertConfidence(0.0, [], [])
self._AssertConfidence(0.0, [], [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3])
self._AssertConfidence(0.0, [1.1, 1.2, 1.1, 1.2, 1.0, 1.3, 1.2, 1.3], [])
def testConfidenceScore_FunctionalTestResults(self):
self._AssertConfidence(80.0, [1, 1, 0, 1, 1, 1, 0, 1], [0, 0, 1, 0, 1, 0])
self._AssertConfidence(99.9, [1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0])
def testConfidenceScore_RealWorldCases(self):
"""This method contains a set of data from actual bisect results.
The confidence scores asserted below were all copied from the actual
    results, so the purpose of this test method is mainly to show what the
    results for real cases are, and to allow comparison if the confidence
    score function changes in the future.
"""
self._AssertConfidence(80, [133, 130, 132, 132, 130, 129], [129, 129, 125])
self._AssertConfidence(99.5, [668, 667], [498, 498, 499])
self._AssertConfidence(80, [67, 68], [65, 65, 67])
self._AssertConfidence(0, [514], [514])
self._AssertConfidence(90, [616, 613, 607, 615], [617, 619, 619, 617])
self._AssertConfidence(0, [3.5, 5.8, 4.7, 3.5, 3.6], [2.8])
self._AssertConfidence(90, [3, 3, 3], [2, 2, 2, 3])
self._AssertConfidence(0, [1999004, 1999627], [223355])
self._AssertConfidence(90, [1040, 934, 961], [876, 875, 789])
self._AssertConfidence(90, [309, 305, 304], [302, 302, 299, 303, 298])
def testCorrectlyFindsBreakingRange(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[2], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeNotInOrder(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 1
revision_states[2].passed = 0
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[1], results.first_working_revision)
self.assertEqual(revision_states[2], results.last_broken_revision)
def testCorrectlyFindsBreakingRangeIncompleteBisect(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = '?'
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[3], results.first_working_revision)
self.assertEqual(revision_states[1], results.last_broken_revision)
def testFindBreakingRangeAllPassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 1
revision_states[1].passed = 1
revision_states[2].passed = 1
revision_states[3].passed = 1
revision_states[4].passed = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(revision_states[0], results.first_working_revision)
self.assertIsNone(results.last_broken_revision)
def testFindBreakingRangeNonePassed(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[1].passed = 0
revision_states[2].passed = 0
revision_states[3].passed = 0
revision_states[4].passed = 0
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertIsNone(results.first_working_revision)
self.assertEqual(revision_states[4], results.last_broken_revision)
def testCorrectlyComputesRegressionStatistics(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[0].passed = 0
revision_states[0].value = {'values': [1000, 999, 998]}
revision_states[1].passed = 0
revision_states[1].value = {'values': [980, 1000, 999]}
revision_states[2].passed = 1
revision_states[2].value = {'values': [50, 45, 55]}
revision_states[3].passed = 1
revision_states[3].value = {'values': [45, 56, 45]}
revision_states[4].passed = 1
revision_states[4].value = {'values': [51, 41, 58]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertAlmostEqual(99.9, results.confidence)
self.assertAlmostEqual(1909.86547085, results.regression_size)
self.assertAlmostEqual(7.16625904, results.regression_std_err)
def testFindsCulpritRevisions(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[1].depot = 'chromium'
revision_states[2].depot = 'webkit'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.culprit_revisions))
self.assertEqual(('b', {'test': 'b'}, 'chromium'),
results.culprit_revisions[0])
def testNoResultBasedWarningsForNormalState(self):
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(0, len(results.warnings))
def testWarningForMultipleCulpritRevisions(self):
self.mock_bisect_state.mock_revision_states[2].passed = 'Skipped'
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowRetryLimit(self):
self.mock_opts.repeat_test_count = 1
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(1, len(results.warnings))
def testWarningForTooLowConfidence(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[2].value = {'values': [95, 90, 90]}
revision_states[3].value = {'values': [95, 90, 90]}
revision_states[4].value = {'values': [95, 90, 90]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertGreater(results.confidence, 0)
self.assertEqual(1, len(results.warnings))
def testWarningForZeroConfidence(self):
revision_states = self.mock_bisect_state.mock_revision_states
revision_states[2].value = {'values': [100, 105, 95]}
revision_states[3].value = {'values': [100, 105, 95]}
revision_states[4].value = {'values': [100, 105, 95]}
results = BisectResults(self.mock_bisect_state, self.mock_depot_registry,
self.mock_opts, self.mock_warnings)
self.assertEqual(0, results.confidence)
self.assertEqual(1, len(results.warnings))
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class RevisionState(object):
"""Contains bisect state for a given revision.
Properties:
depot: The depot that this revision is from (e.g. WebKit).
revision: Revision number (Git hash or SVN number).
index: Position of the state in the list of all revisions.
value: Value(s) returned from the test.
perf_time: Time that a test took.
build_time: Time that a build took.
passed: Represents whether the performance test was successful at that
revision. Possible values include: 1 (passed), 0 (failed),
'?' (skipped), 'F' (build failed).
external: If the revision is a 'src' revision, 'external' contains the
revisions of each of the external libraries.
"""
def __init__(self, depot, revision, index):
self.depot = depot
self.revision = revision
self.index = index
self.value = None
self.perf_time = 0
self.build_time = 0
self.passed = '?'
self.external = None
# TODO(sergiyb): Update() to parse run_results from the RunTest.
class BisectState(object):
"""Represents a state of the bisect as a collection of revision states."""
def __init__(self, depot, revisions):
"""Initializes a new BisectState object with a set of revision states.
Args:
depot: Name of the depot used for initial set of revision states.
revisions: List of revisions used for initial set of revision states.
"""
self.revision_states = []
self.revision_index = {}
index = 0
for revision in revisions:
new_state = self._InitRevisionState(depot, revision, index)
self.revision_states.append(new_state)
index += 1
@staticmethod
def _RevisionKey(depot, revision):
return "%s:%s" % (depot, revision)
def _InitRevisionState(self, depot, revision, index):
key = self._RevisionKey(depot, revision)
self.revision_index[key] = index
return RevisionState(depot, revision, index)
def GetRevisionState(self, depot, revision):
"""Returns a mutable revision state."""
key = self._RevisionKey(depot, revision)
index = self.revision_index.get(key)
    # Index 0 is valid, so compare against None rather than testing truthiness.
    return self.revision_states[index] if index is not None else None
def CreateRevisionStatesAfter(self, depot, revisions, reference_depot,
reference_revision):
"""Creates a set of new revision states after a specified reference state.
Args:
depot: Name of the depot for the new revision states.
revisions: List of revisions for the new revision states.
reference_depot: Name of the depot for the reference revision state.
reference_revision: Revision for the reference revision state.
Returns:
      A list of all created revision states, in the order they were added.
"""
ref_key = self._RevisionKey(reference_depot, reference_revision)
ref_index = self.revision_index[ref_key]
num_new_revisions = len(revisions)
for entry in self.revision_states:
if entry.index > ref_index:
entry.index += num_new_revisions
first_index = ref_index + 1
for index, revision in enumerate(revisions, start=first_index):
new_state = self._InitRevisionState(depot, revision, index)
self.revision_states.insert(index, new_state)
return self.revision_states[first_index:first_index + num_new_revisions]
def GetRevisionStates(self):
"""Returns a copy of the list of the revision states."""
return list(self.revision_states)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from bisect_state import BisectState
class BisectStateTest(unittest.TestCase):
def testCreatesRevisionsStateAfterAReferenceRevision(self):
bisect_state = BisectState('chromium', ['a', 'b', 'c', 'd'])
bisect_state.CreateRevisionStatesAfter('webkit', [1, 2, 3], 'chromium', 'b')
bisect_state.CreateRevisionStatesAfter('v8', [100, 200], 'webkit', 2)
actual_revisions = bisect_state.GetRevisionStates()
expected_revisions = [('chromium', 'a'), ('chromium', 'b'), ('webkit', 1),
('webkit', 2), ('v8', 100), ('v8', 200),
('webkit', 3), ('chromium', 'c'), ('chromium', 'd')]
self.assertEqual(len(expected_revisions), len(actual_revisions))
for i in xrange(len(actual_revisions)):
self.assertEqual(i, actual_revisions[i].index)
self.assertEqual(expected_revisions[i][0], actual_revisions[i].depot)
self.assertEqual(expected_revisions[i][1], actual_revisions[i].revision)
# TODO(sergiyb): More tests for the remaining functions.
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions used by the bisect tool.
This includes functions related to checking out the depot and outputting
annotations for the Buildbot waterfall.
"""
import errno
import imp
import os
import stat
import subprocess
import sys
DEFAULT_GCLIENT_CUSTOM_DEPS = {
'src/data/page_cycler': 'https://chrome-internal.googlesource.com/'
'chrome/data/page_cycler/.git',
'src/data/dom_perf': 'https://chrome-internal.googlesource.com/'
'chrome/data/dom_perf/.git',
'src/data/mach_ports': 'https://chrome-internal.googlesource.com/'
'chrome/data/mach_ports/.git',
'src/tools/perf/data': 'https://chrome-internal.googlesource.com/'
'chrome/tools/perf/data/.git',
'src/third_party/adobe/flash/binaries/ppapi/linux':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/linux/.git',
'src/third_party/adobe/flash/binaries/ppapi/linux_x64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/linux_x64/.git',
'src/third_party/adobe/flash/binaries/ppapi/mac':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/mac/.git',
'src/third_party/adobe/flash/binaries/ppapi/mac_64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/mac_64/.git',
'src/third_party/adobe/flash/binaries/ppapi/win':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/win/.git',
'src/third_party/adobe/flash/binaries/ppapi/win_x64':
'https://chrome-internal.googlesource.com/'
'chrome/deps/adobe/flash/binaries/ppapi/win_x64/.git',
'src/third_party/WebKit/LayoutTests': None,
'src/tools/valgrind': None,
}
GCLIENT_SPEC_DATA = [
{
'name': 'src',
'url': 'https://chromium.googlesource.com/chromium/src.git',
'deps_file': '.DEPS.git',
'managed': True,
'custom_deps': {},
'safesync_url': '',
},
]
GCLIENT_SPEC_ANDROID = "\ntarget_os = ['android']"
GCLIENT_CUSTOM_DEPS_V8 = {
'src/v8_bleeding_edge': 'https://chromium.googlesource.com/v8/v8.git'
}
FILE_DEPS_GIT = '.DEPS.git'
FILE_DEPS = 'DEPS'
# Bisect working directory.
BISECT_DIR = 'bisect'
# The percentage at which confidence is considered high.
HIGH_CONFIDENCE = 95
# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
# src: Path to the working directory.
# recurse: True if this repository will get bisected.
# svn: URL of SVN repository. Needed for git workflow to resolve hashes to
# SVN revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in vars variable in DEPS file that has revision
# information.
DEPOT_DEPS_NAME = {
'chromium': {
'src': 'src',
'recurse': True,
'from': ['android-chrome'],
'viewvc': 'https://chromium.googlesource.com/chromium/src/+/',
'deps_var': 'chromium_rev'
},
'webkit': {
'src': 'src/third_party/WebKit',
'recurse': True,
'from': ['chromium'],
'viewvc': 'https://chromium.googlesource.com/chromium/blink/+/',
'deps_var': 'webkit_revision'
},
'angle': {
'src': 'src/third_party/angle',
'src_old': 'src/third_party/angle_dx11',
'recurse': True,
'from': ['chromium'],
'platform': 'nt',
'viewvc': 'https://chromium.googlesource.com/angle/angle/+/',
'deps_var': 'angle_revision'
},
'v8': {
'src': 'src/v8',
'recurse': True,
'from': ['chromium'],
'custom_deps': GCLIENT_CUSTOM_DEPS_V8,
'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
'deps_var': 'v8_revision'
},
'v8_bleeding_edge': {
'src': 'src/v8_bleeding_edge',
'recurse': True,
'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
'from': ['v8'],
'viewvc': 'https://chromium.googlesource.com/v8/v8.git/+/',
'deps_var': 'v8_revision'
},
'skia': {
'src': 'src/third_party/skia',
'recurse': True,
'from': ['chromium'],
'viewvc': 'https://chromium.googlesource.com/skia/+/',
'deps_var': 'skia_revision'
}
}
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def AddAdditionalDepotInfo(depot_info):
"""Adds additional depot info to the global depot variables."""
global DEPOT_DEPS_NAME
global DEPOT_NAMES
DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
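def _ExampleAddAdditionalDepotInfo():
  """Illustrative sketch (not in the original module): registers an extra
  depot to bisect. The depot name and URLs here are hypothetical; the dict
  shape follows the DEPOT_DEPS_NAME documentation above.
  """
  AddAdditionalDepotInfo({
      'my_internal_lib': {
          'src': 'src/third_party/my_internal_lib',
          'recurse': True,
          'from': ['chromium'],
          'viewvc': 'https://example.googlesource.com/my_internal_lib/+/',
          'deps_var': 'my_internal_lib_revision',
      },
  })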
def OutputAnnotationStepStart(name):
"""Outputs annotation to signal the start of a step to a try bot.
Args:
name: The name of the step.
"""
print
print '@@@SEED_STEP %s@@@' % name
print '@@@STEP_CURSOR %s@@@' % name
print '@@@STEP_STARTED@@@'
print
sys.stdout.flush()
def OutputAnnotationStepClosed():
"""Outputs annotation to signal the closing of a step to a try bot."""
print
print '@@@STEP_CLOSED@@@'
print
sys.stdout.flush()
def OutputAnnotationStepText(text):
"""Outputs appropriate annotation to print text.
Args:
    text: The text to print.
"""
print
print '@@@STEP_TEXT@%s@@@' % text
print
sys.stdout.flush()
def OutputAnnotationStepWarning():
"""Outputs appropriate annotation to signal a warning."""
print
print '@@@STEP_WARNINGS@@@'
  print
  sys.stdout.flush()
def OutputAnnotationStepFailure():
"""Outputs appropriate annotation to signal a warning."""
print
print '@@@STEP_FAILURE@@@'
print
def OutputAnnotationStepLink(label, url):
"""Outputs appropriate annotation to print a link.
Args:
label: The name to print.
url: The URL to print.
"""
print
print '@@@STEP_LINK@%s@%s@@@' % (label, url)
print
sys.stdout.flush()
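def _ExampleAnnotatedStep():
  """Illustrative sketch (not in the original module): a typical annotated
  step brackets its output with the helpers above. The step name, text and
  URL here are hypothetical.
  """
  OutputAnnotationStepStart('Gathering Reference Values')
  OutputAnnotationStepText('20 runs')
  OutputAnnotationStepLink('results', 'https://example.com/results')
  OutputAnnotationStepClosed()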
def LoadExtraSrc(path_to_file):
"""Attempts to load an extra source file, and overrides global values.
If the extra source file is loaded successfully, then it will use the new
module to override some global values, such as gclient spec data.
Args:
path_to_file: File path.
Returns:
The loaded module object, or None if none was imported.
"""
try:
global GCLIENT_SPEC_DATA
global GCLIENT_SPEC_ANDROID
extra_src = imp.load_source('data', path_to_file)
GCLIENT_SPEC_DATA = extra_src.GetGClientSpec()
GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams()
return extra_src
except ImportError:
return None
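# Illustrative sketch (not in the original file): a minimal module usable
# with LoadExtraSrc(). It must define the two functions called above; the
# spec contents here are hypothetical.
#
#   # extra_src.py
#   def GetGClientSpec():
#     return [{
#         'name': 'src',
#         'url': 'https://example.googlesource.com/chromium/src.git',
#         'deps_file': '.DEPS.git',
#         'managed': True,
#         'custom_deps': {},
#         'safesync_url': '',
#     }]
#   def GetGClientSpecExtraParams():
#     return "\ntarget_os = ['android']"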
def IsTelemetryCommand(command):
"""Attempts to discern whether or not a given command is running telemetry."""
return 'tools/perf/run_' in command or 'tools\\perf\\run_' in command
def _CreateAndChangeToSourceDirectory(working_directory):
"""Creates a directory 'bisect' as a subdirectory of |working_directory|.
If successful, the current working directory will be changed to the new
'bisect' directory.
Args:
working_directory: The directory to create the new 'bisect' directory in.
Returns:
True if the directory was successfully created (or already existed).
"""
cwd = os.getcwd()
os.chdir(working_directory)
try:
os.mkdir(BISECT_DIR)
  except OSError as e:
if e.errno != errno.EEXIST: # EEXIST indicates that it already exists.
os.chdir(cwd)
return False
os.chdir(BISECT_DIR)
return True
def _SubprocessCall(cmd, cwd=None):
"""Runs a command in a subprocess.
Args:
cmd: The command to run.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
if os.name == 'nt':
# "HOME" isn't normally defined on windows, but is needed
# for git to find the user's .netrc file.
if not os.getenv('HOME'):
os.environ['HOME'] = os.environ['USERPROFILE']
shell = os.name == 'nt'
return subprocess.call(cmd, shell=shell, cwd=cwd)
def RunGClient(params, cwd=None):
"""Runs gclient with the specified parameters.
Args:
params: A list of parameters to pass to gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
cmd = ['gclient'] + params
return _SubprocessCall(cmd, cwd=cwd)
def RunGClientAndCreateConfig(opts, custom_deps=None, cwd=None):
"""Runs gclient and creates a config containing both src and src-internal.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
spec = GCLIENT_SPEC_DATA
if custom_deps:
for k, v in custom_deps.iteritems():
spec[0]['custom_deps'][k] = v
  # The spec string cannot contain newlines on Windows.
  spec = 'solutions =' + str(spec)
  spec = ''.join(spec.splitlines())
if 'android' in opts.target_platform:
spec += GCLIENT_SPEC_ANDROID
return_code = RunGClient(
['config', '--spec=%s' % spec], cwd=cwd)
return return_code
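# Illustrative sketch (not in the original file): given the default
# GCLIENT_SPEC_DATA above and one extra dependency, the call
#
#   RunGClientAndCreateConfig(
#       opts, custom_deps={'src/data/example': 'https://example.com/data.git'})
#
# runs "gclient config --spec=..." with the custom_deps entry merged into the
# 'src' solution and the whole spec flattened onto one line (plus
# GCLIENT_SPEC_ANDROID appended for android platforms). The repo URL here is
# hypothetical.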
def OnAccessError(func, path, _):
"""Error handler for shutil.rmtree.
Source: http://goo.gl/DEYNCT
If the error is due to an access error (read only file), it attempts to add
write permissions, then retries.
If the error is for another reason it re-raises the error.
Args:
func: The function that raised the error.
path: The path name passed to func.
_: Exception information from sys.exc_info(). Not used.
"""
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def _CleanupPreviousGitRuns(cwd=None):
  """Cleans up any leftover index.lock files after running git."""
  # Resolve the default at call time; os.getcwd() as a default argument
  # would be evaluated only once, at import time.
  if cwd is None:
    cwd = os.getcwd()
# If a previous run of git crashed, or bot was reset, etc., then we might
# end up with leftover index.lock files.
for path, _, files in os.walk(cwd):
for cur_file in files:
if cur_file.endswith('index.lock'):
path_to_file = os.path.join(path, cur_file)
os.remove(path_to_file)
def RunGClientAndSync(revisions=None, cwd=None):
"""Runs gclient and does a normal sync.
Args:
revisions: List of revisions that need to be synced.
E.g., "src@2ae43f...", "src/third_party/webkit@asr1234" etc.
cwd: Working directory to run from.
Returns:
The return code of the call.
"""
params = ['sync', '--verbose', '--nohooks', '--force',
'--delete_unversioned_trees']
if revisions is not None:
for revision in revisions:
if revision is not None:
params.extend(['--revision', revision])
return RunGClient(params, cwd=cwd)
def SetupGitDepot(opts, custom_deps):
"""Sets up the depot for the bisection.
The depot will be located in a subdirectory called 'bisect'.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
Returns:
True if gclient successfully created the config file and did a sync, False
otherwise.
"""
name = 'Setting up Bisection Depot'
try:
if opts.output_buildbot_annotations:
OutputAnnotationStepStart(name)
if RunGClientAndCreateConfig(opts, custom_deps):
return False
_CleanupPreviousGitRuns()
RunGClient(['revert'])
return not RunGClientAndSync()
finally:
if opts.output_buildbot_annotations:
OutputAnnotationStepClosed()
def CheckIfBisectDepotExists(opts):
"""Checks if the bisect directory already exists.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
    True if the bisect directory exists.
"""
path_to_dir = os.path.join(opts.working_directory, BISECT_DIR, 'src')
return os.path.exists(path_to_dir)
def CheckRunGit(command, cwd=None):
"""Run a git subcommand, returning its output and return code. Asserts if
the return code of the call is non-zero.
Args:
command: A list containing the args to git.
Returns:
A tuple of the output and return code.
"""
output, return_code = RunGit(command, cwd=cwd)
assert not return_code, 'An error occurred while running'\
' "git %s"' % ' '.join(command)
return output
def RunGit(command, cwd=None):
"""Run a git subcommand, returning its output and return code.
Args:
command: A list containing the args to git.
cwd: A directory to change to while running the git command (optional).
Returns:
A tuple of the output and return code.
"""
command = ['git'] + command
return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CreateBisectDirectoryAndSetupDepot(opts, custom_deps):
"""Sets up a subdirectory 'bisect' and then retrieves a copy of the depot
there using gclient.
Args:
opts: The options parsed from the command line through parse_args().
custom_deps: A dictionary of additional dependencies to add to .gclient.
"""
if CheckIfBisectDepotExists(opts):
path_to_dir = os.path.join(os.path.abspath(opts.working_directory),
BISECT_DIR, 'src')
output, _ = RunGit(['rev-parse', '--is-inside-work-tree'], cwd=path_to_dir)
if output.strip() == 'true':
      # Before checking out master, clean up any leftover index.lock files.
_CleanupPreviousGitRuns(path_to_dir)
# Checks out the master branch, throws an exception if git command fails.
CheckRunGit(['checkout', '-f', 'master'], cwd=path_to_dir)
if not _CreateAndChangeToSourceDirectory(opts.working_directory):
raise RuntimeError('Could not create bisect directory.')
if not SetupGitDepot(opts, custom_deps):
raise RuntimeError('Failed to grab source.')
def RunProcess(command, cwd=None, shell=False):
"""Runs an arbitrary command.
If output from the call is needed, use RunProcessAndRetrieveOutput instead.
Args:
command: A list containing the command and args to execute.
Returns:
The return code of the call.
"""
# On Windows, use shell=True to get PATH interpretation.
shell = shell or IsWindowsHost()
return subprocess.call(command, cwd=cwd, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
"""Runs an arbitrary command, returning its output and return code.
Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (i.e. so
that the buildbot doesn't terminate the script), consider RunProcess().
Args:
command: A list containing the command and args to execute.
cwd: A directory to change to while running the command. The command can be
relative to this directory. If this is None, the command will be run in
the current directory.
Returns:
A tuple of the output and return code.
"""
if cwd:
original_cwd = os.getcwd()
os.chdir(cwd)
# On Windows, use shell=True to get PATH interpretation.
shell = IsWindowsHost()
proc = subprocess.Popen(
command, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = proc.communicate()
if cwd:
os.chdir(original_cwd)
return (output, proc.returncode)
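def _ExampleRunProcessAndRetrieveOutput():
  """Illustrative sketch (not in the original module): captures the output
  of a short git command. The checkout path is hypothetical.
  """
  output, return_code = RunProcessAndRetrieveOutput(
      ['git', 'rev-parse', 'HEAD'], cwd='src')
  if not return_code:
    print 'HEAD is at %s' % output.strip()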
def IsStringInt(string_to_check):
"""Checks whether or not the given string can be converted to an int."""
try:
int(string_to_check)
return True
except ValueError:
return False
def IsStringFloat(string_to_check):
"""Checks whether or not the given string can be converted to a float."""
try:
float(string_to_check)
return True
except ValueError:
return False
def IsWindowsHost():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
"""Checks whether or not Windows is a 64-bit version."""
platform = os.environ.get('PROCESSOR_ARCHITEW6432')
if not platform:
# Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct.
platform = os.environ.get('PROCESSOR_ARCHITECTURE')
  # PROCESSOR_ARCHITECTURE is 'AMD64' or 'IA64' on 64-bit Windows.
  return platform and platform in ['AMD64', 'IA64']
def IsLinuxHost():
return sys.platform.startswith('linux')
def IsMacHost():
return sys.platform.startswith('darwin')
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes and functions for building Chrome.
This includes functions for running commands to build, as well as
specific rules about which targets to build.
"""
import os
import subprocess
import sys
import bisect_utils
ORIGINAL_ENV = {}
class Builder(object):
"""Subclasses of the Builder class are used by the bisect script to build
relevant targets.
"""
def __init__(self, opts):
"""Performs setup for building with target build system.
Args:
opts: Options parsed from command line.
Raises:
RuntimeError: Some condition necessary for building was not met.
"""
if bisect_utils.IsWindowsHost():
if not opts.build_preference:
opts.build_preference = 'msvs'
if opts.build_preference == 'msvs':
if not os.getenv('VS100COMNTOOLS'):
raise RuntimeError(
'Path to visual studio could not be determined.')
else:
# Need to re-escape goma dir, see crbug.com/394990.
if opts.goma_dir:
opts.goma_dir = opts.goma_dir.encode('string_escape')
SetBuildSystemDefault(opts.build_preference, opts.use_goma,
opts.goma_dir, opts.target_arch)
else:
if not opts.build_preference:
if 'ninja' in os.getenv('GYP_GENERATORS', default=''):
opts.build_preference = 'ninja'
else:
opts.build_preference = 'make'
SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)
if not SetupPlatformBuildEnvironment(opts):
raise RuntimeError('Failed to set platform environment.')
@staticmethod
def FromOpts(opts):
"""Constructs and returns a Builder object.
Args:
opts: Options parsed from the command-line.
"""
builder = None
if opts.target_platform == 'android':
builder = AndroidBuilder(opts)
elif opts.target_platform == 'android-chrome':
builder = AndroidChromeBuilder(opts)
else:
builder = DesktopBuilder(opts)
return builder
def Build(self, depot, opts):
"""Runs a command to build Chrome."""
raise NotImplementedError()
def GetBuildOutputDirectory(opts, src_dir=None):
"""Returns the path to the build directory, relative to the checkout root.
Assumes that the current working directory is the checkout root.
Args:
opts: Command-line options.
src_dir: Path to chromium/src directory.
Returns:
A path to the directory to use as build output directory.
Raises:
NotImplementedError: The platform according to sys.platform is unexpected.
"""
src_dir = src_dir or 'src'
if opts.build_preference == 'ninja' or bisect_utils.IsLinuxHost():
return os.path.join(src_dir, 'out')
if bisect_utils.IsMacHost():
return os.path.join(src_dir, 'xcodebuild')
if bisect_utils.IsWindowsHost():
return os.path.join(src_dir, 'build')
raise NotImplementedError('Unexpected platform %s' % sys.platform)
class DesktopBuilder(Builder):
"""DesktopBuilder is used to build Chromium on Linux, Mac, or Windows."""
def __init__(self, opts):
super(DesktopBuilder, self).__init__(opts)
def Build(self, depot, opts):
"""Builds chromium_builder_perf target using options passed into the script.
Args:
depot: Name of current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
targets = ['chromium_builder_perf']
threads = None
if opts.use_goma:
threads = opts.goma_threads
build_success = False
if opts.build_preference == 'make':
build_success = BuildWithMake(threads, targets, opts.target_build_type)
elif opts.build_preference == 'ninja':
build_success = BuildWithNinja(threads, targets, opts.target_build_type)
elif opts.build_preference == 'msvs':
assert bisect_utils.IsWindowsHost(), 'msvs is only supported on Windows.'
build_success = BuildWithVisualStudio(targets, opts.target_build_type)
else:
assert False, 'No build system defined.'
return build_success
class AndroidBuilder(Builder):
"""AndroidBuilder is used to build on android."""
def __init__(self, opts):
super(AndroidBuilder, self).__init__(opts)
# TODO(qyearsley): Make this a class method and verify that it works with
# a unit test.
# pylint: disable=R0201
def _GetTargets(self):
"""Returns a list of build targets."""
return [
'chrome_public_apk',
'cc_perftests_apk',
'android_tools'
]
def Build(self, depot, opts):
"""Builds the android content shell and other necessary tools.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
threads = None
if opts.use_goma:
threads = opts.goma_threads
build_success = False
if opts.build_preference == 'ninja':
build_success = BuildWithNinja(
threads, self._GetTargets(), opts.target_build_type)
else:
assert False, 'No build system defined.'
return build_success
class AndroidChromeBuilder(AndroidBuilder):
"""AndroidChromeBuilder is used to build "android-chrome".
This is slightly different from AndroidBuilder.
"""
def __init__(self, opts):
super(AndroidChromeBuilder, self).__init__(opts)
# TODO(qyearsley): Make this a class method and verify that it works with
# a unit test.
# pylint: disable=R0201
def _GetTargets(self):
"""Returns a list of build targets."""
return AndroidBuilder._GetTargets(self) + ['chrome_apk']
def SetBuildSystemDefault(build_system, use_goma, goma_dir, target_arch='ia32'):
"""Sets up any environment variables needed to build with the specified build
system.
Args:
build_system: A string specifying build system. Currently only 'ninja' or
'make' are supported.
    use_goma: Determines whether to use goma for compiling.
goma_dir: GOMA directory path.
target_arch: The target build architecture, ia32 or x64. Default is ia32.
"""
if build_system == 'ninja':
gyp_var = os.getenv('GYP_GENERATORS', default='')
    if not gyp_var or 'ninja' not in gyp_var:
if gyp_var:
os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
else:
os.environ['GYP_GENERATORS'] = 'ninja'
if bisect_utils.IsWindowsHost():
os.environ['GYP_DEFINES'] = 'component=shared_library '\
'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
'chromium_win_pch=0'
elif build_system == 'make':
os.environ['GYP_GENERATORS'] = 'make'
else:
raise RuntimeError('%s build not supported.' % build_system)
if use_goma:
os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''),
'use_goma=1')
if goma_dir:
os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir
  # Produce 64-bit chromium binaries when the target architecture is x64.
if target_arch == 'x64':
os.environ['GYP_DEFINES'] += ' target_arch=%s' % target_arch
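def _ExampleSetBuildSystemDefault():
  """Illustrative sketch (not in the original module): the environment that
  the function above produces for a 64-bit ninja + goma build. The goma
  path is hypothetical.
  """
  SetBuildSystemDefault('ninja', use_goma=True, goma_dir='/opt/goma',
                        target_arch='x64')
  # GYP_GENERATORS now contains 'ninja', and GYP_DEFINES ends with
  # '... use_goma=1 gomadir=/opt/goma target_arch=x64'.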
def SetupPlatformBuildEnvironment(opts):
"""Performs any platform-specific setup.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
True if successful.
"""
if 'android' in opts.target_platform:
CopyAndSaveOriginalEnvironmentVars()
return SetupAndroidBuildEnvironment(opts)
return True
def BuildWithMake(threads, targets, build_type='Release'):
"""Runs a make command with the given targets.
Args:
threads: The number of threads to use. None means unspecified/unlimited.
targets: List of make targets.
build_type: Release or Debug.
Returns:
True if the command had a 0 exit code, False otherwise.
"""
cmd = ['make', 'BUILDTYPE=%s' % build_type]
if threads:
cmd.append('-j%d' % threads)
cmd += targets
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def BuildWithNinja(threads, targets, build_type='Release'):
"""Runs a ninja command with the given targets."""
cmd = ['ninja', '-C', os.path.join('out', build_type)]
if threads:
cmd.append('-j%d' % threads)
cmd += targets
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def BuildWithVisualStudio(targets, build_type='Release'):
"""Runs a command to build the given targets with Visual Studio."""
path_to_devenv = os.path.abspath(
os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
cmd = [path_to_devenv, '/build', build_type, path_to_sln]
for t in targets:
cmd.extend(['/Project', t])
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def CopyAndSaveOriginalEnvironmentVars():
"""Makes a copy of the current environment variables.
Before making a copy of the environment variables and setting a global
variable, this function unsets a certain set of environment variables.
"""
# TODO: Waiting on crbug.com/255689, will remove this after.
vars_to_remove = [
'CHROME_SRC',
'CHROMIUM_GYP_FILE',
'GYP_DEFINES',
'GYP_GENERATORS',
'GYP_GENERATOR_FLAGS',
'OBJCOPY',
]
for key in os.environ:
if 'ANDROID' in key:
vars_to_remove.append(key)
for key in vars_to_remove:
    if key in os.environ:
del os.environ[key]
global ORIGINAL_ENV
ORIGINAL_ENV = os.environ.copy()
def SetupAndroidBuildEnvironment(opts, path_to_src=None):
"""Sets up the android build environment.
Args:
opts: The options parsed from the command line through parse_args().
path_to_src: Path to the src checkout.
Returns:
True if successful.
"""
# Revert the environment variables back to default before setting them up
# with envsetup.sh.
env_vars = os.environ.copy()
for k, _ in env_vars.iteritems():
del os.environ[k]
for k, v in ORIGINAL_ENV.iteritems():
os.environ[k] = v
envsetup_path = os.path.join('build', 'android', 'envsetup.sh')
proc = subprocess.Popen(['bash', '-c', 'source %s && env' % envsetup_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=path_to_src)
out, _ = proc.communicate()
for line in out.splitlines():
k, _, v = line.partition('=')
os.environ[k] = v
# envsetup.sh no longer sets OS=android in GYP_DEFINES environment variable.
# (See http://crrev.com/170273005). So, we set this variable explicitly here
# in order to build Chrome on Android.
if 'GYP_DEFINES' not in os.environ:
os.environ['GYP_DEFINES'] = 'OS=android'
else:
os.environ['GYP_DEFINES'] += ' OS=android'
if opts.use_goma:
os.environ['GYP_DEFINES'] += ' use_goma=1'
return not proc.returncode
# This config just runs the sunspider command once.
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/689
config = {
'command': 'tools/perf/run_benchmark -v --browser=android-chromium sunspider',
"max_time_minutes": "10",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# This should reproduce the regression in http://crbug.com/425582.
# It was based on:
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/704
config = {
'command': 'out/Release/content_unittests --single-process-tests --gtest_filter=DOMStorageAreaTest',
'good_revision': '311607',
'bad_revision': '311608',
'bisect_mode': 'return_code',
'builder_type': 'full',
}
# Workaround git try issue, see crbug.com/257689
# This should reproduce the regression in http://crbug.com/425582.
# It was based on:
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/704
config = {
'command': 'tools/perf/run_benchmark -v --browser=release page_cycler.intl_ar_fa_he',
'good_revision': '300138',
'bad_revision': '300149',
'metric': 'warm_times/page_load_time',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '25',
# Default is "perf".
# 'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the tab-switching command once.
# http://build.chromium.org/p/tryserver.chromium.perf/builders/linux_perf_bisect/builds/689
config = {
"command": "./tools/perf/run_benchmark -v tab_switching.typical_25 --browser=release",
"max_time_minutes": "30",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Based on http://crbug.com/420120.
config = {
'command': 'tools/perf/run_benchmark -v --browser=release page_cycler.bloat',
'good_revision': '297905',
'bad_revision': '297940',
'metric': 'warm_times/page_load_time',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config is based on http://crbug.com/435291.
config = {
'command': 'tools/perf/run_benchmark -v --browser=release tab_switching.five_blank_pages',
'good_revision': '304855',
'bad_revision': '304881',
'metric': 'idle_wakeups_total/idle_wakeups_total',
'repeat_count': '5',
'max_time_minutes': '10',
'truncate_percent': '25',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Starts bisect try jobs on multiple platforms using known-good configs.
The purpose of this script is to serve as an integration test for the
auto-bisect project by starting try jobs for various config types and
various platforms.
The known-good configs are in the same directory as this script. They are
all expected to end in ".cfg" and to start with the name of the platform
followed by a dot.
You can specify --full to try running each config on all applicable bots;
the default behavior is to try each config on only one bot.
"""
import argparse
import logging
import os
import subprocess
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
BISECT_CONFIG = os.path.join(SCRIPT_DIR, os.path.pardir, 'bisect.cfg')
PERF_TEST_CONFIG = os.path.join(
SCRIPT_DIR, os.path.pardir, os.path.pardir, 'run-perf-test.cfg')
PLATFORM_BOT_MAP = {
'linux': ['linux_perf_bisect'],
'mac': ['mac_10_9_perf_bisect', 'mac_10_10_perf_bisect'],
'win': ['win_perf_bisect', 'win_8_perf_bisect', 'win_xp_perf_bisect'],
'winx64': ['win_x64_perf_bisect'],
'android': [
'android_nexus4_perf_bisect',
'android_nexus5_perf_bisect',
'android_nexus7_perf_bisect',
],
}
SVN_URL = 'svn://svn.chromium.org/chrome-try/try-perf'
AUTO_COMMIT_MESSAGE = 'Automatic commit for bisect try job.'
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--full', action='store_true',
help='Run each config on all applicable bots.')
parser.add_argument('configs', nargs='+',
help='One or more sample config files.')
parser.add_argument('--verbose', '-v', action='store_true',
help='Output additional debugging information.')
parser.add_argument('--dry-run', action='store_true',
help='Don\'t execute "git try" while running.')
args = parser.parse_args(argv[1:])
_SetupLogging(args.verbose)
logging.debug('Source configs: %s', args.configs)
try:
_StartTryJobs(args.configs, args.full, args.dry_run)
except subprocess.CalledProcessError as error:
print str(error)
print error.output
def _SetupLogging(verbose):
level = logging.INFO
if verbose:
level = logging.DEBUG
logging.basicConfig(level=level)
def _StartTryJobs(source_configs, full_mode=False, dry_run=False):
"""Tries each of the given sample configs on one or more try bots."""
for source_config in source_configs:
dest_config = _DestConfig(source_config)
bot_names = _BotNames(source_config, full_mode=full_mode)
_StartTry(source_config, dest_config, bot_names, dry_run=dry_run)
def _DestConfig(source_config):
"""Returns the path that a sample config should be copied to."""
if 'bisect' in source_config:
return BISECT_CONFIG
assert 'perf_test' in source_config, source_config
return PERF_TEST_CONFIG
def _BotNames(source_config, full_mode=False):
"""Returns try bot names to use for the given config file name."""
platform = os.path.basename(source_config).split('.')[0]
assert platform in PLATFORM_BOT_MAP
bot_names = PLATFORM_BOT_MAP[platform]
if full_mode:
return bot_names
return [bot_names[0]]
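def _ExampleBotNames():
  """Illustrative sketch (not in the original module): the platform is the
  first dot-separated component of the config file name. The file names
  here are hypothetical.
  """
  assert _BotNames('configs/linux.bisect.cfg') == ['linux_perf_bisect']
  assert _BotNames('configs/mac.bisect.cfg', full_mode=True) == [
      'mac_10_9_perf_bisect', 'mac_10_10_perf_bisect']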
def _StartTry(source_config, dest_config, bot_names, dry_run=False):
"""Sends a try job with the given config to the given try bots.
Args:
source_config: Path of the sample config to copy over.
dest_config: Destination path to copy sample to, e.g. "./bisect.cfg".
    bot_names: List of try bot builder names.
    dry_run: If True, the "git try" command is logged but not executed.
"""
assert os.path.exists(source_config)
assert os.path.exists(dest_config)
assert _LastCommitMessage() != AUTO_COMMIT_MESSAGE
# Copy the sample config over and commit it.
_Run(['cp', source_config, dest_config])
_Run(['git', 'commit', '--all', '-m', AUTO_COMMIT_MESSAGE])
try:
# Start the try job.
job_name = 'Automatically-started (%s)' % os.path.basename(source_config)
try_command = ['git', 'try', '--svn_repo', SVN_URL, '--name', job_name]
for bot_name in bot_names:
try_command.extend(['--bot', bot_name])
print _Run(try_command, dry_run=dry_run)
finally:
# Revert the immediately-previous commit which was made just above.
assert _LastCommitMessage() == AUTO_COMMIT_MESSAGE
_Run(['git', 'reset', '--hard', 'HEAD~1'])
def _LastCommitMessage():
return _Run(['git', 'log', '--format=%s', '-1']).strip()
def _Run(command, dry_run=False):
"""Runs a command in a subprocess.
Args:
    command: The command given as an args list.
    dry_run: If True, log the command but do not run it.
Returns:
The output of the command.
Raises:
subprocess.CalledProcessError: The return-code was non-zero.
"""
logging.debug('Running %s', command)
if dry_run:
return 'Did not run command because this is a dry run.'
return subprocess.check_output(command)
if __name__ == '__main__':
sys.exit(main(sys.argv))
# Config based on http://crbug.com/444762.
config = {
'command': 'python tools/perf/run_benchmark -v --browser=release dromaeo.domcorequery',
'good_revision': '309431',
'bad_revision': '309442',
'metric': 'dom/dom',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the kraken test once.
config = {
"command": "python tools/perf/run_benchmark -v --browser=release kraken",
"max_time_minutes": "10",
"repeat_count": "1",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Config based on http://crbug.com/444762.
config = {
'command': 'python tools/perf/run_benchmark -v --browser=release dromaeo.domcorequery',
'good_revision': '309431',
'bad_revision': '309442',
'metric': 'dom/dom',
'repeat_count': '5',
'max_time_minutes': '5',
'truncate_percent': '20',
'builder_type': 'perf',
'target_arch': 'x64',
}
# Workaround git try issue, see crbug.com/257689
# This config just runs the kraken test once.
config = {
"command": "python tools/perf/run_benchmark -v --browser=release kraken",
"max_time_minutes": "10",
"repeat_count": "1",
"target_arch": "x64",
"truncate_percent": "0"
}
# Workaround git try issue, see crbug.com/257689
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functions for fetching and extracting archived builds.
The builds may be stored in different places by different types of builders;
for example, builders on tryserver.chromium.perf store builds in one place,
while builders on chromium.linux store builds in another.
This module can be either imported or run as a stand-alone script to download
and extract a build.
Usage: fetch_build.py <type> <revision> <output_dir> [options]
"""
import argparse
import errno
import logging
import os
import shutil
import sys
import zipfile
_PY_UTILS_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', 'third_party', 'catapult',
'common', 'py_utils'))
if _PY_UTILS_PATH not in sys.path:
sys.path.insert(1, _PY_UTILS_PATH)
from py_utils import cloud_storage
import bisect_utils
# Possible builder types.
PERF_BUILDER = 'perf'
FULL_BUILDER = 'full'
ANDROID_CHROME_PERF_BUILDER = 'android-chrome-perf'
# Maximum time in seconds to wait after posting build request to the try server.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400
# Try server status page URLs, used to get build status.
PERF_TRY_SERVER_URL = 'http://build.chromium.org/p/tryserver.chromium.perf'
LINUX_TRY_SERVER_URL = 'http://build.chromium.org/p/tryserver.chromium.linux'
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
target_arch='ia32', target_platform='chromium',
deps_patch_sha=None, extra_src=None):
"""Returns the location where a build archive is expected to be.
Args:
revision: Revision string, e.g. a git commit hash or SVN revision.
builder_type: Type of build archive.
target_arch: Architecture, e.g. "ia32".
target_platform: Platform name, e.g. "chromium" or "android".
deps_patch_sha: SHA1 hash which identifies a particular combination of
custom revisions for dependency repositories.
extra_src: Path to a script which can be used to modify the bisect script's
behavior.
Returns:
A pair of strings (bucket, path), where the archive is expected to be.
"""
logging.info('Getting GS URL for archive of builder "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
bucket = build_archive.BucketName()
remote_path = build_archive.FilePath(revision, deps_patch_sha=deps_patch_sha)
return bucket, remote_path
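def _ExampleGetBucketAndRemotePath():
  """Illustrative sketch (not in the original module): on a Linux host the
  default perf builder type resolves to the chrome-perf bucket and a
  'Linux Builder' archive path. The revision hash is hypothetical.
  """
  bucket, path = GetBucketAndRemotePath('4f9d1a', builder_type=PERF_BUILDER)
  # bucket == 'chrome-perf'
  # path == 'Linux Builder/full-build-linux_4f9d1a.zip'
  return bucket, path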
def GetBuilderNameAndBuildTime(builder_type=PERF_BUILDER, target_arch='ia32',
target_platform='chromium', extra_src=None):
"""Gets builder bot name and build time in seconds based on platform."""
logging.info('Getting builder name for builder "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
return build_archive.GetBuilderName(), build_archive.GetBuilderBuildTime()
def GetBuildBotUrl(builder_type=PERF_BUILDER, target_arch='ia32',
target_platform='chromium', extra_src=None):
"""Gets buildbot URL for a given builder type."""
logging.info('Getting buildbot URL for "%s", "%s", "%s".',
builder_type, target_arch, target_platform)
build_archive = BuildArchive.Create(
builder_type, target_arch=target_arch, target_platform=target_platform,
extra_src=extra_src)
return build_archive.GetBuildBotUrl()
class BuildArchive(object):
"""Represents a place where builds of some type are stored.
There are two pieces of information required to locate a file in Google
Cloud Storage, bucket name and file path. Subclasses of this class contain
specific logic about which bucket names and paths should be used to fetch
a build.
"""
@staticmethod
def Create(builder_type, target_arch='ia32', target_platform='chromium',
extra_src=None):
if builder_type == PERF_BUILDER:
return PerfBuildArchive(target_arch, target_platform)
if builder_type == FULL_BUILDER:
return FullBuildArchive(target_arch, target_platform)
if builder_type == ANDROID_CHROME_PERF_BUILDER:
try:
# Load and initialize a module in extra source file and
# return its module object to access android-chrome specific data.
loaded_extra_src = bisect_utils.LoadExtraSrc(extra_src)
return AndroidChromeBuildArchive(
target_arch, target_platform, loaded_extra_src)
except (IOError, TypeError, ImportError):
raise RuntimeError('Invalid or missing --extra_src. [%s]' % extra_src)
raise NotImplementedError('Builder type "%s" not supported.' % builder_type)
def __init__(self, target_arch='ia32', target_platform='chromium',
extra_src=None):
self._extra_src = extra_src
if bisect_utils.IsLinuxHost() and target_platform == 'android':
if target_arch == 'arm64':
self._platform = 'android_arm64'
else:
self._platform = 'android'
elif bisect_utils.IsLinuxHost() and target_platform == 'android-chrome':
self._platform = 'android-chrome'
elif bisect_utils.IsLinuxHost():
self._platform = 'linux'
elif bisect_utils.IsMacHost():
self._platform = 'mac'
elif bisect_utils.Is64BitWindows() and target_arch == 'x64':
self._platform = 'win64'
elif bisect_utils.IsWindowsHost():
self._platform = 'win'
else:
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def BucketName(self):
raise NotImplementedError()
def FilePath(self, revision, deps_patch_sha=None):
"""Returns the remote file path to download a build from.
Args:
revision: A Chromium revision; this could be a git commit hash or
commit position or SVN revision number.
deps_patch_sha: The SHA1 hash of a patch to the DEPS file, which
uniquely identifies a change to use a particular revision of
a dependency.
Returns:
      A file path, which does not include a bucket name.
"""
raise NotImplementedError()
def _ZipFileName(self, revision, deps_patch_sha=None):
"""Gets the file name of a zip archive for a particular revision.
This returns a file name of the form full-build-<platform>_<revision>.zip,
which is a format used by multiple types of builders that store archives.
Args:
revision: A git commit hash or other revision string.
deps_patch_sha: SHA1 hash of a DEPS file patch.
Returns:
The archive file name.
"""
base_name = 'full-build-%s' % self._PlatformName()
if deps_patch_sha:
revision = '%s_%s' % (revision, deps_patch_sha)
return '%s_%s.zip' % (base_name, revision)
def _PlatformName(self):
"""Return a string to be used in paths for the platform."""
if self._platform in ('win', 'win64'):
# Build archive for win64 is still stored with "win32" in the name.
return 'win32'
if self._platform in ('linux', 'android', 'android_arm64'):
# Android builds are also stored with "linux" in the name.
return 'linux'
if self._platform == 'mac':
return 'mac'
raise NotImplementedError('Unknown platform "%s".' % sys.platform)
def GetBuilderName(self):
raise NotImplementedError()
def GetBuilderBuildTime(self):
"""Returns the time to wait for a build after requesting one."""
if self._platform in ('win', 'win64'):
return MAX_WIN_BUILD_TIME
if self._platform in ('linux', 'android',
'android_arm64', 'android-chrome'):
return MAX_LINUX_BUILD_TIME
if self._platform == 'mac':
return MAX_MAC_BUILD_TIME
raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
def GetBuildBotUrl(self):
raise NotImplementedError()
class PerfBuildArchive(BuildArchive):
def BucketName(self):
return 'chrome-perf'
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the directory name to download builds from."""
platform_to_directory = {
'android': 'android_perf_rel',
'android_arm64': 'android_perf_rel_arm64',
'linux': 'Linux Builder',
'mac': 'Mac Builder',
'win64': 'Win x64 Builder',
'win': 'Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
def GetBuilderName(self):
"""Gets builder bot name based on platform."""
if self._platform == 'win64':
return 'winx64_bisect_builder'
elif self._platform == 'win':
return 'win_perf_bisect_builder'
elif self._platform == 'linux':
return 'linux_perf_bisect_builder'
elif self._platform == 'android':
return 'android_perf_bisect_builder'
elif self._platform == 'android_arm64':
return 'android_arm64_perf_bisect_builder'
elif self._platform == 'mac':
return 'mac_perf_bisect_builder'
raise NotImplementedError('Unsupported platform "%s".' % sys.platform)
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return PERF_TRY_SERVER_URL
class FullBuildArchive(BuildArchive):
def BucketName(self):
platform_to_bucket = {
'android': 'chromium-android',
'linux': 'chromium-linux-archive',
'mac': 'chromium-mac-archive',
'win64': 'chromium-win-archive',
'win': 'chromium-win-archive',
}
assert self._platform in platform_to_bucket
return platform_to_bucket.get(self._platform)
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the remote directory to download builds from."""
platform_to_directory = {
'android': 'android_main_rel',
'linux': 'chromium.linux/Linux Builder',
'mac': 'chromium.mac/Mac Builder',
'win64': 'chromium.win/Win x64 Builder',
'win': 'chromium.win/Win Builder',
}
assert self._platform in platform_to_directory
return platform_to_directory.get(self._platform)
def GetBuilderName(self):
"""Gets builder bot name based on platform."""
if self._platform == 'linux':
return 'linux_full_bisect_builder'
raise NotImplementedError('Unsupported platform "%s".' % self._platform)
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return LINUX_TRY_SERVER_URL
class AndroidChromeBuildArchive(BuildArchive):
"""Represents a place where builds of android-chrome type are stored.
If AndroidChromeBuildArchive is used, it is assumed that the --extra_src
is a valid Python module which contains the module-level functions
GetBucketName, GetArchiveDirectory, GetBuilderName and GetBuildBotUrl.
"""
def BucketName(self):
return self._extra_src.GetBucketName()
def _ZipFileName(self, revision, deps_patch_sha=None):
"""Gets the file name of a zip archive on android-chrome.
This returns a file name of the form build_product_<revision>.zip,
which is a format used by android-chrome.
Args:
revision: A git commit hash or other revision string.
deps_patch_sha: SHA1 hash of a DEPS file patch.
Returns:
The archive file name.
"""
if deps_patch_sha:
revision = '%s_%s' % (revision, deps_patch_sha)
return 'build_product_%s.zip' % revision
def FilePath(self, revision, deps_patch_sha=None):
return '%s/%s' % (self._ArchiveDirectory(),
self._ZipFileName(revision, deps_patch_sha))
def _ArchiveDirectory(self):
"""Returns the directory name to download builds from."""
return self._extra_src.GetArchiveDirectory()
def GetBuilderName(self):
"""Returns the builder name extra source."""
return self._extra_src.GetBuilderName()
def GetBuildBotUrl(self):
"""Returns buildbot URL for fetching build info."""
return self._extra_src.GetBuildBotUrl()
def BuildIsAvailable(bucket_name, remote_path):
"""Checks whether a build is currently archived at some place."""
logging.info('Checking existence: gs://%s/%s', bucket_name, remote_path)
try:
exists = cloud_storage.Exists(bucket_name, remote_path)
logging.info('Exists? %s', exists)
return exists
except cloud_storage.CloudStorageError:
return False
def FetchFromCloudStorage(bucket_name, source_path, destination_dir):
"""Fetches file(s) from the Google Cloud Storage.
As a side-effect, this prints messages to stdout about what's happening.
Args:
bucket_name: Google Storage bucket name.
source_path: Source file path.
destination_dir: Destination directory path.
Returns:
Local file path of downloaded file if it was downloaded. If the file does
not exist in the given bucket, or if there was an error while downloading,
None is returned.
"""
target_file = os.path.join(destination_dir, os.path.basename(source_path))
gs_url = 'gs://%s/%s' % (bucket_name, source_path)
try:
if cloud_storage.Exists(bucket_name, source_path):
logging.info('Fetching file from %s...', gs_url)
cloud_storage.Get(bucket_name, source_path, target_file)
if os.path.exists(target_file):
return target_file
else:
logging.info('File %s not found in cloud storage.', gs_url)
return None
except Exception as e:
logging.warn('Exception while fetching from cloud storage: %s', e)
if os.path.exists(target_file):
os.remove(target_file)
return None
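# Example usage (a minimal sketch; the bucket and remote path below are
# illustrative, modeled on the PerfBuildArchive layout above):
#   local_zip = FetchFromCloudStorage(
#       'chrome-perf', 'Linux Builder/full-build-linux_abc123.zip', '/tmp')
#   if local_zip:
#     Unzip(local_zip, '/tmp/build_dir')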
def Unzip(file_path, output_dir, verbose=True):
"""Extracts a zip archive's contents into the given output directory.
This was based on ExtractZip from build/scripts/common/chromium_utils.py.
Args:
file_path: Path of the zip file to extract.
output_dir: Path to the destination directory.
verbose: Whether to print out what is being extracted.
Raises:
IOError: The unzip command had a non-zero exit code.
RuntimeError: Failed to create the output directory.
"""
_MakeDirectory(output_dir)
# On Linux and Mac, we use the unzip command because it handles links and
# file permissions bits, so achieving this behavior is easier than with
# ZipInfo options.
#
# The Mac version of unzip unfortunately does not support Zip64, whereas
# the Python zipfile module does, so we have to fall back to the zipfile
# module on Mac if the file size is greater than 4GB.
mac_zip_size_limit = 2 ** 32 # 4GB
if (bisect_utils.IsLinuxHost() or
(bisect_utils.IsMacHost()
and os.path.getsize(file_path) < mac_zip_size_limit)):
unzip_command = ['unzip', '-o']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
# On Windows, try to use 7z if it is installed, otherwise fall back to the
# Python zipfile module. If 7z is not installed, then this may fail if the
# zip file is larger than 512MB.
sevenzip_path = r'C:\Program Files\7-Zip\7z.exe'
if bisect_utils.IsWindowsHost() and os.path.exists(sevenzip_path):
unzip_command = [sevenzip_path, 'x', '-y']
_UnzipUsingCommand(unzip_command, file_path, output_dir)
return
_UnzipUsingZipFile(file_path, output_dir, verbose)
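# Example (illustrative): on a Mac host, a 5GB archive exceeds
# mac_zip_size_limit (2 ** 32 bytes), so Unzip falls back to
# _UnzipUsingZipFile instead of the external unzip command.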
def _UnzipUsingCommand(unzip_command, file_path, output_dir):
"""Extracts a zip file using an external command.
Args:
unzip_command: An unzipping command, as a string list, without the filename.
file_path: Path to the zip file.
output_dir: The directory which the contents should be extracted to.
Raises:
IOError: The command had a non-zero exit code.
"""
absolute_filepath = os.path.abspath(file_path)
command = unzip_command + [absolute_filepath]
return_code = _RunCommandInDirectory(output_dir, command)
if return_code:
_RemoveDirectoryTree(output_dir)
raise IOError('Unzip failed: %s => %s' % (str(command), return_code))
def _RunCommandInDirectory(directory, command):
"""Changes to a directory, runs a command, then changes back."""
saved_dir = os.getcwd()
os.chdir(directory)
return_code = bisect_utils.RunProcess(command)
os.chdir(saved_dir)
return return_code
def _UnzipUsingZipFile(file_path, output_dir, verbose=True):
"""Extracts a zip file using the Python zipfile module."""
assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
zf = zipfile.ZipFile(file_path)
for name in zf.namelist():
if verbose:
print 'Extracting %s' % name
zf.extract(name, output_dir)
if bisect_utils.IsMacHost():
# Restore file permission bits.
mode = zf.getinfo(name).external_attr >> 16
os.chmod(os.path.join(output_dir, name), mode)
def _MakeDirectory(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _RemoveDirectoryTree(path):
try:
if os.path.exists(path):
shutil.rmtree(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def Main(argv):
"""Downloads and extracts a build based on the command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('builder_type')
parser.add_argument('revision')
parser.add_argument('output_dir')
parser.add_argument('--target-arch', default='ia32')
parser.add_argument('--target-platform', default='chromium')
parser.add_argument('--deps-patch-sha')
args = parser.parse_args(argv[1:])
bucket_name, remote_path = GetBucketAndRemotePath(
args.revision, args.builder_type, target_arch=args.target_arch,
target_platform=args.target_platform,
deps_patch_sha=args.deps_patch_sha)
print 'Bucket name: %s, remote path: %s' % (bucket_name, remote_path)
if not BuildIsAvailable(bucket_name, remote_path):
print 'Build is not available.'
return 1
FetchFromCloudStorage(bucket_name, remote_path, args.output_dir)
print 'Build has been downloaded to and extracted in %s.' % args.output_dir
return 0
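# Example invocation (an illustrative sketch; the builder type string must
# be one of the constants accepted by BuildArchive.Create, and the hash
# below is hypothetical):
#   python fetch_build.py <builder_type> 1234567890abcdef /tmp/out_dir
# This prints the bucket name and remote path, then downloads the build.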
if __name__ == '__main__':
sys.exit(Main(sys.argv))
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the fetch_builds module."""
import errno
import unittest
# The third-party mock module is expected to be available in PYTHONPATH.
import mock
import fetch_build
# The tests below test private functions (W0212).
# Some methods don't reference self because they use the mock module (R0201).
# pylint: disable=R0201,W0212
class FetchBuildTest(unittest.TestCase):
def setUp(self):
# A mock of the cloud_storage module is used in the methods below.
cloud_storage_patcher = mock.patch('fetch_build.cloud_storage')
self.mock_cloud_storage = cloud_storage_patcher.start()
self.addCleanup(cloud_storage_patcher.stop)
@mock.patch('fetch_build.os.path.exists')
def test_FetchFromCloudStorage_FileFound(self, mock_os_path_exists):
self.mock_cloud_storage.Exists.return_value = True
mock_os_path_exists.return_value = True
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertEqual('local/foo.zip', local_path)
self.mock_cloud_storage.Get.assert_called_with(
'my_bucket', 'remote/foo.zip', 'local/foo.zip')
def test_FetchFromCloudStorage_FileNotFound(self):
self.mock_cloud_storage.Exists.return_value = False
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertIsNone(local_path)
self.assertFalse(self.mock_cloud_storage.Get.called)
class BuildArchiveTest(unittest.TestCase):
def test_CreatePerfBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.PERF_BUILDER)
self.assertEqual('chrome-perf', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.PerfBuildArchive))
def test_CreateFullBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.FULL_BUILDER)
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.FullBuildArchive))
def test_BuildArchive_NonExistentType(self):
self.assertRaises(
NotImplementedError, fetch_build.BuildArchive.Create, 'other')
def test_FullBuildArchive_Linux(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertEqual(
'chromium.linux/Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_FullBuildArchive_Android(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'android'
self.assertEqual('chromium-android', archive.BucketName())
self.assertEqual(
'android_main_rel/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_FullBuildArchive_Linux_BuilderName(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'linux'
self.assertEqual('linux_full_bisect_builder', archive.GetBuilderName())
def test_FullBuildArchive_Windows_BuildTime(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'win'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_PerfBuildArchive_Linux(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_PerfBuildArchive_Android(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'android_perf_rel/full-build-linux_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_AndroidArm64(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android_arm64'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'android_perf_rel_arm64/full-build-linux_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_64BitWindows(self):
archive = fetch_build.PerfBuildArchive(target_arch='x64')
archive._platform = 'win64'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Win x64 Builder/full-build-win32_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_WithDepsPatchSha(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual(
'Linux Builder/full-build-linux_123456'
'_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.zip',
archive.FilePath(123456, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
def test_PerfBuildArchive_64BitWindows_BuilderName(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'win64'
self.assertEqual('winx64_bisect_builder', archive.GetBuilderName())
def test_PerfBuildArchive_64BitWindows_BuildTime(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'win64'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_PerfBuildArchive_Windows_BuilderName(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'win'
self.assertEqual('win_perf_bisect_builder', archive.GetBuilderName())
def test_PerfBuildArchive_Windows_BuildTime(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'win'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_PerfBuildArchive_Linux_BuilderName(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual('linux_perf_bisect_builder', archive.GetBuilderName())
def test_PerfBuildArchive_Linux_BuildTime(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_PerfBuildArchive_Android_BuilderName(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android'
self.assertEqual('android_perf_bisect_builder', archive.GetBuilderName())
def test_PerfBuildArchive_Android_BuildTime(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_PerfBuildArchive_Mac_BuilderName(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'mac'
self.assertEqual('mac_perf_bisect_builder', archive.GetBuilderName())
def test_PerfBuildArchive_mac_BuildTime(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'mac'
self.assertEqual(14400, archive.GetBuilderBuildTime())
def test_GetBuildBotUrl_Perf(self):
self.assertEqual(
fetch_build.PERF_TRY_SERVER_URL,
fetch_build.GetBuildBotUrl(fetch_build.PERF_BUILDER))
def test_GetBuildBotUrl_full(self):
self.assertEqual(
fetch_build.LINUX_TRY_SERVER_URL,
fetch_build.GetBuildBotUrl(fetch_build.FULL_BUILDER))
class UnzipTest(unittest.TestCase):
def setUp(self):
# Mocks of the os and bisect_utils modules are used in the methods below.
os_patcher = mock.patch('fetch_build.os')
self.mock_os = os_patcher.start()
self.addCleanup(os_patcher.stop)
bisect_utils_patcher = mock.patch('fetch_build.bisect_utils')
self.mock_bisect_utils = bisect_utils_patcher.start()
self.addCleanup(bisect_utils_patcher.stop)
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingCommand')
def test_Unzip_Linux(self, mock_UnzipUsingCommand, mock_MakeDirectory):
self.mock_bisect_utils.IsLinuxHost.return_value = True
self.mock_bisect_utils.IsMacHost.return_value = False
self.mock_bisect_utils.IsWindowsHost.return_value = False
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingCommand.assert_called_with(
['unzip', '-o'], 'x.zip', 'out_dir')
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingZipFile')
def test_Unzip_Mac_LargeFile(
self, mock_UnzipUsingZipFile, mock_MakeDirectory):
# The zipfile module is used to unzip on mac when the file is > 4GB.
self.mock_bisect_utils.IsLinuxHost.return_value = False
self.mock_bisect_utils.IsMacHost.return_value = True
self.mock_bisect_utils.IsWindowsHost.return_value = False
self.mock_os.path.getsize.return_value = 2 ** 33 # 8GB
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingZipFile.assert_called_with('x.zip', 'out_dir', False)
def test_UnzipUsingCommand(self):
# The _UnzipUsingCommand function should move to the output
# directory and run the command with the file's absolute path.
self.mock_os.path.abspath.return_value = '/foo/some/path/x.zip'
self.mock_os.getcwd.return_value = 'curr_dir'
self.mock_bisect_utils.RunProcess.return_value = 0
fetch_build._UnzipUsingCommand(['unzip'], 'x.zip', 'out_dir')
self.mock_os.chdir.assert_has_calls(
[mock.call('out_dir'), mock.call('curr_dir')])
self.mock_bisect_utils.RunProcess.assert_called_with(
['unzip', '/foo/some/path/x.zip'])
def test_MakeDirectory(self):
# _MakeDirectory uses os.makedirs.
fetch_build._MakeDirectory('some/path')
self.mock_os.makedirs.assert_called_with('some/path')
def test_MakeDirectory_RaisesError(self):
self.mock_os.makedirs.side_effect = OSError()
self.assertRaises(OSError, fetch_build._MakeDirectory, 'some/path')
def test_MakeDirectory_NoErrorIfDirectoryAlreadyExists(self):
already_exists = OSError()
already_exists.errno = errno.EEXIST
self.mock_os.makedirs.side_effect = already_exists
fetch_build._MakeDirectory('some/path')
@mock.patch('fetch_build.shutil')
def test_RemoveDirectoryTree(self, mock_shutil):
# _RemoveDirectoryTree uses shutil.rmtree.
fetch_build._RemoveDirectoryTree('some/path')
mock_shutil.rmtree.assert_called_with('some/path')
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""General statistical or mathematical functions."""
import math
def TruncatedMean(data_set, truncate_proportion):
"""Calculates the truncated mean of a set of values.
Note that this isn't just the mean of the set of values with the highest
and lowest values discarded; the non-discarded values are also weighted
differently depending how many values are discarded.
NOTE: If there's not much benefit from keeping and weighting partial
values, it might be better to use a simplified truncated mean function
without weighting.
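For example (illustrative): TruncatedMean([0, 1, 4, 5, 20, 100], 0.25)
discards one whole value from each end, weights the new endpoints 1 and 20
by 0.5 each, and returns (0.5 + 4 + 5 + 10) / 3.0 = 6.5.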
Args:
data_set: Non-empty list of values.
truncate_proportion: How much of the upper and lower portions of the data
set to discard, expressed as a value in the range [0, 1].
Note: A value of 0.5 or greater would be meaningless.
Returns:
The truncated mean as a float.
Raises:
TypeError: The data set was empty after discarding values.
"""
if len(data_set) > 2:
data_set = sorted(data_set)
discard_num_float = len(data_set) * truncate_proportion
discard_num_int = int(math.floor(discard_num_float))
kept_weight = len(data_set) - (discard_num_float * 2)
data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
weight_left = 1.0 - (discard_num_float - discard_num_int)
if weight_left < 1:
# If the % to discard leaves a fractional portion, need to weight those
# values.
unweighted_vals = data_set[1:len(data_set)-1]
weighted_vals = [data_set[0], data_set[len(data_set)-1]]
weighted_vals = [w * weight_left for w in weighted_vals]
data_set = weighted_vals + unweighted_vals
else:
kept_weight = len(data_set)
data_sum = reduce(lambda x, y: float(x) + float(y), data_set)
truncated_mean = data_sum / kept_weight
return truncated_mean
def Mean(values):
"""Calculates the arithmetic mean of a list of values."""
return TruncatedMean(values, 0.0)
def Variance(values):
"""Calculates the sample variance."""
if len(values) == 1:
return 0.0
mean = Mean(values)
differences_from_mean = [float(x) - mean for x in values]
squared_differences = [float(x * x) for x in differences_from_mean]
variance = sum(squared_differences) / (len(values) - 1)
return variance
def StandardDeviation(values):
"""Calculates the sample standard deviation of the given list of values."""
return math.sqrt(Variance(values))
def RelativeChange(before, after):
"""Returns the relative change of before and after, relative to before.
There are several different ways to define relative difference between
two numbers; sometimes it is defined as relative to the smaller number,
or to the mean of the two numbers. This version returns the difference
relative to the first of the two numbers.
Args:
before: A number representing an earlier value.
after: Another number, representing a later value.
Returns:
A non-negative floating point number; 0.1 represents a 10% change.
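For example (illustrative), RelativeChange(1.0, 1.5) and
RelativeChange(2.0, 1.0) both return 0.5, since in each case the
difference is half of the first value.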
"""
if before == after:
return 0.0
if before == 0:
return float('nan')
difference = after - before
return math.fabs(difference / before)
def PooledStandardError(work_sets):
"""Calculates the pooled sample standard error for a set of samples.
Args:
work_sets: A collection of collections of numbers.
Returns:
Pooled sample standard error.
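This is computed (a description of the code below, not a standard
reference formula) as
sqrt(sum((n_i - 1) * s_i**2) / sum(n_i - 1)) * sqrt(sum(1 / n_i)),
where s_i and n_i are the sample standard deviation and size of the
i-th work set.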
"""
numerator = 0.0
denominator1 = 0.0
denominator2 = 0.0
for current_set in work_sets:
std_dev = StandardDeviation(current_set)
numerator += (len(current_set) - 1) * std_dev ** 2
denominator1 += len(current_set) - 1
if len(current_set) > 0:
denominator2 += 1.0 / len(current_set)
if denominator1 == 0:
return 0.0
return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
# Redefining built-in 'StandardError'
# pylint: disable=W0622
def StandardError(values):
"""Calculates the standard error of a list of values."""
# NOTE: This behavior of returning 0.0 in the case of an empty list is
# inconsistent with Variance and StandardDeviation above.
if len(values) <= 1:
return 0.0
std_dev = StandardDeviation(values)
return std_dev / math.sqrt(len(values))
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
import math_utils
class MathUtilsTest(unittest.TestCase):
"""Tests for mathematical utility functions."""
def testTruncatedMean_EmptyList(self):
# TruncatedMean raises an error when passed an empty list.
self.assertRaises(TypeError, math_utils.TruncatedMean, [], 0)
def testTruncatedMean_TruncateTooMuch(self):
# An exception is raised if 50% or more is truncated from both sides.
self.assertRaises(TypeError, math_utils.TruncatedMean, [1, 2, 3], 1.0)
self.assertRaises(
ZeroDivisionError, math_utils.TruncatedMean, [1, 2, 3], 0.5)
def testTruncatedMean_AlwaysKeepsAtLeastTwoValues(self):
# If the length of the input is 1 or 2, nothing is truncated and
# the average is returned.
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.0))
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.25))
self.assertEqual(5.0, math_utils.TruncatedMean([5.0], 0.5))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.0))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.25))
self.assertEqual(5.5, math_utils.TruncatedMean([5.0, 6.0], 0.5))
def testTruncatedMean_Interquartile_NumValuesDivisibleByFour(self):
self.assertEqual(5.0, math_utils.TruncatedMean([1, 4, 6, 100], 0.25))
self.assertEqual(
6.5, math_utils.TruncatedMean([1, 2, 5, 6, 7, 8, 40, 50], 0.25))
def testTruncatedMean_Weighting(self):
# In the list [0, 1, 4, 5, 20, 100], when 25% of the list at the start
# and end are discarded, the part that's left is [1, 4, 5, 20], but
# first and last values are weighted so that they only count for half
# as much. So the truncated mean is (1/2 + 4 + 5 + 20/2) / 3.0, since the
# total weight kept is 4 - 2 * (1/2) = 3.
self.assertEqual(6.5, (0.5 + 4 + 5 + 10) / 3.0)
self.assertEqual(6.5, math_utils.TruncatedMean([0, 1, 4, 5, 20, 100], 0.25))
def testMean_OneValue(self):
self.assertEqual(3.0, math_utils.Mean([3]))
def testMean_ShortList(self):
self.assertEqual(0.5, math_utils.Mean([-3, 0, 1, 4]))
def testMean_CompareAlternateImplementation(self):
"""Tests Mean by comparing against an alternate implementation."""
def AlternateMean(values):
return sum(values) / float(len(values))
test_value_lists = [
[1],
[5, 6.5, 1.2, 3],
[-3, 0, 1, 4],
[-3, -1, 0.12, 0.752, 3.33, 8, 16, 32, 439],
]
for value_list in test_value_lists:
self.assertEqual(AlternateMean(value_list), math_utils.Mean(value_list))
def testRelativeChange_NonZero(self):
# The change is relative to the first value, regardless of which is bigger.
self.assertEqual(0.5, math_utils.RelativeChange(1.0, 1.5))
self.assertEqual(0.5, math_utils.RelativeChange(2.0, 1.0))
def testRelativeChange_FromZero(self):
# If the first number is zero, then the result is not a number.
self.assertEqual(0, math_utils.RelativeChange(0, 0))
self.assertTrue(math.isnan(math_utils.RelativeChange(0, 1)))
self.assertTrue(math.isnan(math_utils.RelativeChange(0, -1)))
def testRelativeChange_Negative(self):
# Note that the return value of RelativeChange is always positive.
self.assertEqual(3.0, math_utils.RelativeChange(-1, 2))
self.assertEqual(3.0, math_utils.RelativeChange(1, -2))
self.assertEqual(1.0, math_utils.RelativeChange(-1, -2))
def testVariance_EmptyList(self):
self.assertRaises(TypeError, math_utils.Variance, [])
def testVariance_OneValue(self):
self.assertEqual(0, math_utils.Variance([0]))
self.assertEqual(0, math_utils.Variance([4.3]))
def testVariance_ShortList(self):
# Population variance is the average of squared deviations from the mean.
# The deviations from the mean in this example are [3.5, 0.5, -0.5, -3.5],
# and the squared deviations are [12.25, 0.25, 0.25, 12.25].
# With sample variance, however, 1 is subtracted from the sample size.
# So the sample variance is sum([12.25, 0.25, 0.25, 12.25]) / 3.0.
self.assertAlmostEqual(8.333333334, sum([12.25, 0.25, 0.25, 12.25]) / 3.0)
self.assertAlmostEqual(8.333333334, math_utils.Variance([-3, 0, 1, 4]))
def testStandardDeviation(self):
# Standard deviation is the square root of variance.
self.assertRaises(TypeError, math_utils.StandardDeviation, [])
self.assertEqual(0.0, math_utils.StandardDeviation([4.3]))
self.assertAlmostEqual(2.88675135, math.sqrt(8.33333333333333))
self.assertAlmostEqual(2.88675135,
math_utils.StandardDeviation([-3, 0, 1, 4]))
def testStandardError(self):
# Standard error is std. dev. divided by square root of sample size.
self.assertEqual(0.0, math_utils.StandardError([]))
self.assertEqual(0.0, math_utils.StandardError([4.3]))
self.assertAlmostEqual(1.44337567, 2.88675135 / math.sqrt(4))
self.assertAlmostEqual(1.44337567, math_utils.StandardError([-3, 0, 1, 4]))
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to query the chromium issue tracker.
Note that documentation for the Issue Tracker API says it's DEPRECATED, however
it seems to be in use in other places like the performance dashboard. Also,
this module attempts to handle most exceptions thrown by querying the tracker
so that when and if this api is turned off no impact is caused to the bisection
process."""
import json
import urllib2
SINGLE_ISSUE_URL = ('https://code.google.com/feeds/issues/p/chromium/issues'
'/full?id=%s&alt=json')
class IssueTrackerQueryException(Exception):
pass
def QuerySingleIssue(issue_id, url_template=SINGLE_ISSUE_URL):
"""Queries the tracker for a specific issue. Returns a dict.
This uses the deprecated Issue Tracker API to fetch a JSON representation of
the issue details.
Args:
issue_id: An int or string representing the issue id.
url_template: URL template for querying the tracker, with '%s' in place
of the bug id.
Returns:
A dictionary as parsed by the JSON library from the tracker response.
Raises:
urllib2.HTTPError when appropriate.
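Example (illustrative): QuerySingleIssue(422382) fetches
https://code.google.com/feeds/issues/p/chromium/issues/full?id=422382&alt=json
and returns the parsed JSON response as a dict.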
"""
assert str(issue_id).isdigit()
response = urllib2.urlopen(url_template % issue_id).read()
return json.loads(response)
def GetIssueState(issue_id):
"""Returns either 'closed' or 'open' for the given bug ID.
Args:
issue_id: string or string-castable object containing a numeric bug ID.
Returns:
'open' or 'closed' depending on the state of the bug.
Raises:
IssueTrackerQueryException if the data cannot be retrieved or parsed.
"""
try:
query_response = QuerySingleIssue(issue_id)
# We assume the query returns a single result, hence the [0].
issue_detail = query_response['feed']['entry'][0]
state = issue_detail['issues$state']['$t']
return state
except urllib2.URLError:
raise IssueTrackerQueryException(
'Could not fetch the details from the issue tracker.')
except ValueError:
raise IssueTrackerQueryException(
'Could not parse the issue tracker\'s response as a JSON document.')
except KeyError:
raise IssueTrackerQueryException(
'The data from the issue tracker is not in the expected format.')
def CheckIssueClosed(issue_id):
"""Checks if a given issue is closed. Returns False when in doubt."""
# We only check when issue_id appears to be valid
if str(issue_id).isdigit():
try:
return GetIssueState(issue_id) == 'closed'
except IssueTrackerQueryException:
# We let this fall through to the return False
pass
# We return False for anything other than a positive number
return False
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import urllib2
from query_crbug import CheckIssueClosed
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
_current_directory = os.path.dirname(__file__)
_test_data_directory = os.path.join(_current_directory, 'test_data')
# These strings are simulated responses to various conditions when querying
# the chromium issue tracker.
CLOSED_ISSUE_DATA = open(os.path.join(_test_data_directory,
'closed.json')).read()
OPEN_ISSUE_DATA = open(os.path.join(_test_data_directory,
'open.json')).read()
UNEXPECTED_FORMAT_DATA = CLOSED_ISSUE_DATA.replace('issues$state', 'gibberish')
BROKEN_ISSUE_DATA = "\n<HTML><HEAD><TITLE>Not a JSON Doc</TITLE></HEAD></HTML>"
class MockResponse(object):
def __init__(self, result):
self._result = result
def read(self):
return self._result
def MockUrlOpen(url):
# Note that these strings DO NOT represent http responses. They are just
# memorable numeric bug ids to use.
if '200' in url:
return MockResponse(CLOSED_ISSUE_DATA)
elif '201' in url:
return MockResponse(OPEN_ISSUE_DATA)
elif '300' in url:
return MockResponse(UNEXPECTED_FORMAT_DATA)
elif '403' in url:
raise urllib2.URLError('')
elif '404' in url:
return MockResponse('')
elif '500' in url:
return MockResponse(BROKEN_ISSUE_DATA)
class crbugQueryTest(unittest.TestCase):
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testClosedIssueIsClosed(self):
self.assertTrue(CheckIssueClosed(200))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testOpenIssueIsNotClosed(self):
self.assertFalse(CheckIssueClosed(201))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testUnexpectedFormat(self):
self.assertFalse(CheckIssueClosed(300))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testUrlError(self):
self.assertFalse(CheckIssueClosed(403))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testEmptyResponse(self):
self.assertFalse(CheckIssueClosed(404))
@mock.patch('urllib2.urlopen', MockUrlOpen)
def testBrokenResponse(self):
self.assertFalse(CheckIssueClosed(500))
if __name__ == '__main__':
unittest.main()
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functionality for starting build try jobs via HTTP.
This includes both sending a request to start a job, and also related code
for querying the status of the job.
This module can be either run as a stand-alone script to send a request to a
builder, or imported and used by calling the public functions below.
"""
import json
import urllib2
# URL template for fetching JSON data about builds.
BUILDER_JSON_URL = ('%(server_url)s/json/builders/%(bot_name)s/builds/'
'%(build_num)s?as_text=1&filter=0')
# URL template for displaying build steps.
BUILDER_HTML_URL = '%(server_url)s/builders/%(bot_name)s/builds/%(build_num)s'
# Status codes that can be returned by the GetBuildStatus method
# From buildbot.status.builder.
# See: http://docs.buildbot.net/current/developer/results.html
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7)
OK = (SUCCESS, WARNINGS) # These indicate build is complete.
FAILED = (FAILURE, EXCEPTION, SKIPPED) # These indicate build failure.
PENDING = (RETRY, TRYPENDING) # These indicate in progress or in pending queue.
class ServerAccessError(Exception):
def __str__(self):
return '%s\nSorry, cannot connect to server.' % self.args[0]
def _IsBuildRunning(build_data):
"""Checks whether the build is in progress on buildbot.
The presence of a currentStep element in the build JSON indicates that the
build is in progress.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if build is in progress, otherwise False.
"""
current_step = build_data.get('currentStep')
if (current_step and current_step.get('isStarted') and
current_step.get('results') is None):
return True
return False
def _IsBuildFailed(build_data):
"""Checks whether the build failed on buildbot.
Sometimes the build status is marked as failed even though the compile and
packaging steps were successful. This may happen when intermediate steps of
lesser importance, such as gclient revert or generate_telemetry_profile,
fail. Therefore we do an additional check to confirm whether the build was
successful by calling _IsBuildSuccessful.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if the build failed, otherwise False.
"""
if (build_data.get('results') in FAILED and
not _IsBuildSuccessful(build_data)):
return True
return False
def _IsBuildSuccessful(build_data):
"""Checks whether the build succeeded on buildbot.
We treat a build as successful if the package_build step completed without
error, i.e., when the first element of this step's results attribute has
value 0 or 1.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if the build succeeded, otherwise False.
"""
if build_data.get('steps'):
for item in build_data.get('steps'):
# The 'results' attribute of each step consists of two elements,
# results[0]: This represents the status of build step.
# See: http://docs.buildbot.net/current/developer/results.html
# results[1]: List of items, contains text if step fails, otherwise empty.
if (item.get('name') == 'package_build' and
item.get('isFinished') and
item.get('results')[0] in OK):
return True
return False
def _FetchBuilderData(builder_url):
"""Fetches JSON data for the all the builds from the try server.
Args:
builder_url: A try server URL to fetch builds information.
Returns:
A dictionary with information about all the builds on the try server.
"""
data = None
try:
url = urllib2.urlopen(builder_url)
except urllib2.URLError as e:
print ('urllib2.urlopen error %s, waterfall status page down. [%s]' % (
builder_url, str(e)))
return None
if url is not None:
try:
data = url.read()
except IOError as e:
print 'urllib2 file object read error %s, [%s].' % (builder_url, str(e))
return data
def _GetBuildData(buildbot_url):
"""Gets build information for the given build id from the try server.
Args:
buildbot_url: A try server URL to fetch build information.
Returns:
A dictionary with build information if build exists, otherwise None.
"""
builds_json = _FetchBuilderData(buildbot_url)
if builds_json:
return json.loads(builds_json)
return None
def GetBuildStatus(build_num, bot_name, server_url):
"""Gets build status from the buildbot status page for a given build number.
Args:
build_num: A build number on try server to determine its status.
bot_name: Name of the bot where the build information is scanned.
server_url: URL of the buildbot.
Returns:
A pair which consists of build status (OK, FAILED or PENDING) and a
link to build status page on the waterfall.
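For example (illustrative), GetBuildStatus('42', 'linux_perf_bisect_builder',
server_url) returns (PENDING, None) if no build data could be fetched for
build 42, since results_url is only set once build data is available.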
"""
results_url = None
if build_num:
# Get the URL for requesting JSON data with status information.
buildbot_url = BUILDER_JSON_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': build_num,
}
build_data = _GetBuildData(buildbot_url)
if build_data:
# Link to build on the buildbot showing status of build steps.
results_url = BUILDER_HTML_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': build_num,
}
if _IsBuildFailed(build_data):
return (FAILED, results_url)
elif _IsBuildSuccessful(build_data):
return (OK, results_url)
return (PENDING, results_url)
def GetBuildNumFromBuilder(build_reason, bot_name, server_url):
"""Gets build number on build status page for a given 'build reason'.
This function parses the JSON data from the buildbot page, collects basic
information about all the builds, and then uniquely identifies the build
based on the 'reason' attribute in its JSON data.
The 'reason' attribute is set when a build request is posted, and it is
used to identify the build on the status page.
Args:
build_reason: A unique build name set to build on try server.
bot_name: Name of the bot where the build information is scanned.
server_url: URL of the buildbot.
Returns:
A build number as a string if found, otherwise None.
"""
buildbot_url = BUILDER_JSON_URL % {
'server_url': server_url,
'bot_name': bot_name,
'build_num': '_all',
}
builds_json = _FetchBuilderData(buildbot_url)
if builds_json:
builds_data = json.loads(builds_json)
for current_build in builds_data:
if builds_data[current_build].get('reason') == build_reason:
return builds_data[current_build].get('number')
return None
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all tests in all unit test modules in this directory."""
import os
import sys
import unittest
import logging
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
def main():
if 'full-log' in sys.argv:
# Configure logging to show line numbers and logging level
fmt = '%(module)s:%(lineno)d - %(levelname)s: %(message)s'
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout, format=fmt)
elif 'no-log' in sys.argv:
# Only WARN and above are shown, to standard error. (This is the logging
# module default config, hence we do nothing here)
pass
else:
# Behave as before. Make logging.info mimic print behavior
fmt = '%(message)s'
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=fmt)
# Running the tests depends on having the below modules in PYTHONPATH.
sys.path.append(os.path.join(SRC, 'third_party', 'catapult', 'telemetry'))
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
suite = unittest.TestSuite()
loader = unittest.TestLoader()
script_dir = os.path.dirname(__file__)
suite.addTests(loader.discover(start_dir=script_dir, pattern='*_test.py'))
print 'Running unit tests in %s...' % os.path.abspath(script_dir)
result = unittest.TextTestRunner(verbosity=1).run(suite)
return 0 if result.wasSuccessful() else 1
if __name__ == '__main__':
sys.exit(main())
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functions for performing source control operations."""
import bisect_utils
def IsInGitRepository():
output, _ = bisect_utils.RunGit(['rev-parse', '--is-inside-work-tree'])
return output.strip() == 'true'
def GetRevisionList(end_revision_hash, start_revision_hash, cwd=None):
"""Retrieves a list of git commit hashes in a range.
Args:
end_revision_hash: The SHA1 for the end of the range, inclusive.
start_revision_hash: The SHA1 for the beginning of the range, inclusive.
cwd: Working directory to run the git command from.
Returns:
A list of the git commit hashes in the range, in reverse time order --
that is, starting with |end_revision_hash|.
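For example (illustrative hashes), GetRevisionList('b2', 'a1') runs
"git log --format=%H -10000 --first-parent a1..b2" and appends 'a1' to
the resulting list, so both endpoints are included.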
"""
revision_range = '%s..%s' % (start_revision_hash, end_revision_hash)
cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
log_output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
revision_hash_list = log_output.split()
revision_hash_list.append(start_revision_hash)
return revision_hash_list
def SyncToRevision(revision, sync_client=None):
"""Syncs or checks out a revision based on sync_client argument.
Args:
revision: Git hash for the solutions with the format <repo>@rev.
E.g., "src@2ae43f...", "src/third_party/webkit@asr1234" etc.
sync_client: Name of the sync client to use, e.g. 'gclient'; if not
given, the revision is checked out with git.
Returns:
True if sync or checkout is successful, False otherwise.
"""
if not sync_client:
_, return_code = bisect_utils.RunGit(['checkout', revision])
elif sync_client == 'gclient':
return_code = bisect_utils.RunGClientAndSync([revision])
else:
raise NotImplementedError('Unsupported sync_client: "%s"' % sync_client)
return not return_code
def GetCurrentRevision(cwd=None):
"""Gets current revision of the given repository."""
return bisect_utils.CheckRunGit(['rev-parse', 'HEAD'], cwd=cwd).strip()
def ResolveToRevision(revision_to_check, depot, depot_deps_dict,
search, cwd=None):
"""Tries to resolve an SVN revision or commit position to a git SHA1.
Args:
revision_to_check: The user supplied revision string that may need to be
resolved to a git commit hash. This may be an SVN revision, git commit
position, or a git commit hash.
depot: The depot (dependency repository) that |revision_to_check| is from.
depot_deps_dict: A dictionary with information about different depots.
search: How many revisions forward or backward to search. If the value is
negative, the function will search backwards chronologically, otherwise
it will search forward.
Returns:
A string containing a git SHA1 hash, otherwise None.
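For example (illustrative), ResolveToRevision('12345', 'chromium', {}, 5)
greps the log of origin/master for a git-svn-id or Cr-Commit-Position
footer matching each of the numbers 12345 through 12349, and returns the
first matching commit hash.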
"""
# Android-chrome is git only, so no need to resolve this to anything else.
if depot == 'android-chrome':
return revision_to_check
# If the given revision can't be parsed as an integer, then it may already
# be a git commit hash.
if not bisect_utils.IsStringInt(revision_to_check):
return revision_to_check
depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
if depot != 'chromium':
depot_svn = depot_deps_dict[depot]['svn']
svn_revision = int(revision_to_check)
git_revision = None
if search > 0:
search_range = xrange(svn_revision, svn_revision + search, 1)
else:
search_range = xrange(svn_revision, svn_revision + search, -1)
for i in search_range:
# NOTE: Checking for the git-svn-id footer is for backwards compatibility.
# When we can assume that all the revisions we care about are from after
# git commit positions started getting added, we don't need to check this.
svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
commit_position_pattern = '^Cr-Commit-Position: .*@{#%d}' % i
cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
'--grep', commit_position_pattern, 'origin/master']
log_output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
log_output = log_output.strip()
if log_output:
git_revision = log_output
break
return git_revision
def IsInProperBranch():
"""Checks whether the current branch is "master"."""
cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
log_output = bisect_utils.CheckRunGit(cmd)
log_output = log_output.strip()
return log_output == 'master'
def GetCommitPosition(git_revision, cwd=None):
"""Finds git commit position for the given git hash.
This function executes "git footer --position-num <git hash>" command to get
commit position the given revision.
Args:
git_revision: The git SHA1 to use.
cwd: Working directory to run the command from.
Returns:
Git commit position as integer or None.
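For example (illustrative), a commit whose message ends with the footer
"Cr-Commit-Position: refs/heads/master@{#123456}" yields 123456.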
"""
# Some repositories (e.g., skia, angle) are pure git and, unlike the
# others, don't have commit positions.
cmd = ['footers', '--position-num', git_revision]
output, return_code = bisect_utils.RunGit(cmd, cwd)
if not return_code:
commit_position = output.strip()
if bisect_utils.IsStringInt(commit_position):
return int(commit_position)
return None
def GetCommitTime(git_revision, cwd=None):
"""Returns commit time for the given revision in UNIX timestamp."""
cmd = ['log', '--format=%ct', '-1', git_revision]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
return int(output)
def QueryRevisionInfo(revision, cwd=None):
"""Gathers information on a particular revision, such as author's name,
email, subject, and date.
Args:
revision: Revision you want to gather information on; a git commit hash.
Returns:
A dict in the following format:
{
'author': %s,
'email': %s,
'date': %s,
'subject': %s,
'body': %s,
}
"""
commit_info = {}
formats = ['%aN', '%aE', '%s', '%cD', '%b']
targets = ['author', 'email', 'subject', 'date', 'body']
for i in xrange(len(formats)):
cmd = ['log', '--format=%s' % formats[i], '-1', revision]
output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
commit_info[targets[i]] = output.rstrip()
return commit_info
def CheckoutFileAtRevision(file_name, revision, cwd=None):
"""Performs a checkout on a file at the given revision.
Returns:
True if successful.
"""
command = ['checkout', revision, file_name]
_, return_code = bisect_utils.RunGit(command, cwd=cwd)
return not return_code
def RevertFileToHead(file_name):
"""Un-stages a file and resets the file's state to HEAD.
Returns:
True if successful.
"""
# Reset doesn't seem to return 0 on success.
bisect_utils.RunGit(['reset', 'HEAD', file_name])
_, return_code = bisect_utils.RunGit(
['checkout', bisect_utils.FILE_DEPS_GIT])
return not return_code
def QueryFileRevisionHistory(filename, revision_start, revision_end):
"""Returns a list of commits that modified this file.
Args:
filename: Name of file.
revision_start: Start of revision range (inclusive).
revision_end: End of revision range.
Returns:
A list of commits that touched this file.
"""
cmd = [
'log',
'--format=%H',
'%s~1..%s' % (revision_start, revision_end),
'--',
filename,
]
output = bisect_utils.CheckRunGit(cmd)
lines = output.split('\n')
return [o for o in lines if o]
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the source_control module."""
import unittest
import mock
import source_control
class SourceControlTest(unittest.TestCase):
@mock.patch('source_control.bisect_utils.CheckRunGit')
def testQueryRevisionInfo(self, mock_run_git):
# The QueryRevisionInfo function should run a sequence of git commands,
# then returns a dict with the results.
command_output_map = [
(['log', '--format=%aN', '-1', 'abcd1234'], 'Some Name\n'),
(['log', '--format=%aE', '-1', 'abcd1234'], 'somename@x.com'),
(['log', '--format=%s', '-1', 'abcd1234'], 'Commit subject '),
(['log', '--format=%cD', '-1', 'abcd1234'], 'Fri, 10 Oct 2014'),
(['log', '--format=%b', '-1', 'abcd1234'], 'Commit body\n'),
]
_SetMockCheckRunGitBehavior(mock_run_git, command_output_map)
# The result of calling QueryRevisionInfo is a dictionary like that below.
# Trailing whitespace is stripped.
expected = {
'author': 'Some Name',
'email': 'somename@x.com',
'date': 'Fri, 10 Oct 2014',
'subject': 'Commit subject',
'body': 'Commit body',
}
self.assertEqual(expected, source_control.QueryRevisionInfo('abcd1234'))
self.assertEqual(5, mock_run_git.call_count)
def testResolveToRevision_InputGitHash(self):
# The ResolveToRevision function returns a git commit hash corresponding
# to the input, so if the input can't be parsed as an int, it is returned.
self.assertEqual(
'abcd1234',
source_control.ResolveToRevision('abcd1234', 'chromium', {}, 5))
# Note: It actually does this for any junk that isn't an int. This isn't
# necessarily desired behavior.
self.assertEqual(
'foo bar',
source_control.ResolveToRevision('foo bar', 'chromium', {}, 5))
@mock.patch('source_control.bisect_utils.CheckRunGit')
def testResolveToRevision_NotFound(self, mock_run_git):
# If no corresponding git hash was found, then None is returned.
mock_run_git.return_value = ''
self.assertIsNone(
source_control.ResolveToRevision('12345', 'chromium', {}, 5))
@mock.patch('source_control.bisect_utils.CheckRunGit')
def testResolveToRevision_Found(self, mock_run_git):
# In general, ResolveToRevision finds a git commit hash by repeatedly
# calling "git log --grep ..." with different numbers until something
# matches.
mock_run_git.return_value = 'abcd1234'
self.assertEqual(
'abcd1234',
source_control.ResolveToRevision('12345', 'chromium', {}, 5))
self.assertEqual(1, mock_run_git.call_count)
def _SetMockCheckRunGitBehavior(mock_obj, command_output_map):
"""Sets the behavior of a mock function according to the given mapping."""
# Unused argument 'cwd', expected in args list but not needed.
# pylint: disable=W0613
def FakeCheckRunGit(in_command, cwd=None):
for command, output in command_output_map:
if command == in_command:
return output
mock_obj.side_effect = FakeCheckRunGit
if __name__ == '__main__':
unittest.main()
{
"version": "1.0",
"encoding": "UTF-8",
"feed": {
"xmlns": "http://www.w3.org/2005/Atom",
"xmlns$openSearch": "http://a9.com/-/spec/opensearch/1.1/",
"xmlns$gd": "http://schemas.google.com/g/2005",
"xmlns$issues": "http://schemas.google.com/projecthosting/issues/2009",
"id": {
"$t": "http://code.google.com/feeds/issues/p/chromium/issues/full"
},
"updated": {
"$t": "2014-10-31T23:44:30.795Z"
},
"title": {
"$t": "Issues - chromium"
},
"subtitle": {
"$t": "Issues - chromium"
},
"link": [
{
"rel": "alternate",
"type": "text/html",
"href": "http://code.google.com/p/chromium/issues/list"
},
{
"rel": "http://schemas.google.com/g/2005#feed",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full"
},
{
"rel": "http://schemas.google.com/g/2005#post",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full"
},
{
"rel": "self",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full?alt=json&max-results=1&id=422382"
}
],
"generator": {
"$t": "ProjectHosting",
"version": "1.0",
"uri": "http://code.google.com/feeds/issues"
},
"openSearch$totalResults": {
"$t": 1
},
"openSearch$startIndex": {
"$t": 1
},
"openSearch$itemsPerPage": {
"$t": 1
},
"entry": [
{
"gd$etag": "W/\"CUUFSX47eCl7ImA9XRdQGEk.\"",
"id": {
"$t": "http://code.google.com/feeds/issues/p/chromium/issues/full/422382"
},
"published": {
"$t": "2014-10-10T17:07:06.000Z"
},
"updated": {
"$t": "2014-10-20T22:13:38.000Z"
},
"title": {
"$t": "11.1% regression in speedometer at 298653:298680"
},
"content": {
"$t": "See the link to graphs below.",
"type": "html"
},
"link": [
{
"rel": "replies",
"type": "application/atom+xml",
"href": "http://code.google.com/feeds/issues/p/chromium/issues/422382/comments/full"
},
{
"rel": "alternate",
"type": "text/html",
"href": "http://code.google.com/p/chromium/issues/detail?id=422382"
},
{
"rel": "self",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full/422382"
}
],
"author": [
{
"name": {
"$t": "gov...@chromium.org"
},
"uri": {
"$t": "/u/104724762920274240672/"
}
}
],
"issues$closedDate": {
"$t": "2014-10-20T22:10:22.000Z"
},
"issues$id": {
"$t": 422382
},
"issues$label": [
{
"$t": "Type-Bug-Regression"
},
{
"$t": "Performance-Sheriff"
},
{
"$t": "Pri-2"
}
],
"issues$owner": {
"issues$uri": {
"$t": "/u/104724762920274240672/"
},
"issues$username": {
"$t": "gov...@chromium.org"
}
},
"issues$stars": {
"$t": 0
},
"issues$state": {
"$t": "closed"
},
"issues$status": {
"$t": "WontFix"
}
}
]
}
}
{
"version": "1.0",
"encoding": "UTF-8",
"feed": {
"xmlns": "http://www.w3.org/2005/Atom",
"xmlns$openSearch": "http://a9.com/-/spec/opensearch/1.1/",
"xmlns$gd": "http://schemas.google.com/g/2005",
"xmlns$issues": "http://schemas.google.com/projecthosting/issues/2009",
"id": {
"$t": "http://code.google.com/feeds/issues/p/chromium/issues/full"
},
"updated": {
"$t": "2014-10-31T23:44:18.640Z"
},
"title": {
"$t": "Issues - chromium"
},
"subtitle": {
"$t": "Issues - chromium"
},
"link": [
{
"rel": "alternate",
"type": "text/html",
"href": "http://code.google.com/p/chromium/issues/list"
},
{
"rel": "http://schemas.google.com/g/2005#feed",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full"
},
{
"rel": "http://schemas.google.com/g/2005#post",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full"
},
{
"rel": "self",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full?alt=json&max-results=1&id=424688"
}
],
"generator": {
"$t": "ProjectHosting",
"version": "1.0",
"uri": "http://code.google.com/feeds/issues"
},
"openSearch$totalResults": {
"$t": 1
},
"openSearch$startIndex": {
"$t": 1
},
"openSearch$itemsPerPage": {
"$t": 1
},
"entry": [
{
"gd$etag": "W/\"A08NQX47eCl7ImA9XRdXFkw.\"",
"id": {
"$t": "http://code.google.com/feeds/issues/p/chromium/issues/full/424688"
},
"published": {
"$t": "2014-10-17T18:50:15.000Z"
},
"updated": {
"$t": "2014-10-29T21:58:10.000Z"
},
"title": {
"$t": "Should Not Start Bisect on Closed Bugs."
},
"content": {
"$t": "I have noticed that in some cases bisect jobs are running for closed bugs:\r\nhttps://code.google.com/p/chromium/issues/detail?id=422661\r\nhttps://code.google.com/p/chromium/issues/detail?id=422228\r\nhttps://code.google.com/p/chromium/issues/detail?id=421488\r\n\r\nIt is possible that the bugs can be marked as closed when the corresponding bisect jobs are in the queue. So to avoid bisects on such bugs, can we please add logic to bisect script to first check for bug state before running the bisect. This will save us from doing some unnecessary bisects.\r\n\r\n",
"type": "html"
},
"link": [
{
"rel": "replies",
"type": "application/atom+xml",
"href": "http://code.google.com/feeds/issues/p/chromium/issues/424688/comments/full"
},
{
"rel": "alternate",
"type": "text/html",
"href": "http://code.google.com/p/chromium/issues/detail?id=424688"
},
{
"rel": "self",
"type": "application/atom+xml",
"href": "https://code.google.com/feeds/issues/p/chromium/issues/full/424688"
}
],
"author": [
{
"name": {
"$t": "anan...@chromium.org"
},
"uri": {
"$t": "/u/112777092906361529031/"
}
}
],
"issues$cc": [
{
"issues$uri": {
"$t": "/u/116704265016059607269/"
},
"issues$username": {
"$t": "pras...@chromium.org"
}
},
{
"issues$uri": {
"$t": "/u/107012661329935444717/"
},
"issues$username": {
"$t": "qyears...@chromium.org"
}
},
{
"issues$uri": {
"$t": "/u/tonyg@chromium.org/"
},
"issues$username": {
"$t": "tonyg@chromium.org"
}
},
{
"issues$uri": {
"$t": "/u/114810703796781371055/"
},
"issues$username": {
"$t": "robert...@chromium.org"
}
}
],
"issues$id": {
"$t": 424688
},
"issues$label": [
{
"$t": "Type-Feature"
},
{
"$t": "Pri-2"
},
{
"$t": "Cr-Tests-AutoBisect"
},
{
"$t": "OS-All"
}
],
"issues$owner": {
"issues$uri": {
"$t": "/u/114810703796781371055/"
},
"issues$username": {
"$t": "robert...@chromium.org"
}
},
"issues$stars": {
"$t": 1
},
"issues$state": {
"$t": "open"
},
"issues$status": {
"$t": "Assigned"
}
}
]
}
}
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for doing independent two-sample t-tests and looking up p-values.
Note: This module was copied from the Performance Dashboard code, and changed
to use definitions of mean and variance from math_utils instead of numpy.
> A t-test is any statistical hypothesis test in which the test statistic
> follows a Student's t distribution if the null hypothesis is supported.
> It can be used to determine if two sets of data are significantly different
> from each other.
There are several conditions that the data under test should meet in order
for a t-test to be completely applicable:
- The data should be roughly normal in distribution.
- The two samples that are compared should be roughly similar in size.
References:
http://en.wikipedia.org/wiki/Student%27s_t-test
http://en.wikipedia.org/wiki/Welch%27s_t-test
https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py#L3244
"""
import math
import math_utils
def WelchsTTest(sample1, sample2):
"""Performs Welch's t-test on the two samples.
Welch's t-test is an adaptation of Student's t-test which is used when the
two samples may have unequal variances. It is also an independent two-sample
t-test.
Args:
sample1: A collection of numbers.
sample2: Another collection of numbers.
Returns:
A 3-tuple (t-statistic, degrees of freedom, p-value).
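Example (illustrative values):
  t, df, p = WelchsTTest([20.5, 21.0, 20.8], [19.1, 19.5, 19.2])
A small p-value (e.g. below 0.05) suggests that the difference between
the two samples is statistically significant.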
"""
mean1 = math_utils.Mean(sample1)
mean2 = math_utils.Mean(sample2)
v1 = math_utils.Variance(sample1)
v2 = math_utils.Variance(sample2)
n1 = len(sample1)
n2 = len(sample2)
t = _TValue(mean1, mean2, v1, v2, n1, n2)
df = _DegreesOfFreedom(v1, v2, n1, n2)
p = _LookupPValue(t, df)
return t, df, p
def _TValue(mean1, mean2, v1, v2, n1, n2):
"""Calculates a t-statistic value using the formula for Welch's t-test.
The t value can be thought of as a signal-to-noise ratio; a higher t-value
tells you that the groups are more different.
Args:
mean1: Mean of sample 1.
mean2: Mean of sample 2.
v1: Variance of sample 1.
v2: Variance of sample 2.
n1: Sample size of sample 1.
n2: Sample size of sample 2.
Returns:
A t value, which may be negative or positive.
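This implements t = (mean1 - mean2) / sqrt(v1 / n1 + v2 / n2).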
"""
# If variance of both segments is zero, return some large t-value.
if v1 == 0 and v2 == 0:
return 1000.0
return (mean1 - mean2) / (math.sqrt(v1 / n1 + v2 / n2))
def _DegreesOfFreedom(v1, v2, n1, n2):
"""Calculates degrees of freedom using the Welch-Satterthwaite formula.
Degrees of freedom is a measure of sample size. For other types of tests,
degrees of freedom is sometimes N - 1, where N is the sample size. However,
Args:
v1: Variance of sample 1.
v2: Variance of sample 2.
n1: Size of sample 2.
n2: Size of sample 2.
Returns:
An estimate of degrees of freedom. Must be at least 1.0.
"""
# When there's no variance in either sample, return 1.
if v1 == 0 and v2 == 0:
return 1
  # If either sample is too small for the formula below (which divides by
  # n - 1), also return the minimum (1).
  if n1 <= 1 or n2 <= 1:
    return 1
df = (((v1 / n1 + v2 / n2) ** 2) /
((v1 ** 2) / ((n1 ** 2) * (n1 - 1)) +
(v2 ** 2) / ((n2 ** 2) * (n2 - 1))))
return max(1, df)
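# Worked example for _DegreesOfFreedom (hypothetical values, for illustration
# only): with v1=v2=4 and n1=n2=8,
#   df = (4/8 + 4/8)**2 / (2 * 4**2 / (8**2 * 7)) = 1.0 / (32/448.) = 14.0
# which equals n1 + n2 - 2, as expected when variances and sizes are equal.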
# Below is a hard-coded table for looking up p-values.
#
# Normally, p-values are calculated based on the t-distribution formula.
# Looking up pre-calculated values is a less accurate but less complicated
# alternative.
#
# Reference: http://www.sjsu.edu/faculty/gerstman/StatPrimer/t-table.pdf
# A list of p-values for a two-tailed test. The entries correspond to
# entries in the rows of the table below.
TWO_TAIL = [1, 0.20, 0.10, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001]
# A map of degrees of freedom to lists of t-values. The index of the t-value
# can be used to look up the corresponding p-value.
TABLE = {
1: [0, 3.078, 6.314, 12.706, 31.820, 63.657, 127.321, 318.309, 636.619],
2: [0, 1.886, 2.920, 4.303, 6.965, 9.925, 14.089, 22.327, 31.599],
3: [0, 1.638, 2.353, 3.182, 4.541, 5.841, 7.453, 10.215, 12.924],
4: [0, 1.533, 2.132, 2.776, 3.747, 4.604, 5.598, 7.173, 8.610],
5: [0, 1.476, 2.015, 2.571, 3.365, 4.032, 4.773, 5.893, 6.869],
6: [0, 1.440, 1.943, 2.447, 3.143, 3.707, 4.317, 5.208, 5.959],
7: [0, 1.415, 1.895, 2.365, 2.998, 3.499, 4.029, 4.785, 5.408],
8: [0, 1.397, 1.860, 2.306, 2.897, 3.355, 3.833, 4.501, 5.041],
9: [0, 1.383, 1.833, 2.262, 2.821, 3.250, 3.690, 4.297, 4.781],
10: [0, 1.372, 1.812, 2.228, 2.764, 3.169, 3.581, 4.144, 4.587],
11: [0, 1.363, 1.796, 2.201, 2.718, 3.106, 3.497, 4.025, 4.437],
12: [0, 1.356, 1.782, 2.179, 2.681, 3.055, 3.428, 3.930, 4.318],
13: [0, 1.350, 1.771, 2.160, 2.650, 3.012, 3.372, 3.852, 4.221],
14: [0, 1.345, 1.761, 2.145, 2.625, 2.977, 3.326, 3.787, 4.140],
15: [0, 1.341, 1.753, 2.131, 2.602, 2.947, 3.286, 3.733, 4.073],
16: [0, 1.337, 1.746, 2.120, 2.584, 2.921, 3.252, 3.686, 4.015],
17: [0, 1.333, 1.740, 2.110, 2.567, 2.898, 3.222, 3.646, 3.965],
18: [0, 1.330, 1.734, 2.101, 2.552, 2.878, 3.197, 3.610, 3.922],
19: [0, 1.328, 1.729, 2.093, 2.539, 2.861, 3.174, 3.579, 3.883],
20: [0, 1.325, 1.725, 2.086, 2.528, 2.845, 3.153, 3.552, 3.850],
21: [0, 1.323, 1.721, 2.080, 2.518, 2.831, 3.135, 3.527, 3.819],
22: [0, 1.321, 1.717, 2.074, 2.508, 2.819, 3.119, 3.505, 3.792],
23: [0, 1.319, 1.714, 2.069, 2.500, 2.807, 3.104, 3.485, 3.768],
24: [0, 1.318, 1.711, 2.064, 2.492, 2.797, 3.090, 3.467, 3.745],
25: [0, 1.316, 1.708, 2.060, 2.485, 2.787, 3.078, 3.450, 3.725],
26: [0, 1.315, 1.706, 2.056, 2.479, 2.779, 3.067, 3.435, 3.707],
27: [0, 1.314, 1.703, 2.052, 2.473, 2.771, 3.057, 3.421, 3.690],
28: [0, 1.313, 1.701, 2.048, 2.467, 2.763, 3.047, 3.408, 3.674],
29: [0, 1.311, 1.699, 2.045, 2.462, 2.756, 3.038, 3.396, 3.659],
30: [0, 1.310, 1.697, 2.042, 2.457, 2.750, 3.030, 3.385, 3.646],
31: [0, 1.309, 1.695, 2.040, 2.453, 2.744, 3.022, 3.375, 3.633],
32: [0, 1.309, 1.694, 2.037, 2.449, 2.738, 3.015, 3.365, 3.622],
33: [0, 1.308, 1.692, 2.035, 2.445, 2.733, 3.008, 3.356, 3.611],
34: [0, 1.307, 1.691, 2.032, 2.441, 2.728, 3.002, 3.348, 3.601],
35: [0, 1.306, 1.690, 2.030, 2.438, 2.724, 2.996, 3.340, 3.591],
36: [0, 1.306, 1.688, 2.028, 2.434, 2.719, 2.991, 3.333, 3.582],
37: [0, 1.305, 1.687, 2.026, 2.431, 2.715, 2.985, 3.326, 3.574],
38: [0, 1.304, 1.686, 2.024, 2.429, 2.712, 2.980, 3.319, 3.566],
39: [0, 1.304, 1.685, 2.023, 2.426, 2.708, 2.976, 3.313, 3.558],
40: [0, 1.303, 1.684, 2.021, 2.423, 2.704, 2.971, 3.307, 3.551],
42: [0, 1.302, 1.682, 2.018, 2.418, 2.698, 2.963, 3.296, 3.538],
44: [0, 1.301, 1.680, 2.015, 2.414, 2.692, 2.956, 3.286, 3.526],
46: [0, 1.300, 1.679, 2.013, 2.410, 2.687, 2.949, 3.277, 3.515],
48: [0, 1.299, 1.677, 2.011, 2.407, 2.682, 2.943, 3.269, 3.505],
50: [0, 1.299, 1.676, 2.009, 2.403, 2.678, 2.937, 3.261, 3.496],
60: [0, 1.296, 1.671, 2.000, 2.390, 2.660, 2.915, 3.232, 3.460],
70: [0, 1.294, 1.667, 1.994, 2.381, 2.648, 2.899, 3.211, 3.435],
80: [0, 1.292, 1.664, 1.990, 2.374, 2.639, 2.887, 3.195, 3.416],
90: [0, 1.291, 1.662, 1.987, 2.369, 2.632, 2.878, 3.183, 3.402],
100: [0, 1.290, 1.660, 1.984, 2.364, 2.626, 2.871, 3.174, 3.391],
120: [0, 1.289, 1.658, 1.980, 2.358, 2.617, 2.860, 3.160, 3.373],
150: [0, 1.287, 1.655, 1.976, 2.351, 2.609, 2.849, 3.145, 3.357],
200: [0, 1.286, 1.652, 1.972, 2.345, 2.601, 2.839, 3.131, 3.340],
300: [0, 1.284, 1.650, 1.968, 2.339, 2.592, 2.828, 3.118, 3.323],
500: [0, 1.283, 1.648, 1.965, 2.334, 2.586, 2.820, 3.107, 3.310],
}
def _LookupPValue(t, df):
"""Looks up a p-value in a t-distribution table.
Args:
t: A t statistic value; the result of a t-test.
df: Number of degrees of freedom.
Returns:
A p-value, which represents the likelihood of obtaining a result at least
as extreme as the one observed just by chance (the null hypothesis).
"""
assert df >= 1, 'Degrees of freedom must be positive'
# We ignore the negative sign on the t-value because our null hypothesis
# is that the two samples are the same; our alternative hypothesis is that
# the second sample is lesser OR greater than the first.
t = abs(t)
def GreatestSmaller(nums, target):
"""Returns the largest number that is <= the target number."""
lesser_equal = [n for n in nums if n <= target]
assert lesser_equal, 'No number in number list <= target.'
return max(lesser_equal)
df_key = GreatestSmaller(TABLE.keys(), df)
t_table_row = TABLE[df_key]
approximate_t_value = GreatestSmaller(t_table_row, t)
t_value_index = t_table_row.index(approximate_t_value)
return TWO_TAIL[t_value_index]
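# Worked example of the lookup (values taken from the table above): for
# t=2.5 and df=10, the df=10 row applies; the largest tabulated t-value
# <= 2.5 is 2.228, at index 3, so the result is TWO_TAIL[3] = 0.05.
# Rounding down at both steps makes the lookup conservative: the returned
# p-value is effectively an upper bound on the exact two-tailed p-value.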
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for ttest module."""
import unittest
import ttest
# This test case accesses private functions of the ttest module.
# pylint: disable=W0212
class TTestTest(unittest.TestCase):
"""Tests for the t-test functions."""
def testWelchsFormula(self):
"""Tests calculation of the t value."""
# Results can be verified by directly plugging variables into Welch's
# equation (e.g. using a calculator or the Python interpreter).
self.assertEqual(
-0.2796823595120407,
ttest._TValue(0.299, 0.307, 0.05, 0.08, 150, 165))
# Note that a negative t value is obtained when the first sample has a
# smaller mean than the second, otherwise a positive value is returned.
self.assertEqual(
0.2796823595120407,
ttest._TValue(0.307, 0.299, 0.08, 0.05, 165, 150))
def testWelchSatterthwaiteFormula(self):
"""Tests calculation of estimated degrees of freedom."""
# Note that since the Welch-Satterthwaite equation gives an estimate of
# degrees of freedom, the result may not be an integer.
self.assertEqual(
307.1987997516727,
ttest._DegreesOfFreedom(0.05, 0.08, 150, 165))
def testWelchsTTest(self):
"""Tests the t value and degrees of freedom output of Welch's t-test."""
# The t-value can be checked with scipy.stats.ttest_ind(equal_var=False).
t, df, _ = ttest.WelchsTTest([2, 3, 2, 3, 2, 3], [4, 5, 4, 5, 4, 5])
self.assertAlmostEqual(10.0, df)
# The t-value produced by scipy.stats.ttest_ind is -6.32455532034.
    # Our function produces a slightly different result, most likely because
    # of accumulated floating-point rounding differences.
self.assertAlmostEqual(-6.325, t, delta=1.0)
def testTTestEqualSamples(self):
"""Checks that t = 0 and p = 1 when the samples are the same."""
t, _, p = ttest.WelchsTTest([1, 2, 3], [1, 2, 3])
self.assertEqual(0, t)
self.assertEqual(1, p)
t, _, p = ttest.WelchsTTest([1, 2], [1, 2])
self.assertEqual(0, t)
self.assertEqual(1, p)
def testTTestVeryDifferentSamples(self):
"""Checks that p is very low when the samples are clearly different."""
t, _, p = ttest.WelchsTTest(
[100, 101, 100, 101, 100], [1, 2, 1, 2, 1, 2, 1, 2])
self.assertGreaterEqual(t, 250)
self.assertLessEqual(p, 0.01)
def testTTestVariance(self):
"""Verifies that higher variance -> higher p value."""
_, _, p_low_var = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5])
_, _, p_high_var = ttest.WelchsTTest([1, 4, 1, 4], [3, 6, 3, 6])
self.assertLess(p_low_var, p_high_var)
def testTTestSampleSize(self):
"""Verifies that smaller sample size -> higher p value."""
_, _, p_larger_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5])
_, _, p_smaller_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5])
self.assertLess(p_larger_sample, p_smaller_sample)
def testTTestMeanDifference(self):
"""Verifies that smaller difference between means -> higher p value."""
_, _, p_far_means = ttest.WelchsTTest([2, 3, 2, 3], [5, 6, 5, 6])
_, _, p_near_means = ttest.WelchsTTest([2, 3, 2, 3], [3, 4, 3, 4])
self.assertLess(p_far_means, p_near_means)
class LookupTableTest(unittest.TestCase):
"""Tests for functionality related to lookup of p-values in a table."""
def setUp(self):
self.original_TWO_TAIL = ttest.TWO_TAIL
self.original_TABLE = ttest.TABLE
ttest.TWO_TAIL = [1, 0.2, 0.1, 0.05, 0.02, 0.01]
ttest.TABLE = {
1: [0, 6.314, 12.71, 31.82, 63.66, 318.31],
2: [0, 2.920, 4.303, 6.965, 9.925, 22.327],
3: [0, 2.353, 3.182, 4.541, 5.841, 10.215],
4: [0, 2.132, 2.776, 3.747, 4.604, 7.173],
}
def tearDown(self):
ttest.TWO_TAIL = self.original_TWO_TAIL
ttest.TABLE = self.original_TABLE
def testLookupExactMatch(self):
"""Tests a lookup when there is an exact match."""
self.assertEqual(0.1, ttest._LookupPValue(3.182, 3))
self.assertEqual(0.1, ttest._LookupPValue(-3.182, 3))
def testLookupAbove(self):
"""Tests a lookup when the given value is above an entry in the table."""
self.assertEqual(0.2, ttest._LookupPValue(3.1, 2))
self.assertEqual(0.2, ttest._LookupPValue(-3.1, 2))
def testLookupLargeTValue(self):
"""Tests a lookup when the given t-value is very large."""
self.assertEqual(0.01, ttest._LookupPValue(500.0, 1))
self.assertEqual(0.01, ttest._LookupPValue(-500.0, 1))
def testLookupZeroTValue(self):
"""Tests a lookup when the given t-value is zero."""
self.assertEqual(1, ttest._LookupPValue(0.0, 1))
self.assertEqual(1, ttest._LookupPValue(0.0, 2))
def testLookupLargeDF(self):
"""Tests a lookup when the given degrees of freedom is large."""
self.assertEqual(0.02, ttest._LookupPValue(5.0, 50))
if __name__ == '__main__':
unittest.main()
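# Note: running this file directly (e.g. `python ttest_test.py`) assumes that
# ttest.py and its math_utils dependency are importable from the same
# directory.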