Commit 5f552b39 authored by Tom Anderson's avatar Tom Anderson Committed by Commit Bot

Reland "Simplify static initializer test"

This reverts commit 239e5b82.

> The old sizes.py perf test used to check for a whole set of performance
> regressions. This is now handled elsewhere with the exception of the SI
> check. The SI check is fairly simple, however, and doesn't require all
> of the perf_expectations infrastructure which is now obsolete.
>
> This CL removes the obsolete perf_expectations, hard codes the expected
> number of SIs in the script, and removes the windows and android code
> as it doesn't actually do anything.
>
> Bug: 572393

CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_chromium_archive_rel_ng;luci.chromium.try:mac_chromium_archive_rel_ng;luci.chromium.try:win_archive;luci.chromium.try:win_x64_archive;master.tryserver.chromium.android:android_archive_rel_ng
TBR=dpranke

Bug: 572393
Change-Id: Iad25e02aba593632f167bb534be95ba6a5691446
Reviewed-on: https://chromium-review.googlesource.com/1241935
Reviewed-by: Thomas Anderson <thomasanderson@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Nico Weber <thakis@chromium.org>
Commit-Queue: Thomas Anderson <thomasanderson@chromium.org>
Cr-Commit-Position: refs/heads/master@{#594389}
parent 9fbe1d8b
......@@ -9,7 +9,7 @@ http://neugierig.org/software/chromium/notes/2011/08/static-initializers.html
# How Static Initializers are Checked
* For Linux and Mac:
* The expected count is stored in [//tools/perf_expectations/perf_expectations.json](https://cs.chromium.org/chromium/src/tools/perf_expectations/perf_expectations.json)
* The expected count is stored in [//infra/scripts/legacy/scripts/slave/chromium/sizes.py](https://cs.chromium.org/chromium/src/infra/scripts/legacy/scripts/slave/chromium/sizes.py)
* For Android:
* The expected count is stored in the build target [//chrome/android:monochrome_static_initializers](https://cs.chromium.org/chromium/src/chrome/android/BUILD.gn)
......
......@@ -50,45 +50,13 @@ def main_run(script_args):
sizes_cmd.extend(['--platform', args.platform])
rc = common.run_runtest(script_args, runtest_args + sizes_cmd)
with open(tempfile_path) as f:
results = json.load(f)
with open(os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
'perf_expectations.json')) as f:
perf_expectations = json.load(f)
valid = (rc == 0)
failures = []
for name, result in results.iteritems():
fqtn = '%s/%s/%s' % (args.prefix, name, result['identifier'])
if fqtn not in perf_expectations:
continue
if perf_expectations[fqtn]['type'] != 'absolute':
print 'ERROR: perf expectation %r is not yet supported' % fqtn
valid = False
continue
actual = result['value']
expected = perf_expectations[fqtn]['regress']
better = perf_expectations[fqtn]['better']
check_result = ((actual <= expected) if better == 'lower'
else (actual >= expected))
if not check_result:
failures.append(fqtn)
print 'FAILED %s: actual %s, expected %s, better %s' % (
fqtn, actual, expected, better)
failures = json.load(f)
json.dump({
'valid': valid,
'valid': rc == 0,
'failures': failures,
}, script_args.output)
# sizes.py itself doesn't fail on regressions.
if failures and rc == 0:
rc = 1
return rc
......
......@@ -38,8 +38,10 @@ NOTES = {
IS_GIT_WORKSPACE = (subprocess.Popen(
['git', 'rev-parse'], stderr=subprocess.PIPE).wait() == 0)
class Demangler(object):
"""A wrapper around c++filt to provide a function to demangle symbols."""
def __init__(self, toolchain):
self.cppfilt = subprocess.Popen([toolchain + 'c++filt'],
stdin=subprocess.PIPE,
......@@ -50,6 +52,7 @@ class Demangler(object):
self.cppfilt.stdin.write(sym + '\n')
return self.cppfilt.stdout.readline().strip()
# Matches for example: "cert_logger.pb.cc", capturing "cert_logger".
protobuf_filename_re = re.compile(r'(.*)\.pb\.cc$')
def QualifyFilenameAsProto(filename):
......@@ -72,6 +75,7 @@ def QualifyFilenameAsProto(filename):
candidate = line.strip()
return candidate
# Regex matching the substring of a symbol's demangled text representation most
# likely to appear in a source file.
# Example: "v8::internal::Builtins::InitBuiltinFunctionTable()" becomes
......@@ -99,6 +103,7 @@ def QualifyFilename(filename, symbol):
candidate = line.strip()
return candidate
# Regex matching nm output for the symbols we're interested in.
# See test_ParseNmLine for examples.
nm_re = re.compile(r'(\S+) (\S+) t (?:_ZN12)?_GLOBAL__(?:sub_)?I_(.*)')
......@@ -123,6 +128,7 @@ def test_ParseNmLine():
'_GLOBAL__sub_I_extension_specifics.pb.cc')
assert parse == ('extension_specifics.pb.cc', 40607408, 36), parse
# Just always run the test; it is fast enough.
test_ParseNmLine()
......@@ -136,6 +142,7 @@ def ParseNm(toolchain, binary):
if parse:
yield parse
# Regex matching objdump output for the symbols we're interested in.
# Example line:
# 12354ab: (disassembly, including <FunctionReference>)
......@@ -158,13 +165,14 @@ def ExtractSymbolReferences(toolchain, binary, start, end):
if ref.startswith('.LC') or ref.startswith('_DYNAMIC'):
# Ignore these, they are uninformative.
continue
if ref.startswith('_GLOBAL__I_'):
if re.match('_GLOBAL__(?:sub_)?I_', ref):
# Probably a relative jump within this function.
continue
refs.add(ref)
return sorted(refs)
def main():
parser = optparse.OptionParser(usage='%prog [option] filename')
parser.add_option('-d', '--diffable', dest='diffable',
......@@ -236,5 +244,6 @@ def main():
return 0
if '__main__' == __name__:
sys.exit(main())
jochen@chromium.org
thakis@chromium.org
thestig@chromium.org
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for perf_expectations.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
PERF_EXPECTATIONS = 'tools/perf_expectations/perf_expectations.json'
CONFIG_FILE = 'tools/perf_expectations/chromium_perf_expectations.cfg'
def CheckChangeOnUpload(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
return output
def CheckChangeOnCommit(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
output.extend(input_api.canned_checks.CheckDoNotSubmit(input_api,
output_api))
return output
This is where old perf machinery used to live, keeping track of binary sizes,
etc. Now that lives elsewhere and has a team to support it (see
https://www.chromium.org/developers/tree-sheriffs/perf-sheriffs). This code
remains to ensure that no static initializers get into Chromium.
Because this code has this history, it's far more complicated than it needs to
be. TODO(dpranke): Simplify it. https://crbug.com/572393
In the meantime, if you're trying to update perf_expectations.json, there are
no instructions for doing so, and the tools that you used to use don't work
because they rely on data files that were last updated at the end of 2015. So
here's what to do to reset the expected static initializer count value.
The expected static initializer count value is in the "regress" field for the
platform. In addition, each platform has a checksum in the "sha1" field to
ensure that you properly used the magic tools. Since the magic tools don't work
anymore, dpranke added a bypass to the verification. If you run:
> tools/perf_expectations/make_expectations.py --checksum --verbose
the script will tell you what the checksum *should* be. Alter the "sha1" field
to be that value, and you can commit changes to that file.
Please see https://crbug.com/572393 for more information.
{
"base_url": "http://build.chromium.org/f/chromium/perf",
"perf_file": "perf_expectations.json"
}
This diff is collapsed.
{"linux-release-64/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 8, "regress": 8, "tolerance": 0, "sha1": "3c815259"},
"linux-release-64/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 6, "regress": 8, "sha1": "8416f450"},
"linux-release-64/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "228221af"},
"linux-release/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 9, "regress": 9, "tolerance": 0, "sha1": "03dc3cfd"},
"linux-release/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 7, "regress": 9, "sha1": "1a3c5b2b"},
"linux-release/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "dd908f29"},
"mac-release/sizes/chrome-si/initializers": {"reva": 281731, "revb": 281731, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "tolerance": 0, "sha1": "01759b7f"},
"load": true
}
{"linux-release/media_tests_av_perf/audio_latency/latency": {"reva": 180005, "revb": 180520, "type": "absolute", "better": "lower", "improve": 190, "regress": 222, "sha1": "fc9815d5"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fb8157f9"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "c0fb3421"},
"linux-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fa9582d3"},
"linux-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 166, "regress": 231, "sha1": "ca3a7a47"},
"linux-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"win-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "646c02f2"},
"win-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "46c97b57"},
"win-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "9b709aab"},
"win-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 174, "regress": 204, "sha1": "4c0270a6"},
"win-release/media_tests_av_perf/fps/crowd1080.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 53, "regress": 43, "sha1": "7ad49461"},
"win-release/media_tests_av_perf/fps/crowd2160.webm": {"reva": 176330, "revb": 176978, "type": "absolute", "better": "higher", "improve": 26.0399945997, "regress": 25.9062437562, "sha1": "700526a9"},
"win-release/media_tests_av_perf/fps/crowd360.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 51, "regress": 47, "sha1": "7f8ef21c"},
"win-release/media_tests_av_perf/fps/crowd480.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 50, "regress": 47, "sha1": "5dc96881"},
"win-release/media_tests_av_perf/fps/crowd720.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 52, "regress": 47, "sha1": "4fcfb653"},
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "54d94538"},
"win-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "113aef17"},
"win-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "a22847d0"},
"win-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "6ee2e716"},
"win-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 32, "regress": 26, "sha1": "dfadb872"},
"win-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "530c5bf5"},
"win-release/media_tests_av_perf/fps/tulip2.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "35b91c8e"}
}
\ No newline at end of file
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verify perf_expectations.json can be loaded using simplejson.
perf_expectations.json is a JSON-formatted file. This script verifies
that simplejson can load it correctly. It should catch most common
formatting problems.
"""
import subprocess
import sys
import os
import unittest
import re
simplejson = None
def OnTestsLoad():
old_path = sys.path
script_path = os.path.dirname(sys.argv[0])
load_path = None
global simplejson
# This test script should be stored in src/tools/perf_expectations/. That
# directory will most commonly live in 2 locations:
#
# - a regular Chromium checkout, in which case src/third_party
# is where to look for simplejson
#
# - a buildbot checkout, in which case .../pylibs is where
# to look for simplejson
#
# Locate and install the correct path based on what we can find.
#
for path in ('../../../third_party', '../../../../../pylibs'):
path = os.path.join(script_path, path)
if os.path.exists(path) and os.path.isdir(path):
load_path = os.path.abspath(path)
break
if load_path is None:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error locating simplejson load path (%s)" % msg
# Try importing simplejson once. If this succeeds, we found it and will
# load it again later properly. Fail if we cannot load it.
sys.path.append(load_path)
try:
import simplejson as Simplejson
simplejson = Simplejson
except ImportError, e:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error trying to import simplejson from %s (%s)" % \
(load_path, msg)
finally:
sys.path = old_path
return True
def LoadJsonFile(filename):
f = open(filename, 'r')
try:
data = simplejson.load(f)
except ValueError, e:
f.seek(0)
print "Error reading %s:\n%s" % (filename,
f.read()[:50]+'...')
raise e
f.close()
return data
OnTestsLoad()
CONFIG_JSON = os.path.join(os.path.dirname(sys.argv[0]),
'../chromium_perf_expectations.cfg')
MAKE_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../make_expectations.py')
PERF_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../perf_expectations.json')
class PerfExpectationsUnittest(unittest.TestCase):
def testPerfExpectations(self):
# Test data is dictionary.
perf_data = LoadJsonFile(PERF_EXPECTATIONS)
if not isinstance(perf_data, dict):
raise Exception('perf expectations is not a dict')
# Test the 'load' key.
if not 'load' in perf_data:
raise Exception("perf expectations is missing a load key")
if not isinstance(perf_data['load'], bool):
raise Exception("perf expectations load key has non-bool value")
# Test all key values are dictionaries.
bad_keys = []
for key in perf_data:
if key == 'load':
continue
if not isinstance(perf_data[key], dict):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys have non-dict values"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all key values have delta and var keys.
for key in perf_data:
if key == 'load':
continue
# First check if regress/improve is in the key's data.
if 'regress' in perf_data[key]:
if 'improve' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['regress'], int) and
not isinstance(perf_data[key]['regress'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['improve'], int) and
not isinstance(perf_data[key]['improve'], float)):
bad_keys.append(key)
else:
# Otherwise check if delta/var is in the key's data.
if 'delta' not in perf_data[key] or 'var' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['delta'], int) and
not isinstance(perf_data[key]['delta'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['var'], int) and
not isinstance(perf_data[key]['var'], float)):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations key values missing or invalid delta/var"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all keys have the correct format.
for key in perf_data:
if key == 'load':
continue
# tools/buildbot/scripts/master/log_parser.py should have a matching
# regular expression.
if not re.match(r"^([\w\.-]+)/([\w\.-]+)/([\w\.-]+)/([\w\.-]+)$", key):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys in bad format, expected a/b/c/d"
raise Exception("%s: %s" % (msg, bad_keys))
def testNoUpdatesNeeded(self):
p = subprocess.Popen([MAKE_EXPECTATIONS, '-s'], stdout=subprocess.PIPE)
p.wait();
self.assertEqual(p.returncode, 0,
msg='Update expectations first by running ./make_expectations.py')
def testConfigFile(self):
# Test that the config file can be parsed as JSON.
config = LoadJsonFile(CONFIG_JSON)
# Require the following keys.
if 'base_url' not in config:
raise Exception('base_url not specified in config file')
if 'perf_file' not in config:
raise Exception('perf_file not specified in config file')
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare tests that require re-baselining for input to make_expectations.py.
The regularly running perf-AV tests require re-baselining of expectations
about once a week. The steps involved in rebaselining are:
1.) Identify the tests to update, based off reported e-mail results.
2.) Figure out reva and revb values, which is the starting and ending revision
numbers for the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json referring to the tests to be updated,
so that they may be used as input to make_expectations.py.
This script automates the last step above.
Here's a sample line from perf_expectations.json:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0, "sha1": "54d94538"},
To get the above test ready for input to make_expectations.py, it should become:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0},
Examples:
1.) To update the test specified above and get baseline
values using the revision range 12345 and 23456, run this script with a command
line like this:
python update_perf_expectations.py -f \
win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
Or, using an input file,
where the input file contains a single line with text
win-release/media_tests_av_perf/fps/tulip2.m4a
run with this command line:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
2.) Let's say you want to update all seek tests on windows, and get baseline
values using the revision range 12345 and 23456.
Run this script with this command line:
python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
--reva 12345 --revb 23456
Or:
python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
--revb 23456
Or, using an input file,
where the input file contains a single line with text win-release/.*/seek/:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
3.) Similarly, if you want to update seek tests on all platforms
python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
--revb 23456
"""
import logging
from optparse import OptionParser
import os
import re
import make_expectations as perf_ex_lib
# Default logging is INFO. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.INFO
def GetTestsToUpdate(contents, all_test_keys):
"""Parses input contents and obtains tests to be re-baselined.
Args:
contents: string containing contents of input file.
all_test_keys: list of keys of test dictionary.
Returns:
A list of keys for tests that should be updated.
"""
# Each line of the input file specifies a test case to update.
tests_list = []
for test_case_filter in contents.splitlines():
# Skip any empty lines.
if test_case_filter:
# Sample expected line:
# win-release/media_tests_av_perf/seek/\
# CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
# Or, if reg-ex, then sample line:
# win-release/media-tests_av_perf/seek*
# Skip any leading spaces if they exist in the input file.
logging.debug('Trying to match %s', test_case_filter)
tests_list.extend(GetMatchingTests(test_case_filter.strip(),
all_test_keys))
return tests_list
def GetMatchingTests(tests_to_update, all_test_keys):
"""Parses input reg-ex filter and obtains tests to be re-baselined.
Args:
tests_to_update: reg-ex string specifying tests to be updated.
all_test_keys: list of keys of tests dictionary.
Returns:
A list of keys for tests that should be updated.
"""
tests_list = []
search_string = re.compile(tests_to_update)
# Get matching tests from the dictionary of tests
for test_key in all_test_keys:
if search_string.match(test_key):
tests_list.append(test_key)
logging.debug('%s will be updated', test_key)
logging.info('%s tests found matching reg-ex: %s', len(tests_list),
tests_to_update)
return tests_list
def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
"""Modifies value of tests that are to re-baselined:
Set reva and revb values to specified new values. Remove sha1.
Args:
tests_to_update: list of tests to be updated.
all_tests: dictionary of all tests.
reva: oldest revision in range to use for new values.
revb: newest revision in range to use for new values.
Raises:
ValueError: If reva or revb are not valid ints, or if either
of them are negative.
"""
reva = int(reva)
revb = int(revb)
if reva < 0 or revb < 0:
raise ValueError('Revision values should be positive.')
# Ensure reva is less than revb.
# (this is similar to the check done in make_expectations.py)
if revb < reva:
temp = revb
revb = reva
reva = temp
for test_key in tests_to_update:
# Get original test from the dictionary of tests
test_value = all_tests[test_key]
if test_value:
# Sample line in perf_expectations.json:
# "linux-release/media_tests _av_perf/dropped_frames/crowd360.webm":\
# {"reva": 155180, "revb": 155280, "type": "absolute", \
# "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
# Set new revision range
test_value['reva'] = reva
test_value['revb'] = revb
# Remove sha1 to indicate this test requires an update
# Check first to make sure it exist.
if 'sha1' in test_value:
del test_value['sha1']
else:
logging.warning('%s does not exist.', test_key)
logging.info('Done preparing tests for update.')
def GetCommandLineOptions():
"""Parse command line arguments.
Returns:
An options object containing command line arguments and their values.
"""
parser = OptionParser()
parser.add_option('--reva', dest='reva', type='int',
help='Starting revision of new range.',
metavar='START_REVISION')
parser.add_option('--revb', dest='revb', type='int',
help='Ending revision of new range.',
metavar='END_REVISION')
parser.add_option('-f', dest='tests_filter',
help='Regex to use for filtering tests to be updated. '
'At least one of -filter or -input_file must be provided. '
'If both are provided, then input-file is used.',
metavar='FILTER', default='')
parser.add_option('-i', dest='input_file',
help='Optional path to file with reg-exes for tests to'
' update. If provided, it overrides the filter argument.',
metavar='INPUT_FILE', default='')
parser.add_option('--config', dest='config_file',
default=perf_ex_lib.DEFAULT_CONFIG_FILE,
help='Set the config file to FILE.', metavar='FILE')
parser.add_option('-v', dest='verbose', action='store_true', default=False,
help='Enable verbose output.')
options = parser.parse_args()[0]
return options
def Main():
"""Main driver function."""
options = GetCommandLineOptions()
_SetLogger(options.verbose)
# Do some command-line validation
if not options.input_file and not options.tests_filter:
logging.error('At least one of input-file or test-filter must be provided.')
exit(1)
if options.input_file and options.tests_filter:
logging.error('Specify only one of input file or test-filter.')
exit(1)
if not options.reva or not options.revb:
logging.error('Start and end revision of range must be specified.')
exit(1)
# Load config.
config = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(options.config_file))
# Obtain the perf expectations file from the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
# We should have all the information we require now.
# On to the real thang.
# First, get all the existing tests from the original perf_expectations file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(perf_file))
all_test_keys = all_tests.keys()
# Remove the load key, because we don't want to modify it.
all_test_keys.remove('load')
# Keep tests sorted, like in the original file.
all_test_keys.sort()
# Next, get all tests that have been identified for an update.
tests_to_update = []
if options.input_file:
# Tests to update have been specified in an input_file.
# Get contents of file.
tests_filter = perf_ex_lib.ReadFile(options.input_file)
elif options.tests_filter:
# Tests to update have been specified as a reg-ex filter.
tests_filter = options.tests_filter
# Get tests to update based on filter specified.
tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
logging.info('Done obtaining matching tests.')
# Now, prepare tests for update.
PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)
# Finally, write modified tests back to perf_expectations file.
perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
calculate_sha1=False)
logging.info('Done writing tests for update to %s.', perf_file)
def _SetLogger(verbose):
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
if __name__ == '__main__':
Main()
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.