Commit 239e5b82 authored by Tim Brown, committed by Commit Bot

Revert "Simplify static initializer test"

This reverts commit 6b3f0a07.

Reason for revert: sizes test fails in waterfall

Original change's description:
> Simplify static initializer test
> 
> The old sizes.py perf test used to check for a whole set of performance
> regressions. This is now handled elsewhere with the exception of the SI
> check. The SI check is fairly simple, however, and doesn't require all
> of the perf_expectations infrastructure which is now obsolete.
> 
> This CL removes the obsolete perf_expectations, hard codes the expected
> number of SIs in the script, and removes the windows and android code
> as it doesn't actually do anything.
> 
> Bug: 572393
> Change-Id: I960cb8fec63e25c489e8c4e90f670c7e35dd4fd6
> Reviewed-on: https://chromium-review.googlesource.com/654178
> Reviewed-by: Scott Graham <scottmg@chromium.org>
> Reviewed-by: Thomas Anderson <thomasanderson@chromium.org>
> Reviewed-by: Dirk Pranke <dpranke@chromium.org>
> Commit-Queue: Tim Brown <timbrown@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#505434}
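For context, the reverted change replaced the perf_expectations lookup with a hard-coded table of expected static-initializer (SI) counts; the EXPECTED_LINUX_SI_COUNTS and EXPECTED_MAC_SI_COUNT constants removed in the diff below show that shape. A minimal sketch of that style of check follows, with illustrative counts and a caller-supplied actual_counts dict standing in for the readelf/otool parsing that sizes.py actually does:

# Sketch only: a hard-coded expectation table in the spirit of the reverted CL.
# The counts are illustrative, not real baselines.
EXPECTED_SI_COUNTS = {
  'chrome': 7,
  'nacl_helper': 7,
}

def check_si_counts(actual_counts, expected_counts=EXPECTED_SI_COUNTS):
  """Return failure messages for binaries whose SI count differs from the table."""
  failures = []
  for binary, expected in sorted(expected_counts.items()):
    actual = actual_counts.get(binary)
    if actual is None:
      failures.append('%s: no SI count collected' % binary)
    elif actual != expected:
      failures.append('%s contains %d static initializers (expected %d)' %
                      (binary, actual, expected))
  return failures

For example, check_si_counts({'chrome': 8, 'nacl_helper': 7}) reports a single failure for chrome; an empty list means the counts match.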

TBR=primiano@chromium.org,dpranke@chromium.org,scottmg@chromium.org,thomasanderson@chromium.org,timbrown@chromium.org

Change-Id: Ia610eb13a485d4c6044248b5c0c5dc4f71ffeec3
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 572393
Reviewed-on: https://chromium-review.googlesource.com/692567
Commit-Queue: Tim Brown <timbrown@chromium.org>
Reviewed-by: Thomas Anderson <thomasanderson@chromium.org>
Cr-Commit-Position: refs/heads/master@{#505441}
parent b242320c
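The diff below restores the ResultsCollector plumbing in sizes.py: each measurement is stored under a name with an identifier, an integer value and a unit, echoed as a legacy 'RESULT <name>: <identifier>= <value> <units>' line, and dumped as JSON when --json is passed. A stripped-down sketch of that reporting path, using made-up sample values:

# Stand-alone sketch mirroring the restored ResultsCollector below;
# the field names come from the diff, the sample values are invented.
results = {}

def add_result(name, identifier, value, units):
  assert name not in results
  results[name] = {'identifier': identifier, 'value': int(value), 'units': units}
  # Legacy printing, previously used for parsing the text logs.
  print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)

add_result('chrome-si', 'initializers', 8, 'files')
add_result('chrome-text', 'text', 123456789, 'bytes')
# With --json, sizes.py writes the results dict via json.dump().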
@@ -5,10 +5,10 @@
"""A tool to extract size information for chrome, executed by buildbot.
When this is run, the current directory (cwd) should be the outer build
directory (e.g., chrome-release/build/).
For a list of command-line options, call this script with '--help'.
"""
import errno
@@ -24,16 +24,48 @@ import tempfile
from slave import build_directory
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..', '..'))
EXPECTED_LINUX_SI_COUNTS = {
'chrome': 7,
'nacl_helper': 7,
'nacl_helper_bootstrap': 7,
}
EXPECTED_MAC_SI_COUNT = 0
class ResultsCollector(object):
def __init__(self):
self.results = {}
def add_result(self, name, identifier, value, units):
assert name not in self.results
self.results[name] = {
'identifier': identifier,
'value': int(value),
'units': units
}
# Legacy printing, previously used for parsing the text logs.
print 'RESULT %s: %s= %s %s' % (name, identifier, value, units)
def get_size(filename):
return os.stat(filename)[stat.ST_SIZE]
def get_linux_stripped_size(filename):
EU_STRIP_NAME = 'eu-strip'
# Assumes |filename| is in out/Release
# build/linux/bin/eu-strip'
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(filename)))
eu_strip_path = os.path.join(src_dir, 'build', 'linux', 'bin', EU_STRIP_NAME)
if (platform.architecture()[0] == '64bit' or
not os.path.exists(eu_strip_path)):
eu_strip_path = EU_STRIP_NAME
with tempfile.NamedTemporaryFile() as stripped_file:
strip_cmd = [eu_strip_path, '-o', stripped_file.name, filename]
result = 0
result, _ = run_process(result, strip_cmd)
if result != 0:
return (result, 0)
return (result, get_size(stripped_file.name))
def run_process(result, command):
@@ -52,7 +84,7 @@ def print_si_fail_hint(path_to_tool):
print '# HINT: diff against the log from the last run to see what changed'
def main_mac(options):
def main_mac(options, args, results_collector):
"""Print appropriate size information about built Mac targets.
Returns the first non-zero exit status of any command it executes,
@@ -62,7 +94,6 @@ def main_mac(options):
target_dir = os.path.join(build_dir, options.target)
result = 0
failures = []
# Work with either build type.
base_names = ('Chromium', 'Google Chrome')
for base_name in base_names:
@@ -88,6 +119,34 @@
chromium_framework_unstripped = os.path.join(target_dir,
framework_unstripped_name)
if os.path.exists(chromium_executable):
print_dict = {
# Remove spaces in the names so any downstream processing is less
# likely to choke.
'app_name' : re.sub(r'\s', '', base_name),
'app_bundle' : re.sub(r'\s', '', app_bundle),
'framework_name' : re.sub(r'\s', '', framework_name),
'framework_bundle' : re.sub(r'\s', '', framework_bundle),
'app_size' : get_size(chromium_executable),
'framework_size' : get_size(chromium_framework_executable)
}
# Collect the segment info out of the App
result, stdout = run_process(result, ['size', chromium_executable])
print_dict['app_text'], print_dict['app_data'], print_dict['app_objc'] = \
re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
# Collect the segment info out of the Framework
result, stdout = run_process(result, ['size',
chromium_framework_executable])
print_dict['framework_text'], print_dict['framework_data'], \
print_dict['framework_objc'] = \
re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
# Collect the whole size of the App bundle on disk (include the framework)
result, stdout = run_process(result, ['du', '-s', '-k', chromium_app_dir])
du_s = re.search(r'(\d+)', stdout).group(1)
print_dict['app_bundle_size'] = (int(du_s) * 1024)
# Count the number of files with at least one static initializer.
pipes = [['otool', '-l', chromium_framework_executable],
['grep', '__mod_init_func', '-C', '5'],
@@ -104,27 +163,25 @@
result = p.returncode
else:
initializers_s = '0'
word_size = 8 # Assume 64 bit
word_size = 4 # Assume 32 bit
si_count = int(initializers_s, 16) / word_size
if si_count != EXPECTED_MAC_SI_COUNT:
print_dict['initializers'] = si_count
failures.append(
'Binary contains %d static initializers (expected %d)' %
(si_count, EXPECTED_MAC_SI_COUNT))
# Use dump-static-initializers.py to print the list of SIs.
# For Release builds only, use dump-static-initializers.py to print the
if si_count > 0:
# list of static initializers.
if si_count > 0 and options.target == 'Release':
print '\n# Static initializers in %s:' % chromium_framework_executable
# First look for a dSYM to get information about the initializers. If
# one is not present, check if there is an unstripped copy of the build
# output.
mac_tools_path = os.path.join(
mac_tools_path = os.path.join(os.path.dirname(build_dir),
os.path.dirname(build_dir), 'tools', 'mac')
'tools', 'mac')
if os.path.exists(chromium_framework_dsym):
dump_static_initializers = os.path.join(
mac_tools_path, 'dump-static-initializers.py')
result, stdout = run_process(
result, stdout = run_process(result, [dump_static_initializers,
result, [dump_static_initializers, chromium_framework_dsym])
chromium_framework_dsym])
print_si_fail_hint('tools/mac/dump-static-initializers.py')
print stdout
else:
@@ -140,18 +197,54 @@
print_si_fail_hint('tools/mac/show_mod_init_func.py')
print stdout
results_collector.add_result(
print_dict['app_name'], print_dict['app_name'],
print_dict['app_size'], 'bytes')
results_collector.add_result(
'%s-__TEXT' % print_dict['app_name'], '__TEXT',
print_dict['app_text'], 'bytes')
results_collector.add_result(
'%s-__DATA' % print_dict['app_name'], '__DATA',
print_dict['app_data'], 'bytes')
results_collector.add_result(
'%s-__OBJC' % print_dict['app_name'], '__OBJC',
print_dict['app_objc'], 'bytes')
results_collector.add_result(
print_dict['framework_name'], print_dict['framework_name'],
print_dict['framework_size'], 'bytes')
results_collector.add_result(
'%s-__TEXT' % print_dict['framework_name'], '__TEXT',
print_dict['framework_text'], 'bytes')
results_collector.add_result(
'%s-__DATA' % print_dict['framework_name'], '__DATA',
print_dict['framework_data'], 'bytes')
results_collector.add_result(
'%s-__OBJC' % print_dict['framework_name'], '__OBJC',
print_dict['framework_objc'], 'bytes')
results_collector.add_result(
print_dict['app_bundle'], print_dict['app_bundle'],
print_dict['app_bundle_size'], 'bytes')
results_collector.add_result(
'chrome-si', 'initializers',
print_dict['initializers'], 'files')
# Found a match, don't check the other base_names.
return result, failures
return result
# If no base_names matched, fail script.
return 66, failures
return 66
def check_linux_binary(target_dir, binary_name):
def check_linux_binary(target_dir, binary_name, options):
"""Collect appropriate size information about the built Linux binary given.
Returns a tuple (result, failures). result is the first non-zero exit
Returns a tuple (result, sizes). result is the first non-zero exit
status of any command it executes, or zero on success. failures is a list
status of any command it executes, or zero on success. sizes is a list
of strings containing any error messages relating to failures of the checks.
of tuples (name, identifier, totals_identifier, value, units).
The printed line looks like:
name: identifier= value units
When this same data is used for totals across all the binaries, then
totals_identifier is the identifier to use, or '' to just use identifier.
""" """
binary_file = os.path.join(target_dir, binary_name) binary_file = os.path.join(target_dir, binary_name)
...@@ -160,17 +253,33 @@ def check_linux_binary(target_dir, binary_name): ...@@ -160,17 +253,33 @@ def check_linux_binary(target_dir, binary_name):
return 0, [] return 0, []
result = 0 result = 0
failures = [] sizes = []
def get_elf_section_size(readelf_stdout, section_name): def get_elf_section_size(readelf_stdout, section_name):
# Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8 # Matches: .ctors PROGBITS 000000000516add0 5169dd0 000010 00 WA 0 0 8
match = re.search(r'\.%s.*$' % re.escape(section_name), readelf_stdout, match = re.search(r'\.%s.*$' % re.escape(section_name),
re.MULTILINE) readelf_stdout, re.MULTILINE)
if not match: if not match:
return (False, -1) return (False, -1)
size_str = re.split(r'\W+', match.group(0))[5] size_str = re.split(r'\W+', match.group(0))[5]
return (True, int(size_str, 16)) return (True, int(size_str, 16))
sizes.append((binary_name, binary_name, 'size',
get_size(binary_file), 'bytes'))
result, stripped_size = get_linux_stripped_size(binary_file)
sizes.append((binary_name + '-stripped', 'stripped', 'stripped',
stripped_size, 'bytes'))
result, stdout = run_process(result, ['size', binary_file])
text, data, bss = re.search(r'(\d+)\s+(\d+)\s+(\d+)', stdout).groups()
sizes += [
(binary_name + '-text', 'text', '', text, 'bytes'),
(binary_name + '-data', 'data', '', data, 'bytes'),
(binary_name + '-bss', 'bss', '', bss, 'bytes'),
]
# Find the number of files with at least one static initializer.
# First determine if we're 32 or 64 bit
result, stdout = run_process(result, ['readelf', '-h', binary_file])
@@ -190,24 +299,22 @@ def check_linux_binary(target_dir, binary_name):
if has_init_array:
si_count = init_array_size / word_size
# In newer versions of gcc crtbegin.o inserts frame_dummy into .init_array
# but we don't want to count this entry, since its always present and not
# but we don't want to count this entry, since its alwasys present and not
# related to our code.
assert (si_count > 0)
assert(si_count > 0)
si_count -= 1
if (si_count != EXPECTED_LINUX_SI_COUNTS[binary_name]):
sizes.append((binary_name + '-si', 'initializers', '', si_count, 'files'))
failures.append('%s contains %d static initializers (expected %d)' %
(binary_name, si_count, EXPECTED_LINUX_SI_COUNTS[binary_name]))
result = 125
# Use dump-static-initializers.py to print the list of static initializers.
# For Release builds only, use dump-static-initializers.py to print the list
if si_count > 0:
# of static initializers.
if si_count > 0 and options.target == 'Release':
build_dir = os.path.dirname(target_dir)
dump_static_initializers = os.path.join(
dump_static_initializers = os.path.join(os.path.dirname(build_dir),
os.path.dirname(build_dir), 'tools', 'linux',
'tools', 'linux',
'dump-static-initializers.py')
result, stdout = run_process(result,
result, stdout = run_process(result, [dump_static_initializers,
[dump_static_initializers, '-d', binary_file])
'-d', binary_file])
print '\n# Static initializers in %s:' % binary_file
print_si_fail_hint('tools/linux/dump-static-initializers.py')
print stdout
@@ -221,13 +328,12 @@ def check_linux_binary(target_dir, binary_name):
# There are some, so count them.
result, stdout = run_process(result, ['eu-findtextrel', binary_file])
count = stdout.count('\n')
failures.append('%s contains %d TEXTREL relocations (expected 0)' %
sizes.append((binary_name + '-textrel', 'textrel', '', count, 'relocs'))
(binary_name, count))
return result, failures
return result, sizes
def main_linux(options):
def main_linux(options, args, results_collector):
"""Print appropriate size information about built Linux targets.
Returns the first non-zero exit status of any command it executes,
@@ -236,59 +342,205 @@ def main_linux(options):
build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
target_dir = os.path.join(build_dir, options.target)
binaries = EXPECTED_LINUX_SI_COUNTS.keys()
binaries = [
'chrome',
'nacl_helper',
'nacl_helper_bootstrap',
'libffmpegsumo.so',
'libgcflashplayer.so',
'libppGoogleNaClPluginChrome.so',
]
result = 0
failures = []
totals = {}
for binary in binaries:
this_result, this_failures = check_linux_binary(target_dir, binary)
this_result, this_sizes = check_linux_binary(target_dir, binary, options)
if result == 0:
result = this_result
failures.extend(this_failures)
for name, identifier, totals_id, value, units in this_sizes:
results_collector.add_result(name, identifier, value, units)
totals_id = totals_id or identifier, units
totals[totals_id] = totals.get(totals_id, 0) + int(value)
files = [
'nacl_irt_x86_64.nexe',
'resources.pak',
]
for filename in files:
path = os.path.join(target_dir, filename)
try:
size = get_size(path)
except OSError, e:
if e.errno == errno.ENOENT:
continue # Don't print anything for missing files.
raise
results_collector.add_result(filename, filename, size, 'bytes')
totals['size', 'bytes'] += size
# TODO(mcgrathr): This should all be refactored so the mac and win flavors
# also deliver data structures rather than printing, and the logic for
# the printing and the summing totals is shared across all three flavors.
for (identifier, units), value in sorted(totals.iteritems()):
results_collector.add_result(
'totals-%s' % identifier, identifier, value, units)
return result
def check_android_binaries(binaries, target_dir, options):
"""Common method for printing size information for Android targets.
"""
result = 0
for binary in binaries:
this_result, this_sizes = check_linux_binary(target_dir, binary, options)
if result == 0:
result = this_result
for name, identifier, _, value, units in this_sizes:
print 'RESULT %s: %s= %s %s' % (name.replace('/', '_'), identifier, value,
units)
return result
def main_android(options, args, results_collector):
"""Print appropriate size information about built Android targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
binaries = [
'chrome_public_apk/libs/armeabi-v7a/libchrome.so',
'lib/libchrome.so',
]
return check_android_binaries(binaries, target_dir, options)
def main_android_webview(options, args, results_collector):
"""Print appropriate size information about Android WebViewChromium targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
binaries = ['lib/libwebviewchromium.so']
return check_android_binaries(binaries, target_dir, options)
def main_android_cronet(options, args, results_collector):
"""Print appropriate size information about Android Cronet targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
target_dir = os.path.join(build_directory.GetBuildOutputDirectory(SRC_DIR),
options.target)
binaries = ['cronet_sample_apk/libs/arm64-v8a/libcronet.so',
'cronet_sample_apk/libs/armeabi-v7a/libcronet.so',
'cronet_sample_apk/libs/armeabi/libcronet.so',
'cronet_sample_apk/libs/mips/libcronet.so',
'cronet_sample_apk/libs/x86_64/libcronet.so',
'cronet_sample_apk/libs/x86/libcronet.so']
return result, failures
return check_android_binaries(binaries, target_dir, options)
def main_win(options, args, results_collector):
"""Print appropriate size information about built Windows targets.
Returns the first non-zero exit status of any command it executes,
or zero on success.
"""
build_dir = build_directory.GetBuildOutputDirectory(SRC_DIR)
target_dir = os.path.join(build_dir, options.target)
chrome_dll = os.path.join(target_dir, 'chrome.dll')
chrome_child_dll = os.path.join(target_dir, 'chrome_child.dll')
chrome_exe = os.path.join(target_dir, 'chrome.exe')
mini_installer_exe = os.path.join(target_dir, 'mini_installer.exe')
setup_exe = os.path.join(target_dir, 'setup.exe')
result = 0
print 'RESULT chrome.dll: chrome.dll= %s bytes' % get_size(chrome_dll)
if os.path.exists(chrome_child_dll):
fmt = 'RESULT chrome_child.dll: chrome_child.dll= %s bytes'
print fmt % get_size(chrome_child_dll)
print 'RESULT chrome.exe: chrome.exe= %s bytes' % get_size(chrome_exe)
if os.path.exists(mini_installer_exe):
fmt = 'RESULT mini_installer.exe: mini_installer.exe= %s bytes'
print fmt % get_size(mini_installer_exe)
if os.path.exists(setup_exe):
print 'RESULT setup.exe: setup.exe= %s bytes' % get_size(setup_exe)
return result
def main():
if sys.platform.startswith('darwin'):
if sys.platform in ('win32', 'cygwin'):
default_platform = 'win'
elif sys.platform.startswith('darwin'):
default_platform = 'mac'
elif sys.platform.startswith('linux'):
elif sys.platform == 'linux2':
default_platform = 'linux'
else:
default_platform = None
main_map = {
'linux': main_linux,
'android' : main_android,
'mac': main_mac,
'android-webview' : main_android_webview,
'android-cronet' : main_android_cronet,
'linux' : main_linux,
'mac' : main_mac,
'win' : main_win,
}
platforms = sorted(main_map.keys())
option_parser = optparse.OptionParser()
option_parser.add_option(
option_parser.add_option('--target',
'--target',
default='Release',
help='build target (Debug, Release) [default: %default]')
help='build target (Debug, Release) '
option_parser.add_option(
'[default: %default]')
'--platform',
option_parser.add_option('--target-dir', help='ignored')
option_parser.add_option('--build-dir', help='ignored')
option_parser.add_option('--platform',
default=default_platform,
help='specify platform (%s) [default: %%default]' % ', '.join(platforms))
help='specify platform (%s) [default: %%default]'
% ', '.join(platforms))
option_parser.add_option('--json', help='Path to JSON output file')
options, _ = option_parser.parse_args()
options, args = option_parser.parse_args()
real_main = main_map.get(options.platform)
if not real_main:
if options.platform is None:
sys.stderr.write('Unsupported sys.platform %s.\n' % repr(sys.platform))
else:
sys.stderr.write('Unknown platform %s.\n' % repr(options.platform))
msg = 'Use the --platform= option to specify a supported platform:\n'
sys.stderr.write(msg + ' ' + ' '.join(platforms) + '\n')
return 2
rc, failures = real_main(options)
results_collector = ResultsCollector()
rc = real_main(options, args, results_collector)
if options.json:
with open(options.json, 'w') as f:
json.dump(failures, f)
json.dump(results_collector.results, f)
return rc
...
@@ -38,10 +38,8 @@ NOTES = {
IS_GIT_WORKSPACE = (subprocess.Popen(
['git', 'rev-parse'], stderr=subprocess.PIPE).wait() == 0)
class Demangler(object):
"""A wrapper around c++filt to provide a function to demangle symbols."""
def __init__(self, toolchain):
self.cppfilt = subprocess.Popen([toolchain + 'c++filt'],
stdin=subprocess.PIPE,
@@ -52,7 +50,6 @@ class Demangler(object):
self.cppfilt.stdin.write(sym + '\n')
return self.cppfilt.stdout.readline().strip()
# Matches for example: "cert_logger.pb.cc", capturing "cert_logger".
protobuf_filename_re = re.compile(r'(.*)\.pb\.cc$')
def QualifyFilenameAsProto(filename):
@@ -75,7 +72,6 @@ def QualifyFilenameAsProto(filename):
candidate = line.strip()
return candidate
# Regex matching the substring of a symbol's demangled text representation most
# likely to appear in a source file.
# Example: "v8::internal::Builtins::InitBuiltinFunctionTable()" becomes
@@ -103,7 +99,6 @@ def QualifyFilename(filename, symbol):
candidate = line.strip()
return candidate
# Regex matching nm output for the symbols we're interested in.
# See test_ParseNmLine for examples.
nm_re = re.compile(r'(\S+) (\S+) t (?:_ZN12)?_GLOBAL__(?:sub_)?I_(.*)')
@@ -128,7 +123,6 @@ def test_ParseNmLine():
'_GLOBAL__sub_I_extension_specifics.pb.cc')
assert parse == ('extension_specifics.pb.cc', 40607408, 36), parse
# Just always run the test; it is fast enough.
test_ParseNmLine()
@@ -142,7 +136,6 @@ def ParseNm(toolchain, binary):
if parse:
yield parse
# Regex matching objdump output for the symbols we're interested in.
# Example line:
# 12354ab: (disassembly, including <FunctionReference>)
@@ -165,14 +158,13 @@ def ExtractSymbolReferences(toolchain, binary, start, end):
if ref.startswith('.LC') or ref.startswith('_DYNAMIC'):
# Ignore these, they are uninformative.
continue
if re.match('_GLOBAL__(?:sub_)?I_', ref):
if ref.startswith('_GLOBAL__I_'):
# Probably a relative jump within this function.
continue
refs.add(ref)
return sorted(refs)
def main():
parser = optparse.OptionParser(usage='%prog [option] filename')
parser.add_option('-d', '--diffable', dest='diffable',
@@ -244,6 +236,5 @@ def main():
return 0
if '__main__' == __name__:
sys.exit(main())
jochen@chromium.org
thakis@chromium.org
thestig@chromium.org
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for perf_expectations.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into depot_tools.
"""
PERF_EXPECTATIONS = 'tools/perf_expectations/perf_expectations.json'
CONFIG_FILE = 'tools/perf_expectations/chromium_perf_expectations.cfg'
def CheckChangeOnUpload(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
return output
def CheckChangeOnCommit(input_api, output_api):
run_tests = False
for path in input_api.LocalPaths():
path = path.replace('\\', '/')
if (PERF_EXPECTATIONS == path or CONFIG_FILE == path):
run_tests = True
output = []
if run_tests:
whitelist = [r'.+_unittest\.py$']
output.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, 'tests', whitelist))
output.extend(input_api.canned_checks.CheckDoNotSubmit(input_api,
output_api))
return output
For instructions see
http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs
{
"base_url": "http://build.chromium.org/f/chromium/perf",
"perf_file": "perf_expectations.json"
}
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# For instructions see:
# http://www.chromium.org/developers/tree-sheriffs/perf-sheriffs
import hashlib
import math
import optparse
import os
import re
import subprocess
import sys
import time
import urllib2
try:
import json
except ImportError:
import simplejson as json
__version__ = '1.0'
EXPECTATIONS_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FILE = os.path.join(EXPECTATIONS_DIR,
'chromium_perf_expectations.cfg')
DEFAULT_TOLERANCE = 0.05
USAGE = ''
def ReadFile(filename):
try:
file = open(filename, 'rb')
except IOError, e:
print >> sys.stderr, ('I/O Error reading file %s(%s): %s' %
(filename, e.errno, e.strerror))
raise e
contents = file.read()
file.close()
return contents
def ConvertJsonIntoDict(string):
"""Read a JSON string and convert its contents into a Python datatype."""
if len(string) == 0:
print >> sys.stderr, ('Error could not parse empty string')
raise Exception('JSON data missing')
try:
jsondata = json.loads(string)
except ValueError, e:
print >> sys.stderr, ('Error parsing string: "%s"' % string)
raise e
return jsondata
# Floating point representation of last time we fetched a URL.
last_fetched_at = None
def FetchUrlContents(url):
global last_fetched_at
if last_fetched_at and ((time.time() - last_fetched_at) <= 0.5):
# Sleep for half a second to avoid overloading the server.
time.sleep(0.5)
try:
last_fetched_at = time.time()
connection = urllib2.urlopen(url)
except urllib2.HTTPError, e:
if e.code == 404:
return None
raise e
text = connection.read().strip()
connection.close()
return text
def GetRowData(data, key):
rowdata = []
# reva and revb always come first.
for subkey in ['reva', 'revb']:
if subkey in data[key]:
rowdata.append('"%s": %s' % (subkey, data[key][subkey]))
# Strings, like type, come next.
for subkey in ['type', 'better']:
if subkey in data[key]:
rowdata.append('"%s": "%s"' % (subkey, data[key][subkey]))
# Finally the main numbers come last.
for subkey in ['improve', 'regress', 'tolerance']:
if subkey in data[key]:
rowdata.append('"%s": %s' % (subkey, data[key][subkey]))
return rowdata
def GetRowDigest(rowdata, key):
sha1 = hashlib.sha1()
rowdata = [str(possibly_unicode_string).encode('ascii')
for possibly_unicode_string in rowdata]
sha1.update(str(rowdata) + key)
return sha1.hexdigest()[0:8]
def WriteJson(filename, data, keys, calculate_sha1=True):
"""Write a list of |keys| in |data| to the file specified in |filename|."""
try:
file = open(filename, 'wb')
except IOError, e:
print >> sys.stderr, ('I/O Error writing file %s(%s): %s' %
(filename, e.errno, e.strerror))
return False
jsondata = []
for key in keys:
rowdata = GetRowData(data, key)
if calculate_sha1:
# Include an updated checksum.
rowdata.append('"sha1": "%s"' % GetRowDigest(rowdata, key))
else:
if 'sha1' in data[key]:
rowdata.append('"sha1": "%s"' % (data[key]['sha1']))
jsondata.append('"%s": {%s}' % (key, ', '.join(rowdata)))
jsondata.append('"load": true')
jsontext = '{%s\n}' % ',\n '.join(jsondata)
file.write(jsontext + '\n')
file.close()
return True
def FloatIsInt(f):
epsilon = 1.0e-10
return abs(f - int(f)) <= epsilon
last_key_printed = None
def Main(args):
def OutputMessage(message, verbose_message=True):
global last_key_printed
if not options.verbose and verbose_message:
return
if key != last_key_printed:
last_key_printed = key
print '\n' + key + ':'
print ' %s' % message
parser = optparse.OptionParser(usage=USAGE, version=__version__)
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='enable verbose output')
parser.add_option('-s', '--checksum', action='store_true',
help='test if any changes are pending')
parser.add_option('-c', '--config', dest='config_file',
default=DEFAULT_CONFIG_FILE,
help='set the config file to FILE', metavar='FILE')
options, args = parser.parse_args(args)
if options.verbose:
print 'Verbose output enabled.'
config = ConvertJsonIntoDict(ReadFile(options.config_file))
# Get the list of summaries for a test.
base_url = config['base_url']
# Make the perf expectations file relative to the path of the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
perf = ConvertJsonIntoDict(ReadFile(perf_file))
# Fetch graphs.dat for this combination.
perfkeys = perf.keys()
# In perf_expectations.json, ignore the 'load' key.
perfkeys.remove('load')
perfkeys.sort()
write_new_expectations = False
found_checksum_mismatch = False
for key in perfkeys:
value = perf[key]
tolerance = value.get('tolerance', DEFAULT_TOLERANCE)
better = value.get('better', None)
# Verify the checksum.
original_checksum = value.get('sha1', '')
if 'sha1' in value:
del value['sha1']
rowdata = GetRowData(perf, key)
computed_checksum = GetRowDigest(rowdata, key)
if original_checksum == computed_checksum:
OutputMessage('checksum matches, skipping')
continue
elif options.checksum:
found_checksum_mismatch = True
continue
# Skip expectations that are missing a reva or revb. We can't generate
# expectations for those.
if not(value.has_key('reva') and value.has_key('revb')):
OutputMessage('missing revision range, skipping')
continue
revb = int(value['revb'])
reva = int(value['reva'])
# Ensure that reva is less than revb.
if reva > revb:
temp = reva
reva = revb
revb = temp
# Get the system/test/graph/tracename and reftracename for the current key.
matchData = re.match(r'^([^/]+)\/([^/]+)\/([^/]+)\/([^/]+)$', key)
if not matchData:
OutputMessage('cannot parse key, skipping')
continue
system = matchData.group(1)
test = matchData.group(2)
graph = matchData.group(3)
tracename = matchData.group(4)
reftracename = tracename + '_ref'
# Create the summary_url and get the json data for that URL.
# FetchUrlContents() may sleep to avoid overloading the server with
# requests.
summary_url = '%s/%s/%s/%s-summary.dat' % (base_url, system, test, graph)
summaryjson = FetchUrlContents(summary_url)
if not summaryjson:
OutputMessage('ERROR: cannot find json data, please verify',
verbose_message=False)
return 0
# Set value's type to 'relative' by default.
value_type = value.get('type', 'relative')
summarylist = summaryjson.split('\n')
trace_values = {}
traces = [tracename]
if value_type == 'relative':
traces += [reftracename]
for trace in traces:
trace_values.setdefault(trace, {})
# Find the high and low values for each of the traces.
scanning = False
for line in summarylist:
jsondata = ConvertJsonIntoDict(line)
try:
rev = int(jsondata['rev'])
except ValueError:
print ('Warning: skipping rev %r because could not be parsed '
'as an integer.' % jsondata['rev'])
continue
if rev <= revb:
scanning = True
if rev < reva:
break
# We found the upper revision in the range. Scan for trace data until we
# find the lower revision in the range.
if scanning:
for trace in traces:
if trace not in jsondata['traces']:
OutputMessage('trace %s missing' % trace)
continue
if type(jsondata['traces'][trace]) != type([]):
OutputMessage('trace %s format not recognized' % trace)
continue
try:
tracevalue = float(jsondata['traces'][trace][0])
except ValueError:
OutputMessage('trace %s value error: %s' % (
trace, str(jsondata['traces'][trace][0])))
continue
for bound in ['high', 'low']:
trace_values[trace].setdefault(bound, tracevalue)
trace_values[trace]['high'] = max(trace_values[trace]['high'],
tracevalue)
trace_values[trace]['low'] = min(trace_values[trace]['low'],
tracevalue)
if 'high' not in trace_values[tracename]:
OutputMessage('no suitable traces matched, skipping')
continue
if value_type == 'relative':
# Calculate assuming high deltas are regressions and low deltas are
# improvements.
regress = (float(trace_values[tracename]['high']) -
float(trace_values[reftracename]['low']))
improve = (float(trace_values[tracename]['low']) -
float(trace_values[reftracename]['high']))
elif value_type == 'absolute':
# Calculate assuming high absolutes are regressions and low absolutes are
# improvements.
regress = float(trace_values[tracename]['high'])
improve = float(trace_values[tracename]['low'])
# So far we've assumed better is lower (regress > improve). If the actual
# values for regress and improve are equal, though, and better was not
# specified, alert the user so we don't let them create a new file with
# ambiguous rules.
if better == None and regress == improve:
OutputMessage('regress (%s) is equal to improve (%s), and "better" is '
'unspecified, please fix by setting "better": "lower" or '
'"better": "higher" in this perf trace\'s expectation' % (
regress, improve), verbose_message=False)
return 1
# If the existing values assume regressions are low deltas relative to
# improvements, swap our regress and improve. This value must be a
# scores-like result.
if 'regress' in perf[key] and 'improve' in perf[key]:
if perf[key]['regress'] < perf[key]['improve']:
assert(better != 'lower')
better = 'higher'
temp = regress
regress = improve
improve = temp
else:
# Sometimes values are equal, e.g., when they are both 0,
# 'better' may still be set to 'higher'.
assert(better != 'higher' or
perf[key]['regress'] == perf[key]['improve'])
better = 'lower'
# If both were ints keep as int, otherwise use the float version.
originally_ints = False
if FloatIsInt(regress) and FloatIsInt(improve):
originally_ints = True
if better == 'higher':
if originally_ints:
regress = int(math.floor(regress - abs(regress*tolerance)))
improve = int(math.ceil(improve + abs(improve*tolerance)))
else:
regress = regress - abs(regress*tolerance)
improve = improve + abs(improve*tolerance)
else:
if originally_ints:
improve = int(math.floor(improve - abs(improve*tolerance)))
regress = int(math.ceil(regress + abs(regress*tolerance)))
else:
improve = improve - abs(improve*tolerance)
regress = regress + abs(regress*tolerance)
# Calculate the new checksum to test if this is the only thing that may have
# changed.
checksum_rowdata = GetRowData(perf, key)
new_checksum = GetRowDigest(checksum_rowdata, key)
if ('regress' in perf[key] and 'improve' in perf[key] and
perf[key]['regress'] == regress and perf[key]['improve'] == improve and
original_checksum == new_checksum):
OutputMessage('no change')
continue
write_new_expectations = True
OutputMessage('traces: %s' % trace_values, verbose_message=False)
OutputMessage('before: %s' % perf[key], verbose_message=False)
perf[key]['regress'] = regress
perf[key]['improve'] = improve
OutputMessage('after: %s' % perf[key], verbose_message=False)
if options.checksum:
if found_checksum_mismatch:
return 1
else:
return 0
if write_new_expectations:
print '\nWriting expectations... ',
WriteJson(perf_file, perf, perfkeys)
print 'done'
else:
if options.verbose:
print ''
print 'No changes.'
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
{"linux-release-64/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 8, "regress": 8, "tolerance": 0, "sha1": "de779422"},
"linux-release-64/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 6, "regress": 8, "sha1": "f29296a1"},
"linux-release-64/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "228221af"},
"linux-release/sizes/chrome-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 9, "regress": 9, "tolerance": 0, "sha1": "b639bbc4"},
"linux-release/sizes/nacl_helper-si/initializers": {"reva": 480969, "revb": 480969, "type": "absolute", "better": "lower", "improve": 7, "regress": 9, "sha1": "3394be7f"},
"linux-release/sizes/nacl_helper_bootstrap-si/initializers": {"reva": 114822, "revb": 115019, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "dd908f29"},
"mac-release/sizes/chrome-si/initializers": {"reva": 281731, "revb": 281731, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "tolerance": 0, "sha1": "01759b7f"},
"load": true
}
{"linux-release/media_tests_av_perf/audio_latency/latency": {"reva": 180005, "revb": 180520, "type": "absolute", "better": "lower", "improve": 190, "regress": 222, "sha1": "fc9815d5"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fb8157f9"},
"linux-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "c0fb3421"},
"linux-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "fa9582d3"},
"linux-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 166, "regress": 231, "sha1": "ca3a7a47"},
"linux-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"linux-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 32, "regress": 28},
"linux-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 34239, "revb": 1298213, "type": "absolute", "better": "higher", "improve": 0, "regress": 0},
"win-release/media_tests_av_perf/dropped_fps/tulip2.wav": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "646c02f2"},
"win-release/media_tests_av_perf/dropped_fps/tulip2.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "46c97b57"},
"win-release/media_tests_av_perf/dropped_frames/crowd1080.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 0, "regress": 0, "sha1": "9b709aab"},
"win-release/media_tests_av_perf/dropped_frames/crowd2160.webm": {"reva": 181131, "revb": 181572, "type": "absolute", "better": "lower", "improve": 174, "regress": 204, "sha1": "4c0270a6"},
"win-release/media_tests_av_perf/fps/crowd1080.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 53, "regress": 43, "sha1": "7ad49461"},
"win-release/media_tests_av_perf/fps/crowd2160.webm": {"reva": 176330, "revb": 176978, "type": "absolute", "better": "higher", "improve": 26.0399945997, "regress": 25.9062437562, "sha1": "700526a9"},
"win-release/media_tests_av_perf/fps/crowd360.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 51, "regress": 47, "sha1": "7f8ef21c"},
"win-release/media_tests_av_perf/fps/crowd480.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 50, "regress": 47, "sha1": "5dc96881"},
"win-release/media_tests_av_perf/fps/crowd720.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 52, "regress": 47, "sha1": "4fcfb653"},
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "54d94538"},
"win-release/media_tests_av_perf/fps/tulip2.mp3": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "113aef17"},
"win-release/media_tests_av_perf/fps/tulip2.mp4": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "a22847d0"},
"win-release/media_tests_av_perf/fps/tulip2.ogg": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "6ee2e716"},
"win-release/media_tests_av_perf/fps/tulip2.ogv": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 32, "regress": 26, "sha1": "dfadb872"},
"win-release/media_tests_av_perf/fps/tulip2.wav": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 0, "regress": 0, "sha1": "530c5bf5"},
"win-release/media_tests_av_perf/fps/tulip2.webm": {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher", "improve": 30, "regress": 28, "sha1": "35b91c8e"}
}
\ No newline at end of file
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verify perf_expectations.json can be loaded using simplejson.
perf_expectations.json is a JSON-formatted file. This script verifies
that simplejson can load it correctly. It should catch most common
formatting problems.
"""
import subprocess
import sys
import os
import unittest
import re
simplejson = None
def OnTestsLoad():
old_path = sys.path
script_path = os.path.dirname(sys.argv[0])
load_path = None
global simplejson
# This test script should be stored in src/tools/perf_expectations/. That
# directory will most commonly live in 2 locations:
#
# - a regular Chromium checkout, in which case src/third_party
# is where to look for simplejson
#
# - a buildbot checkout, in which case .../pylibs is where
# to look for simplejson
#
# Locate and install the correct path based on what we can find.
#
for path in ('../../../third_party', '../../../../../pylibs'):
path = os.path.join(script_path, path)
if os.path.exists(path) and os.path.isdir(path):
load_path = os.path.abspath(path)
break
if load_path is None:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error locating simplejson load path (%s)" % msg
# Try importing simplejson once. If this succeeds, we found it and will
# load it again later properly. Fail if we cannot load it.
sys.path.append(load_path)
try:
import simplejson as Simplejson
simplejson = Simplejson
except ImportError, e:
msg = "%s expects to live within a Chromium checkout" % sys.argv[0]
raise Exception, "Error trying to import simplejson from %s (%s)" % \
(load_path, msg)
finally:
sys.path = old_path
return True
def LoadJsonFile(filename):
f = open(filename, 'r')
try:
data = simplejson.load(f)
except ValueError, e:
f.seek(0)
print "Error reading %s:\n%s" % (filename,
f.read()[:50]+'...')
raise e
f.close()
return data
OnTestsLoad()
CONFIG_JSON = os.path.join(os.path.dirname(sys.argv[0]),
'../chromium_perf_expectations.cfg')
MAKE_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../make_expectations.py')
PERF_EXPECTATIONS = os.path.join(os.path.dirname(sys.argv[0]),
'../perf_expectations.json')
class PerfExpectationsUnittest(unittest.TestCase):
def testPerfExpectations(self):
# Test data is dictionary.
perf_data = LoadJsonFile(PERF_EXPECTATIONS)
if not isinstance(perf_data, dict):
raise Exception('perf expectations is not a dict')
# Test the 'load' key.
if not 'load' in perf_data:
raise Exception("perf expectations is missing a load key")
if not isinstance(perf_data['load'], bool):
raise Exception("perf expectations load key has non-bool value")
# Test all key values are dictionaries.
bad_keys = []
for key in perf_data:
if key == 'load':
continue
if not isinstance(perf_data[key], dict):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys have non-dict values"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all key values have delta and var keys.
for key in perf_data:
if key == 'load':
continue
# First check if regress/improve is in the key's data.
if 'regress' in perf_data[key]:
if 'improve' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['regress'], int) and
not isinstance(perf_data[key]['regress'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['improve'], int) and
not isinstance(perf_data[key]['improve'], float)):
bad_keys.append(key)
else:
# Otherwise check if delta/var is in the key's data.
if 'delta' not in perf_data[key] or 'var' not in perf_data[key]:
bad_keys.append(key)
if (not isinstance(perf_data[key]['delta'], int) and
not isinstance(perf_data[key]['delta'], float)):
bad_keys.append(key)
if (not isinstance(perf_data[key]['var'], int) and
not isinstance(perf_data[key]['var'], float)):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations key values missing or invalid delta/var"
raise Exception("%s: %s" % (msg, bad_keys))
# Test all keys have the correct format.
for key in perf_data:
if key == 'load':
continue
# tools/buildbot/scripts/master/log_parser.py should have a matching
# regular expression.
if not re.match(r"^([\w\.-]+)/([\w\.-]+)/([\w\.-]+)/([\w\.-]+)$", key):
bad_keys.append(key)
if len(bad_keys) > 0:
msg = "perf expectations keys in bad format, expected a/b/c/d"
raise Exception("%s: %s" % (msg, bad_keys))
def testNoUpdatesNeeded(self):
p = subprocess.Popen([MAKE_EXPECTATIONS, '-s'], stdout=subprocess.PIPE)
p.wait();
self.assertEqual(p.returncode, 0,
msg='Update expectations first by running ./make_expectations.py')
def testConfigFile(self):
# Test that the config file can be parsed as JSON.
config = LoadJsonFile(CONFIG_JSON)
# Require the following keys.
if 'base_url' not in config:
raise Exception('base_url not specified in config file')
if 'perf_file' not in config:
raise Exception('perf_file not specified in config file')
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare tests that require re-baselining for input to make_expectations.py.
The regularly running perf-AV tests require re-baselineing of expectations
about once a week. The steps involved in rebaselining are:
1.) Identify the tests to update, based off reported e-mail results.
2.) Figure out reva and revb values, which is the starting and ending revision
numbers for the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json referring to the tests to be updated,
so that they may be used as input to make_expectations.py.
This script automates the last step above.
Here's a sample line from perf_expectations.json:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0, "sha1": "54d94538"},
To get the above test ready for input to make_expectations.py, it should become:
"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0},
Examples:
1.) To update the test specified above and get baseline
values using the revision range 12345 and 23456, run this script with a command
line like this:
python update_perf_expectations.py -f \
win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
Or, using an input file,
where the input file contains a single line with text
win-release/media_tests_av_perf/fps/tulip2.m4a
run with this command line:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
2.) Let's say you want to update all seek tests on windows, and get baseline
values using the revision range 12345 and 23456.
Run this script with this command line:
python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
--reva 12345 --revb 23456
Or:
python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
--revb 23456
Or, using an input file,
where the input file contains a single line with text win-release/.*/seek/:
python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456
3.) Similarly, if you want to update seek tests on all platforms
python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
--revb 23456
"""
import logging
from optparse import OptionParser
import os
import re
import make_expectations as perf_ex_lib
# Default logging is INFO. Use --verbose to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.INFO
def GetTestsToUpdate(contents, all_test_keys):
"""Parses input contents and obtains tests to be re-baselined.
Args:
contents: string containing contents of input file.
all_test_keys: list of keys of test dictionary.
Returns:
A list of keys for tests that should be updated.
"""
# Each line of the input file specifies a test case to update.
tests_list = []
for test_case_filter in contents.splitlines():
# Skip any empty lines.
if test_case_filter:
# Sample expected line:
# win-release/media_tests_av_perf/seek/\
# CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
# Or, if reg-ex, then sample line:
# win-release/media-tests_av_perf/seek*
# Skip any leading spaces if they exist in the input file.
logging.debug('Trying to match %s', test_case_filter)
tests_list.extend(GetMatchingTests(test_case_filter.strip(),
all_test_keys))
return tests_list
def GetMatchingTests(tests_to_update, all_test_keys):
"""Parses input reg-ex filter and obtains tests to be re-baselined.
Args:
tests_to_update: reg-ex string specifying tests to be updated.
all_test_keys: list of keys of tests dictionary.
Returns:
A list of keys for tests that should be updated.
"""
tests_list = []
search_string = re.compile(tests_to_update)
# Get matching tests from the dictionary of tests
for test_key in all_test_keys:
if search_string.match(test_key):
tests_list.append(test_key)
logging.debug('%s will be updated', test_key)
logging.info('%s tests found matching reg-ex: %s', len(tests_list),
tests_to_update)
return tests_list
def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
"""Modifies value of tests that are to re-baselined:
Set reva and revb values to specified new values. Remove sha1.
Args:
tests_to_update: list of tests to be updated.
all_tests: dictionary of all tests.
reva: oldest revision in range to use for new values.
revb: newest revision in range to use for new values.
Raises:
ValueError: If reva or revb are not valid ints, or if either
of them are negative.
"""
reva = int(reva)
revb = int(revb)
if reva < 0 or revb < 0:
raise ValueError('Revision values should be positive.')
# Ensure reva is less than revb.
# (this is similar to the check done in make_expectations.py)
if revb < reva:
temp = revb
revb = reva
reva = temp
for test_key in tests_to_update:
# Get original test from the dictionary of tests
test_value = all_tests[test_key]
if test_value:
# Sample line in perf_expectations.json:
# "linux-release/media_tests _av_perf/dropped_frames/crowd360.webm":\
# {"reva": 155180, "revb": 155280, "type": "absolute", \
# "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
# Set new revision range
test_value['reva'] = reva
test_value['revb'] = revb
# Remove sha1 to indicate this test requires an update
# Check first to make sure it exist.
if 'sha1' in test_value:
del test_value['sha1']
else:
logging.warning('%s does not exist.', test_key)
logging.info('Done preparing tests for update.')
def GetCommandLineOptions():
"""Parse command line arguments.
Returns:
An options object containing command line arguments and their values.
"""
parser = OptionParser()
parser.add_option('--reva', dest='reva', type='int',
help='Starting revision of new range.',
metavar='START_REVISION')
parser.add_option('--revb', dest='revb', type='int',
help='Ending revision of new range.',
metavar='END_REVISION')
parser.add_option('-f', dest='tests_filter',
help='Regex to use for filtering tests to be updated. '
'Exactly one of -f or -i must be provided.',
metavar='FILTER', default='')
parser.add_option('-i', dest='input_file',
help='Path to a file with reg-exes for tests to'
' update. Use this instead of the -f filter.',
metavar='INPUT_FILE', default='')
parser.add_option('--config', dest='config_file',
default=perf_ex_lib.DEFAULT_CONFIG_FILE,
help='Set the config file to FILE.', metavar='FILE')
parser.add_option('-v', dest='verbose', action='store_true', default=False,
help='Enable verbose output.')
options = parser.parse_args()[0]
return options
def Main():
"""Main driver function."""
options = GetCommandLineOptions()
_SetLogger(options.verbose)
# Do some command-line validation
if not options.input_file and not options.tests_filter:
logging.error('At least one of input-file or test-filter must be provided.')
exit(1)
if options.input_file and options.tests_filter:
logging.error('Specify only one of input-file or test-filter.')
exit(1)
if not options.reva or not options.revb:
logging.error('Start and end revision of range must be specified.')
exit(1)
# Load config.
config = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(options.config_file))
# Obtain the perf expectations file from the config file.
perf_file = os.path.join(
os.path.dirname(options.config_file), config['perf_file'])
# We should have all the information we require now.
# On to the real thang.
# First, get all the existing tests from the original perf_expectations file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile(perf_file))
all_test_keys = all_tests.keys()
# Remove the load key, because we don't want to modify it.
all_test_keys.remove('load')
# Keep tests sorted, like in the original file.
all_test_keys.sort()
# Next, get all tests that have been identified for an update.
tests_to_update = []
if options.input_file:
# Tests to update have been specified in an input_file.
# Get contents of file.
tests_filter = perf_ex_lib.ReadFile(options.input_file)
elif options.tests_filter:
# Tests to update have been specified as a reg-ex filter.
tests_filter = options.tests_filter
# Get tests to update based on filter specified.
tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
logging.info('Done obtaining matching tests.')
# Now, prepare tests for update.
PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)
# Finally, write modified tests back to perf_expectations file.
perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
calculate_sha1=False)
logging.info('Done writing tests for update to %s.', perf_file)
def _SetLogger(verbose):
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
if __name__ == '__main__':
Main()
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for update_perf_expectations."""
import copy
from StringIO import StringIO
import unittest
import make_expectations as perf_ex_lib
import update_perf_expectations as upe_mod
# A separate .json file contains the list of test cases we'll use.
# The tests used to be defined inline here, but are >80 characters in length.
# Now they are expected to be defined in file ./sample_test_cases.json.
# Create a dictionary of tests using .json file.
all_tests = perf_ex_lib.ConvertJsonIntoDict(
perf_ex_lib.ReadFile('sample_test_cases.json'))
# Get all keys.
all_tests_keys = all_tests.keys()
def VerifyPreparedTests(self, tests_to_update, reva, revb):
# Work with a copy of the set of tests.
all_tests_copy = copy.deepcopy(all_tests)
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests_copy, reva, revb)
# Make sure reva < revb
if reva > revb:
reva, revb = revb, reva
# Run through all tests and make sure only those that were
# specified to be modified had their 'sha1' value removed.
for test_key in all_tests_keys:
new_test_value = all_tests_copy[test_key]
original_test_value = all_tests[test_key]
if test_key in tests_to_update:
# Make sure there is no "sha1".
self.assertFalse('sha1' in new_test_value)
# Make sure reva and revb values are correctly set.
self.assertEqual(reva, new_test_value['reva'])
self.assertEqual(revb, new_test_value['revb'])
else:
# Make sure there is a "sha1" value.
self.assertTrue('sha1' in new_test_value)
# Make sure the sha1, reva and revb values have not changed.
self.assertEqual(original_test_value['sha1'], new_test_value['sha1'])
self.assertEqual(original_test_value['reva'], new_test_value['reva'])
self.assertEqual(original_test_value['revb'], new_test_value['revb'])
class UpdatePerfExpectationsTest(unittest.TestCase):
def testFilterMatch(self):
"""Verifies different regular expressions test filter."""
self.maxDiff = None
# Tests to update specified by a single literal string.
tests_to_update = 'win-release/media_tests_av_perf/fps/tulip2.webm'
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
self.assertEqual(expected_tests_list,
upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys))
# Tests to update specified by a single reg-ex
tests_to_update = 'win-release/media_tests_av_perf/fps.*'
expected_tests_list = ['win-release/media_tests_av_perf/fps/crowd1080.webm',
'win-release/media_tests_av_perf/fps/crowd2160.webm',
'win-release/media_tests_av_perf/fps/crowd360.webm',
'win-release/media_tests_av_perf/fps/crowd480.webm',
'win-release/media_tests_av_perf/fps/crowd720.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
# Tests to update are specified by a single reg-ex, spanning multiple OSes.
tests_to_update = '.*-release/media_tests_av_perf/fps.*'
expected_tests_list = ['linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/crowd1080.webm',
'win-release/media_tests_av_perf/fps/crowd2160.webm',
'win-release/media_tests_av_perf/fps/crowd360.webm',
'win-release/media_tests_av_perf/fps/crowd480.webm',
'win-release/media_tests_av_perf/fps/crowd720.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetMatchingTests(tests_to_update,
all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
def testLinesFromInputFile(self):
"""Verifies different string formats specified in input file."""
# Tests to update have been specified by a single literal string in
# an input file.
# Use the StringIO class to mock a file object.
lines_from_file = StringIO(
'win-release/media_tests_av_perf/fps/tulip2.webm')
contents = lines_from_file.read()
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
# Tests to update specified by a single reg-ex in an input file.
lines_from_file = StringIO('win-release/media_tests_av_perf/fps/tulip2.*\n')
contents = lines_from_file.read()
expected_tests_list = ['win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
# Tests to update specified by multiple lines in an input file.
lines_from_file = StringIO(
'.*-release/media_tests_av_perf/fps/tulip2.*\n'
'win-release/media_tests_av_perf/dropped_fps/tulip2.*\n'
'linux-release/media_tests_av_perf/audio_latency/latency')
contents = lines_from_file.read()
expected_tests_list = [
'linux-release/media_tests_av_perf/audio_latency/latency',
'linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
'win-release/media_tests_av_perf/fps/tulip2.m4a',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
actual_list = upe_mod.GetTestsToUpdate(contents, all_tests_keys)
actual_list.sort()
self.assertEqual(expected_tests_list, actual_list)
lines_from_file.close()
def testPreparingForUpdate(self):
"""Verifies that tests to be modified are changed as expected."""
tests_to_update = [
'linux-release/media_tests_av_perf/audio_latency/latency',
'linux-release/media_tests_av_perf/fps/tulip2.m4a',
'linux-release/media_tests_av_perf/fps/tulip2.mp3',
'linux-release/media_tests_av_perf/fps/tulip2.mp4',
'linux-release/media_tests_av_perf/fps/tulip2.ogg',
'linux-release/media_tests_av_perf/fps/tulip2.ogv',
'linux-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.wav',
'win-release/media_tests_av_perf/dropped_fps/tulip2.webm',
'win-release/media_tests_av_perf/fps/tulip2.mp3',
'win-release/media_tests_av_perf/fps/tulip2.mp4',
'win-release/media_tests_av_perf/fps/tulip2.ogg',
'win-release/media_tests_av_perf/fps/tulip2.ogv',
'win-release/media_tests_av_perf/fps/tulip2.wav',
'win-release/media_tests_av_perf/fps/tulip2.webm']
# Test regular positive integers.
reva = 12345
revb = 54321
VerifyPreparedTests(self, tests_to_update, reva, revb)
# Test negative values.
reva = -54321
revb = 12345
with self.assertRaises(ValueError):
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
# Test reva greater than revb.
reva = 54321
revb = 12345
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
# Test non-integer values.
reva = 'sds'
revb = 12345
with self.assertRaises(ValueError):
upe_mod.PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb)
if __name__ == '__main__':
unittest.main()
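# The suite can be run directly; note that sample_test_cases.json is read
# relative to the current working directory, so run from the directory that
# contains both files (the script filename below is assumed):
#   python update_perf_expectations_unittest.py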