Commit c5b0e657 authored by Caleb Rouleau, committed by Commit Bot

[Speed Waterfall] Rename variables: 'perf' => 'official'.

Now there are two types of benchmarks:
1. official benchmarks: benchmarks defined in tools/perf/benchmarks.
  -- These are officially supported and we will keep them working.
  -- They are also the only ones scheduled on the main chrome.perf waterfall.
2. contrib benchmarks: benchmarks defined in tools/perf/contrib.
  -- We don't run these on our own bots (except in rare circumstances).
  -- We maintain the right to break these when necessary to move the
     framework forward.

"all benchmarks" now means "official benchmarks" + "contrib benchmarks".

Also remove the word "all" in places where it is redundant/confusing.

This changelist lays the groundwork for the changes needed for issue 961830.
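
For illustration, a caller would pick up the renamed helpers roughly as in the
sketch below. This is a hypothetical snippet, not part of the change; it only
exercises the benchmark_finders functions renamed in the diff and assumes it is
run from tools/perf so that the core package is importable.

  # Hypothetical usage sketch (not part of this change).
  from core import benchmark_finders

  # Official benchmarks live in tools/perf/benchmarks and are the only ones
  # scheduled on the main chrome.perf waterfall.
  official = benchmark_finders.GetOfficialBenchmarks()

  # Contrib benchmarks live in tools/perf/contrib and are not run on the
  # main waterfall.
  contrib = benchmark_finders.GetContribBenchmarks()

  # "All benchmarks" now simply means official + contrib, sorted by name.
  all_benchmarks = benchmark_finders.GetAllBenchmarks()
  assert len(all_benchmarks) == len(official) + len(contrib)

  for benchmark in all_benchmarks:
    print(benchmark.Name())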

TBR: jbudorick@chromium.org
Bug: 961830
Change-Id: I9985cd75814b284fc3b748597ce6724f9a53068a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1606223
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#659190}
parent 9ef8c134
@@ -19,7 +19,7 @@ from py_utils import discover
 def _GetAllPerfBenchmarks():
   return discover.DiscoverClasses(
-      path_util.GetPerfBenchmarksDir(), path_util.GetPerfDir(),
+      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
       benchmark_module.Benchmark, index_by_class_name=True).values()
@@ -17,7 +17,7 @@ from py_utils import discover
 def _GetAllSystemHealthBenchmarks():
   all_perf_benchmarks = discover.DiscoverClasses(
-      path_util.GetPerfBenchmarksDir(), path_util.GetPerfDir(),
+      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
       benchmark_module.Benchmark,
       index_by_class_name=True).values()
   return [b for b in all_perf_benchmarks if
@@ -43,12 +43,12 @@ def GetBenchmarkNamesForFile(top_level_dir, benchmark_file_dir):
     sys.path = original_sys_path
-def GetAllPerfBenchmarks():
+def GetOfficialBenchmarks():
   """Returns the list of all benchmarks to be run on perf waterfall.
   The benchmarks are sorted by order of their names.
   """
   benchmarks = discover.DiscoverClasses(
-      start_dir=path_util.GetPerfBenchmarksDir(),
+      start_dir=path_util.GetOfficialBenchmarksDir(),
       top_level_dir=path_util.GetPerfDir(),
       base_class=benchmark_module.Benchmark,
       index_by_class_name=True).values()
@@ -56,12 +56,12 @@ def GetAllPerfBenchmarks():
   return benchmarks
-def GetAllContribBenchmarks():
+def GetContribBenchmarks():
   """Returns the list of all contrib benchmarks.
   The benchmarks are sorted by order of their names.
   """
   benchmarks = discover.DiscoverClasses(
-      start_dir=path_util.GetPerfContribDir(),
+      start_dir=path_util.GetContribDir(),
       top_level_dir=path_util.GetPerfDir(),
       base_class=benchmark_module.Benchmark,
       index_by_class_name=True).values()
@@ -73,9 +73,9 @@ def GetAllBenchmarks():
   """Returns all benchmarks in tools/perf directory.
   The benchmarks are sorted by order of their names.
   """
-  all_perf_benchmarks = GetAllPerfBenchmarks()
-  all_contrib_benchmarks = GetAllContribBenchmarks()
-  benchmarks = all_perf_benchmarks + all_contrib_benchmarks
+  waterfall_benchmarks = GetOfficialBenchmarks()
+  contrib_benchmarks = GetContribBenchmarks()
+  benchmarks = waterfall_benchmarks + contrib_benchmarks
   benchmarks.sort(key=lambda b: b.Name())
   return benchmarks
@@ -10,12 +10,12 @@ from core import benchmark_finders
 _SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')
-_ALL_TELEMETRY_BENCHMARKS_BY_NAMES= dict(
+_ALL_BENCHMARKS_BY_NAMES= dict(
     (b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())
-_ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS = frozenset(
-    benchmark_finders.GetAllPerfBenchmarks())
+_OFFICIAL_BENCHMARKS = frozenset(
+    benchmark_finders.GetOfficialBenchmarks())
 def _IsPlatformSupported(benchmark, platform):
@@ -38,10 +38,10 @@ class PerfPlatform(object):
     if benchmarks_names_to_run:
       benchmarks = []
       for benchmark_name in benchmarks_names_to_run:
-        benchmarks.append(_ALL_TELEMETRY_BENCHMARKS_BY_NAMES[benchmark_name])
+        benchmarks.append(_ALL_BENCHMARKS_BY_NAMES[benchmark_name])
       benchmarks_to_run = frozenset(benchmarks)
     else:
-      benchmarks_to_run = _ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS
+      benchmarks_to_run = _OFFICIAL_BENCHMARKS
     platform = self._sort_key.split(' ', 1)[0]
     self._benchmarks_to_run = frozenset([
         b for b in benchmarks_to_run if _IsPlatformSupported(b, platform)])
@@ -203,12 +203,12 @@ ALL_PLATFORMS = {
     p for p in locals().values() if isinstance(p, PerfPlatform)
 }
-ALL_PERF_FYI_PLATFORMS = {
+FYI_PLATFORMS = {
     p for p in ALL_PLATFORMS if p.is_fyi
 }
-ALL_PERF_PLATFORMS = {
+OFFICIAL_PLATFORMS = {
     p for p in ALL_PLATFORMS if not p.is_fyi
 }
@@ -218,6 +218,6 @@ ALL_PLATFORM_NAMES = {
 }
-ALL_PERF_PLATFORM_NAMES = {
-    p.name for p in ALL_PERF_PLATFORMS
+OFFICIAL_PLATFORM_NAMES = {
+    p.name for p in OFFICIAL_PLATFORMS
 }
@@ -34,11 +34,11 @@ def GetPerfStorySetsDir():
   return os.path.join(GetPerfDir(), 'page_sets')
-def GetPerfBenchmarksDir():
+def GetOfficialBenchmarksDir():
   return os.path.join(GetPerfDir(), 'benchmarks')
-def GetPerfContribDir():
+def GetContribDir():
   return os.path.join(GetPerfDir(), 'contrib')
@@ -791,7 +791,7 @@ NON_WATERFALL_BENCHMARKS = {
 def _get_telemetry_perf_benchmarks_metadata():
   metadata = {}
-  benchmark_list = benchmark_finders.GetAllPerfBenchmarks()
+  benchmark_list = benchmark_finders.GetOfficialBenchmarks()
   for benchmark in benchmark_list:
     emails = decorators.GetEmails(benchmark)
@@ -161,14 +161,14 @@ def ValidatePerfConfigFile(file_handle, is_main_perf_waterfall):
     else:
       raise ValueError('%s has unrecognizable type: %s' % key)
   if (is_main_perf_waterfall and
-      perf_testing_builder_names != bot_platforms.ALL_PERF_PLATFORM_NAMES):
+      perf_testing_builder_names != bot_platforms.OFFICIAL_PLATFORM_NAMES):
     raise ValueError(
         'Found mismatches between actual perf waterfall builders and platforms '
         'in core.bot_platforms. Please update the platforms in '
        'bot_platforms.py.\nPlatforms should be aded to core.bot_platforms:%s'
         '\nPlatforms should be removed from core.bot_platforms:%s' % (
-            perf_testing_builder_names - bot_platforms.ALL_PERF_PLATFORM_NAMES,
-            bot_platforms.ALL_PERF_PLATFORM_NAMES - perf_testing_builder_names))
+            perf_testing_builder_names - bot_platforms.OFFICIAL_PLATFORM_NAMES,
+            bot_platforms.OFFICIAL_PLATFORM_NAMES - perf_testing_builder_names))
 def main(args):
@@ -128,7 +128,7 @@ def main(args):
     raw_input(
         'No benchmark name is specified. Fetching all benchmark deps. '
         'Press enter to continue...')
-    for b in benchmark_finders.GetAllPerfBenchmarks():
+    for b in benchmark_finders.GetOfficialBenchmarks():
       deps[b.Name()] = _FetchDepsForBenchmark(b)
   if options.output_deps:
@@ -28,7 +28,7 @@ def main(args):
   if options.include_contrib:
     benchmarks = benchmark_finders.GetAllBenchmarks()
   else:
-    benchmarks = benchmark_finders.GetAllPerfBenchmarks()
+    benchmarks = benchmark_finders.GetOfficialBenchmarks()
   for b in benchmarks:
     print '{:<60}'.format(b.Name())
@@ -13,6 +13,6 @@ from telemetry import record_wpr
 if __name__ == '__main__':
   config = chromium_config.ChromiumConfig(
-      benchmark_dirs=[path_util.GetPerfBenchmarksDir()],
+      benchmark_dirs=[path_util.GetOfficialBenchmarksDir()],
       top_level_dir=path_util.GetPerfDir())
   sys.exit(record_wpr.Main(environment=config))
@@ -15,8 +15,8 @@ from chrome_telemetry_build import chromium_config
 def main():
   config = chromium_config.ChromiumConfig(
-      benchmark_dirs=[path_util.GetPerfBenchmarksDir(),
-                      path_util.GetPerfContribDir()],
+      benchmark_dirs=[path_util.GetOfficialBenchmarksDir(),
+                      path_util.GetContribDir()],
       top_level_dir=path_util.GetPerfDir(),
       expectations_files=[path_util.GetExpectationsPath()])
   return benchmark_runner.main(config)