Commit c5b0e657 authored by Caleb Rouleau, committed by Commit Bot

[Speed Waterfall] Rename variables: 'perf' => 'official'.

Now there are two types of benchmarks:
1. official benchmarks: benchmarks defined in tools/perf/benchmarks.
  -- These are officially supported, and we will keep them working.
  -- They are also the only ones scheduled on the main chrome.perf waterfall.
2. contrib benchmarks: benchmarks defined in tools/perf/contrib.
  -- We don't run these on our own bots (except in rare circumstances).
  -- We reserve the right to break these when necessary to move the
     framework forward.

"all benchmarks" now means "official benchmarks" + "contrib benchmarks".

Also remove the word "all" in places where it is redundant/confusing.

This changelist prepares the ground for the changes tracked in issue 961830.

TBR: jbudorick@chromium.org
Bug: 961830
Change-Id: I9985cd75814b284fc3b748597ce6724f9a53068a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1606223
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#659190}
parent 9ef8c134
@@ -19,7 +19,7 @@ from py_utils import discover
 def _GetAllPerfBenchmarks():
   return discover.DiscoverClasses(
-      path_util.GetPerfBenchmarksDir(), path_util.GetPerfDir(),
+      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
       benchmark_module.Benchmark, index_by_class_name=True).values()
...
@@ -17,7 +17,7 @@ from py_utils import discover
 def _GetAllSystemHealthBenchmarks():
   all_perf_benchmarks = discover.DiscoverClasses(
-      path_util.GetPerfBenchmarksDir(), path_util.GetPerfDir(),
+      path_util.GetOfficialBenchmarksDir(), path_util.GetPerfDir(),
       benchmark_module.Benchmark,
       index_by_class_name=True).values()
   return [b for b in all_perf_benchmarks if
...
@@ -43,12 +43,12 @@ def GetBenchmarkNamesForFile(top_level_dir, benchmark_file_dir):
     sys.path = original_sys_path


-def GetAllPerfBenchmarks():
+def GetOfficialBenchmarks():
   """Returns the list of all benchmarks to be run on perf waterfall.

   The benchmarks are sorted by order of their names.
   """
   benchmarks = discover.DiscoverClasses(
-      start_dir=path_util.GetPerfBenchmarksDir(),
+      start_dir=path_util.GetOfficialBenchmarksDir(),
       top_level_dir=path_util.GetPerfDir(),
       base_class=benchmark_module.Benchmark,
       index_by_class_name=True).values()
@@ -56,12 +56,12 @@ def GetAllPerfBenchmarks():
   return benchmarks


-def GetAllContribBenchmarks():
+def GetContribBenchmarks():
   """Returns the list of all contrib benchmarks.

   The benchmarks are sorted by order of their names.
   """
   benchmarks = discover.DiscoverClasses(
-      start_dir=path_util.GetPerfContribDir(),
+      start_dir=path_util.GetContribDir(),
       top_level_dir=path_util.GetPerfDir(),
       base_class=benchmark_module.Benchmark,
       index_by_class_name=True).values()
@@ -73,9 +73,9 @@ def GetAllBenchmarks():
   """Returns all benchmarks in tools/perf directory.

   The benchmarks are sorted by order of their names.
   """
-  all_perf_benchmarks = GetAllPerfBenchmarks()
-  all_contrib_benchmarks = GetAllContribBenchmarks()
-  benchmarks = all_perf_benchmarks + all_contrib_benchmarks
+  waterfall_benchmarks = GetOfficialBenchmarks()
+  contrib_benchmarks = GetContribBenchmarks()
+  benchmarks = waterfall_benchmarks + contrib_benchmarks
   benchmarks.sort(key=lambda b: b.Name())
   return benchmarks
...
@@ -10,12 +10,12 @@ from core import benchmark_finders
 _SHARD_MAP_DIR = os.path.join(os.path.dirname(__file__), 'shard_maps')


-_ALL_TELEMETRY_BENCHMARKS_BY_NAMES = dict(
+_ALL_BENCHMARKS_BY_NAMES = dict(
     (b.Name(), b) for b in benchmark_finders.GetAllBenchmarks())


-_ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS = frozenset(
-    benchmark_finders.GetAllPerfBenchmarks())
+_OFFICIAL_BENCHMARKS = frozenset(
+    benchmark_finders.GetOfficialBenchmarks())


 def _IsPlatformSupported(benchmark, platform):
@@ -38,10 +38,10 @@ class PerfPlatform(object):
     if benchmarks_names_to_run:
       benchmarks = []
       for benchmark_name in benchmarks_names_to_run:
-        benchmarks.append(_ALL_TELEMETRY_BENCHMARKS_BY_NAMES[benchmark_name])
+        benchmarks.append(_ALL_BENCHMARKS_BY_NAMES[benchmark_name])
       benchmarks_to_run = frozenset(benchmarks)
     else:
-      benchmarks_to_run = _ALL_PERF_WATERFALL_TELEMETRY_BENCHMARKS
+      benchmarks_to_run = _OFFICIAL_BENCHMARKS
     platform = self._sort_key.split(' ', 1)[0]
     self._benchmarks_to_run = frozenset([
         b for b in benchmarks_to_run if _IsPlatformSupported(b, platform)])
@@ -203,12 +203,12 @@ ALL_PLATFORMS = {
     p for p in locals().values() if isinstance(p, PerfPlatform)
 }


-ALL_PERF_FYI_PLATFORMS = {
+FYI_PLATFORMS = {
     p for p in ALL_PLATFORMS if p.is_fyi
 }


-ALL_PERF_PLATFORMS = {
+OFFICIAL_PLATFORMS = {
     p for p in ALL_PLATFORMS if not p.is_fyi
 }
@@ -218,6 +218,6 @@ ALL_PLATFORM_NAMES = {
 }


-ALL_PERF_PLATFORM_NAMES = {
-    p.name for p in ALL_PERF_PLATFORMS
+OFFICIAL_PLATFORM_NAMES = {
+    p.name for p in OFFICIAL_PLATFORMS
 }
@@ -34,11 +34,11 @@ def GetPerfStorySetsDir():
   return os.path.join(GetPerfDir(), 'page_sets')


-def GetPerfBenchmarksDir():
+def GetOfficialBenchmarksDir():
   return os.path.join(GetPerfDir(), 'benchmarks')


-def GetPerfContribDir():
+def GetContribDir():
   return os.path.join(GetPerfDir(), 'contrib')
...
@@ -791,7 +791,7 @@ NON_WATERFALL_BENCHMARKS = {
 def _get_telemetry_perf_benchmarks_metadata():
   metadata = {}
-  benchmark_list = benchmark_finders.GetAllPerfBenchmarks()
+  benchmark_list = benchmark_finders.GetOfficialBenchmarks()
   for benchmark in benchmark_list:
     emails = decorators.GetEmails(benchmark)
...
@@ -161,14 +161,14 @@ def ValidatePerfConfigFile(file_handle, is_main_perf_waterfall):
     else:
       raise ValueError('%s has unrecognizable type: %s' % key)
   if (is_main_perf_waterfall and
-      perf_testing_builder_names != bot_platforms.ALL_PERF_PLATFORM_NAMES):
+      perf_testing_builder_names != bot_platforms.OFFICIAL_PLATFORM_NAMES):
     raise ValueError(
         'Found mismatches between actual perf waterfall builders and platforms '
         'in core.bot_platforms. Please update the platforms in '
         'bot_platforms.py.\nPlatforms should be aded to core.bot_platforms:%s'
         '\nPlatforms should be removed from core.bot_platforms:%s' % (
-            perf_testing_builder_names - bot_platforms.ALL_PERF_PLATFORM_NAMES,
-            bot_platforms.ALL_PERF_PLATFORM_NAMES - perf_testing_builder_names))
+            perf_testing_builder_names - bot_platforms.OFFICIAL_PLATFORM_NAMES,
+            bot_platforms.OFFICIAL_PLATFORM_NAMES - perf_testing_builder_names))


 def main(args):
...
@@ -128,7 +128,7 @@ def main(args):
     raw_input(
         'No benchmark name is specified. Fetching all benchmark deps. '
         'Press enter to continue...')
-    for b in benchmark_finders.GetAllPerfBenchmarks():
+    for b in benchmark_finders.GetOfficialBenchmarks():
       deps[b.Name()] = _FetchDepsForBenchmark(b)

   if options.output_deps:
...
@@ -28,7 +28,7 @@ def main(args):
   if options.include_contrib:
     benchmarks = benchmark_finders.GetAllBenchmarks()
   else:
-    benchmarks = benchmark_finders.GetAllPerfBenchmarks()
+    benchmarks = benchmark_finders.GetOfficialBenchmarks()

   for b in benchmarks:
     print '{:<60}'.format(b.Name())
...
@@ -13,6 +13,6 @@ from telemetry import record_wpr
 if __name__ == '__main__':
   config = chromium_config.ChromiumConfig(
-      benchmark_dirs=[path_util.GetPerfBenchmarksDir()],
+      benchmark_dirs=[path_util.GetOfficialBenchmarksDir()],
       top_level_dir=path_util.GetPerfDir())
   sys.exit(record_wpr.Main(environment=config))
@@ -15,8 +15,8 @@ from chrome_telemetry_build import chromium_config
 def main():
   config = chromium_config.ChromiumConfig(
-      benchmark_dirs=[path_util.GetPerfBenchmarksDir(),
-                      path_util.GetPerfContribDir()],
+      benchmark_dirs=[path_util.GetOfficialBenchmarksDir(),
+                      path_util.GetContribDir()],
       top_level_dir=path_util.GetPerfDir(),
       expectations_files=[path_util.GetExpectationsPath()])
   return benchmark_runner.main(config)
...