Commit 522d21ad authored by Wenbin Zhang, committed by Commit Bot

[benchmarking] Allow per-builder runtime estimates for gtests, and remove unused gtests

The run time estimate for each gtest is hardcoded regardless of platform. This is inaccurate and causes uneven sharding. Update the configs so each builder can supply its own estimate for the same gtest.

Also remove some unused gtests.

Bug: chromium:1130157
Change-Id: I467f851e95f4aa2c4a7ad089a21862195eca42bc
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2470117
Reviewed-by: John Chen <johnchen@chromium.org>
Commit-Queue: Wenbin Zhang <wenbinzhang@google.com>
Cr-Commit-Position: refs/heads/master@{#817147}
parent 7c3bad99
@@ -227,70 +227,92 @@ _OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
     OFFICIAL_BENCHMARK_CONFIGS).Remove(
         ['blink_perf.display_locking', 'jetstream2'])
-_TRACING_PERFTESTS = ExecutableConfig('tracing_perftests', estimated_runtime=50)
-_COMPONENTS_PERFTESTS = ExecutableConfig(
-    'components_perftests', flags=[
-        '--xvfb',
-    ], estimated_runtime=110,)
-_GPU_PERFTESTS = ExecutableConfig('gpu_perftests', estimated_runtime=60)
-_LOAD_LIBRARY_PERF_TESTS = ExecutableConfig(
-    'load_library_perf_tests', estimated_runtime=3)
-_MEDIA_PERFTESTS = ExecutableConfig(
-    'media_perftests', flags=[
-        '--single-process-tests', '--test-launcher-retry-limit=0',
-        '--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
-        '*unoptimized_aligned',
-    ], estimated_runtime=16)
-_ANGLE_PERFTESTS = ExecutableConfig(
-    'angle_perftests', flags=[
-        '--test-launcher-retry-limit=0',
-        '--test-launcher-jobs=1',
-    ], estimated_runtime=1988)
-_PASSTHROUGH_COMMAND_BUFFER_PERFTESTS = ExecutableConfig(
-    'passthrough_command_buffer_perftests',
-    path='command_buffer_perftests',
-    flags=[
-        '--use-cmd-decoder=passthrough',
-        '--use-angle=gl-null',
-    ], estimated_runtime=30)
-_VALIDATING_COMMAND_BUFFER_PERFTESTS = ExecutableConfig(
-    'validating_command_buffer_perftests',
-    path='command_buffer_perftests',
-    flags=[
-        '--use-cmd-decoder=validating',
-        '--use-stub',
-    ], estimated_runtime=23)
-_VIEWS_PERFTESTS = ExecutableConfig(
-    'views_perftests', flags=[
-        '--xvfb'
-    ], estimated_runtime=7)
-_BASE_PERFTESTS = ExecutableConfig(
-    'base_perftests', flags=[
-        '--test-launcher-jobs=1',
-        '--test-launcher-retry-limit=0'
-    ], estimated_runtime=270)
-_NET_PERFTESTS = ExecutableConfig('net_perftests', estimated_runtime=60)
-_DAWN_PERF_TESTS = ExecutableConfig(
-    'dawn_perf_tests', flags=[
-        '--test-launcher-jobs=1',
-        '--test-launcher-retry-limit=0'
-    ], estimated_runtime=270)
-_PERFORMANCE_BROWSER_TESTS = ExecutableConfig(
-    'performance_browser_tests',
-    path='browser_tests',
-    flags=[
-        '--full-performance-run',
-        '--test-launcher-jobs=1',
-        '--test-launcher-retry-limit=0',
-        # Allow the full performance runs to take up to 60 seconds (rather than
-        # the default of 30 for normal CQ browser test runs).
-        '--ui-test-action-timeout=60000',
-        '--ui-test-action-max-timeout=60000',
-        '--test-launcher-timeout=60000',
-        '--gtest_filter=*/TabCapturePerformanceTest.*:'
-        '*/CastV2PerformanceTest.*',
-    ],
-    estimated_runtime=67)
+def _angle_perftests(estimated_runtime=1988):
+  return ExecutableConfig('angle_perftests',
+                          flags=[
+                              '--test-launcher-retry-limit=0',
+                              '--test-launcher-jobs=1',
+                          ],
+                          estimated_runtime=estimated_runtime)
+
+
+def _base_perftests(estimated_runtime=270):
+  return ExecutableConfig(
+      'base_perftests',
+      flags=['--test-launcher-jobs=1', '--test-launcher-retry-limit=0'],
+      estimated_runtime=estimated_runtime)
+
+
+def _components_perftests(estimated_runtime=110):
+  return ExecutableConfig('components_perftests',
+                          flags=[
+                              '--xvfb',
+                          ],
+                          estimated_runtime=estimated_runtime)
+
+
+def _dawn_perf_tests(estimated_runtime=270):
+  return ExecutableConfig(
+      'dawn_perf_tests',
+      flags=['--test-launcher-jobs=1', '--test-launcher-retry-limit=0'],
+      estimated_runtime=estimated_runtime)
+
+
+def _gpu_perftests(estimated_runtime=60):
+  return ExecutableConfig('gpu_perftests', estimated_runtime=estimated_runtime)
+
+
+def _load_library_perf_tests(estimated_runtime=3):
+  return ExecutableConfig('load_library_perf_tests',
+                          estimated_runtime=estimated_runtime)
+
+
+def _media_perftests(estimated_runtime=16):
+  return ExecutableConfig(
+      'media_perftests',
+      flags=[
+          '--single-process-tests',
+          '--test-launcher-retry-limit=0',
+          '--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
+          '*unoptimized_aligned',
+      ],
+      estimated_runtime=estimated_runtime)
+
+
+def _net_perftests(estimated_runtime=60):
+  return ExecutableConfig('net_perftests', estimated_runtime=estimated_runtime)
+
+
+def _performance_browser_tests(estimated_runtime=67):
+  return ExecutableConfig(
+      'performance_browser_tests',
+      path='browser_tests',
+      flags=[
+          '--full-performance-run',
+          '--test-launcher-jobs=1',
+          '--test-launcher-retry-limit=0',
+          # Allow the full performance runs to take up to 60 seconds (rather
+          # than the default of 30 for normal CQ browser test runs).
+          '--ui-test-action-timeout=60000',
+          '--ui-test-action-max-timeout=60000',
+          '--test-launcher-timeout=60000',
+          '--gtest_filter=*/TabCapturePerformanceTest.*:'
+          '*/CastV2PerformanceTest.*',
+      ],
+      estimated_runtime=estimated_runtime)
+
+
+def _tracing_perftests(estimated_runtime=50):
+  return ExecutableConfig('tracing_perftests',
+                          estimated_runtime=estimated_runtime)
+
+
+def _views_perftests(estimated_runtime=7):
+  return ExecutableConfig('views_perftests',
+                          flags=['--xvfb'],
+                          estimated_runtime=estimated_runtime)
+
+
 _LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
     'blink_perf.display_locking',
@@ -298,40 +320,45 @@ _LINUX_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
 ])
 _LINUX_EXECUTABLE_CONFIGS = frozenset([
     # TODO(crbug.com/811766): Add views_perftests.
-    _PERFORMANCE_BROWSER_TESTS,
-    _LOAD_LIBRARY_PERF_TESTS,
-    _NET_PERFTESTS,
-    _TRACING_PERFTESTS,
-    _MEDIA_PERFTESTS,
-    _BASE_PERFTESTS,
+    _base_perftests(),
+    _load_library_perf_tests(),
+    _media_perftests(),
+    _net_perftests(),
+    _performance_browser_tests(),
+    _tracing_perftests(),
 ])
 _MAC_HIGH_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
     'blink_perf.display_locking',
     'v8.runtime_stats.top_25',
 ])
 _MAC_HIGH_END_EXECUTABLE_CONFIGS = frozenset([
-    _DAWN_PERF_TESTS,
-    _PERFORMANCE_BROWSER_TESTS,
-    _NET_PERFTESTS,
-    _MEDIA_PERFTESTS,
-    _BASE_PERFTESTS,
-    _VIEWS_PERFTESTS,
+    _base_perftests(),
+    _dawn_perf_tests(),
+    _media_perftests(),
+    _net_perftests(),
+    _performance_browser_tests(),
+    _views_perftests(),
 ])
 _MAC_LOW_END_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
     'jetstream2',
     'v8.runtime_stats.top_25',
 ])
 _MAC_LOW_END_EXECUTABLE_CONFIGS = frozenset([
-    _PERFORMANCE_BROWSER_TESTS,
-    _LOAD_LIBRARY_PERF_TESTS,
+    _load_library_perf_tests(),
+    _performance_browser_tests(),
 ])
 _WIN_10_BENCHMARK_CONFIGS = PerfSuite(OFFICIAL_BENCHMARK_CONFIGS).Remove([
     'blink_perf.display_locking',
     'v8.runtime_stats.top_25',
 ])
 _WIN_10_EXECUTABLE_CONFIGS = frozenset([
-    _ANGLE_PERFTESTS, _MEDIA_PERFTESTS, _COMPONENTS_PERFTESTS, _VIEWS_PERFTESTS,
-    _BASE_PERFTESTS, _DAWN_PERF_TESTS])
+    _angle_perftests(),
+    _base_perftests(),
+    _components_perftests(),
+    _dawn_perf_tests(),
+    _media_perftests(),
+    _views_perftests(),
+])
 _WIN_10_LOW_END_BENCHMARK_CONFIGS = PerfSuite(
     OFFICIAL_BENCHMARK_CONFIGS).Remove([
         'blink_perf.display_locking',
@@ -366,7 +393,10 @@ _ANDROID_NEXUS_5_BENCHMARK_CONFIGS = PerfSuite([
     'system_health.webview_startup',
 ]).Abridge(['loading.mobile', 'startup.mobile', 'system_health.common_mobile'])
 _ANDROID_NEXUS_5_EXECUTABLE_CONFIGS = frozenset([
-    _TRACING_PERFTESTS, _COMPONENTS_PERFTESTS, _GPU_PERFTESTS])
+    _components_perftests(),
+    _gpu_perftests(),
+    _tracing_perftests(),
+])
 _ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
     OFFICIAL_BENCHMARK_CONFIGS).Remove([
         'blink_perf.display_locking',
@@ -375,7 +405,9 @@ _ANDROID_NEXUS_5X_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
 ])
 _ANDROID_PIXEL2_BENCHMARK_CONFIGS = _OFFICIAL_EXCEPT_DISPLAY_LOCKING
 _ANDROID_PIXEL2_EXECUTABLE_CONFIGS = frozenset([
-    _COMPONENTS_PERFTESTS, _MEDIA_PERFTESTS])
+    _components_perftests(),
+    _media_perftests(),
+])
 _ANDROID_PIXEL2_WEBVIEW_BENCHMARK_CONFIGS = PerfSuite(
     OFFICIAL_BENCHMARK_CONFIGS).Remove([
         'blink_perf.display_locking',
...
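For illustration, a minimal sketch (not part of this change) of how a builder's executable list can now pass its own estimate instead of inheriting the shared default; the platform name and the override value below are hypothetical:

# Hypothetical builder config: each factory call may override the default
# estimated_runtime (in seconds) declared by the function, so sharding can
# reflect how long the suite actually takes on this hardware.
_EXAMPLE_SLOW_BOT_EXECUTABLE_CONFIGS = frozenset([
    _components_perftests(estimated_runtime=220),  # default is 110
    _media_perftests(),  # keeps the default estimate of 16
])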