Commit 3622289d authored by Caleb Rouleau, committed by Commit Bot

[Benchmarking] Add gtest perf tests to OBBS for Win 7 builders.

Note that this change also reshards the Win 7 builders, which
may cause false-positive regressions. In the event of
perf regressions, this CL should not be reverted, since it
is not causing the regression; it just moves the tests
to run on a different device (of the same model and OS).

Also, use only 4 shards instead of 5. We are seeing
that these bots die sometimes (examples: crbug.com/1038720,
crbug.com/1038719), so it is better to keep one device in
reserve to take over if another one dies.

See
https://chromium-review.googlesource.com/c/chromium/src/+/1983326
for background.

This moves all the gtest perf tests on Win 7 Perf and Win 7
Nvidia GPU Perf into the performance_test_suite and onto the
shard maps. This allows us to run them in parallel with the Telemetry
benchmarks and account for their runtime within our system.
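
As an illustration of the shard map format this CL introduces (the "executables" entries with "path" and "arguments" visible in the JSON diffs below), here is a minimal, hypothetical Python sketch of how a harness could turn one such entry into a gtest command line. The helper name build_gtest_commands and the 'out/Release' build directory are invented for this example and are not part of the actual Chromium runner.

import os

def build_gtest_commands(shard_entry, build_dir):
    """Yield (name, argv) for each gtest perf binary listed on one shard."""
    for name, spec in shard_entry.get('executables', {}).items():
        # 'path' is the binary inside the build output; 'arguments' are its extra flags.
        argv = [os.path.join(build_dir, spec['path'])] + spec.get('arguments', [])
        yield name, argv

# Example using the validating command buffer entry from the shard map below:
shard = {
    'executables': {
        'validating_command_buffer_perftests': {
            'path': 'command_buffer_perftests',
            'arguments': ['--use-cmd-decoder=validating', '--use-stub'],
        },
    },
}
for name, argv in build_gtest_commands(shard, 'out/Release'):
    print(name, argv)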

Bug: 921353
Change-Id: I284924447435325da2263131e0b7df6411b04e4c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1985134
Commit-Queue: Caleb Rouleau <crouleau@chromium.org>
Commit-Queue: John Chen <johnchen@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Auto-Submit: Caleb Rouleau <crouleau@chromium.org>
Cr-Commit-Position: refs/heads/master@{#728068}
parent 7354503d
......@@ -2884,12 +2884,19 @@ group("ct_telemetry_perf_tests_without_chrome") {
group("performance_test_suite") {
testonly = true
deps = [
"//chrome/test:angle_perftests",
"//chrome/test:telemetry_perf_tests",
"//components:components_perftests",
"//components/tracing:tracing_perftests",
"//gpu:command_buffer_perftests",
"//gpu:command_buffer_perftests",
"//gpu:gpu_perftests",
]
if (!is_android && !is_fuchsia) {
deps += [ "//chrome/test:load_library_perf_tests" ]
}
data_deps = [
"//testing:run_perf_test",
]
......
......@@ -218,9 +218,38 @@ _OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2 = PerfSuite(
['blink_perf.display_locking', 'jetstream2'])
_TRACING_PERFTESTS = ExecutableConfig('tracing_perftests', estimated_runtime=50)
_COMPONENTS_PERFTESTS = ExecutableConfig('components_perftests',
estimated_runtime=110)
_COMPONENTS_PERFTESTS = ExecutableConfig(
'components_perftests', flags=[
'--xvfb',
], estimated_runtime=110,)
_GPU_PERFTESTS = ExecutableConfig('gpu_perftests', estimated_runtime=60)
_LOAD_LIBRARY_PERF_TESTS = ExecutableConfig(
'load_library_perf_tests', estimated_runtime=3)
_MEDIA_PERFTESTS = ExecutableConfig(
'media_perftests', flags=[
'--single-process-tests', '--test-launcher-retry-limit=0',
'--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
'*unoptimized_aligned',
], estimated_runtime=16)
_ANGLE_PERFTESTS = ExecutableConfig(
'angle_perftests', flags=[
'--test-launcher-retry-limit=0',
'--test-launcher-jobs=1',
], estimated_runtime=1988)
_PASSTHROUGH_COMMAND_BUFFER_PERFTESTS = ExecutableConfig(
'passthrough_command_buffer_perftests',
path='command_buffer_perftests',
flags=[
'--use-cmd-decoder=passthrough',
'--use-angle=gl-null',
], estimated_runtime=30)
_VALIDATING_COMMAND_BUFFER_PERFTESTS = ExecutableConfig(
'validating_command_buffer_perftests',
path='command_buffer_perftests',
flags=[
'--use-cmd-decoder=validating',
'--use-stub',
], estimated_runtime=23)
_LINUX_BENCHMARK_CONFIGS = _OFFICIAL_EXCEPT_DISPLAY_LOCKING
_MAC_HIGH_END_BENCHMARK_CONFIGS = _OFFICIAL_EXCEPT_DISPLAY_LOCKING
......@@ -231,7 +260,13 @@ _WIN_10_LOW_END_HP_CANDIDATE_BENCHMARK_CONFIGS = PerfSuite(
[_GetBenchmarkConfig('v8.browsing_desktop')])
_WIN_7_BENCHMARK_CONFIGS = PerfSuite(
_OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2).Remove(['rendering.desktop'])
_WIN_7_EXECUTABLE_CONFIGS = frozenset([
_LOAD_LIBRARY_PERF_TESTS, _COMPONENTS_PERFTESTS, _MEDIA_PERFTESTS])
_WIN_7_GPU_BENCHMARK_CONFIGS = _OFFICIAL_EXCEPT_DISPLAY_LOCKING_JETSTREAM2
_WIN_7_GPU_EXECUTABLE_CONFIGS = frozenset([
_LOAD_LIBRARY_PERF_TESTS, _ANGLE_PERFTESTS, _MEDIA_PERFTESTS,
_PASSTHROUGH_COMMAND_BUFFER_PERFTESTS,
_VALIDATING_COMMAND_BUFFER_PERFTESTS])
_ANDROID_GO_BENCHMARK_CONFIGS = PerfSuite([
_GetBenchmarkConfig('system_health.memory_mobile'),
_GetBenchmarkConfig('system_health.common_mobile'),
......@@ -309,10 +344,10 @@ WIN_10 = PerfPlatform(
26, 'win')
WIN_7 = PerfPlatform(
'Win 7 Perf', 'N/A', _WIN_7_BENCHMARK_CONFIGS,
5, 'win')
4, 'win', executables=_WIN_7_EXECUTABLE_CONFIGS)
WIN_7_GPU = PerfPlatform(
'Win 7 Nvidia GPU Perf', 'N/A', _WIN_7_GPU_BENCHMARK_CONFIGS,
5, 'win')
4, 'win', executables=_WIN_7_GPU_EXECUTABLE_CONFIGS)
# Android
ANDROID_GO = PerfPlatform(
......
......@@ -600,21 +600,6 @@ BUILDERS = {
{
'isolate': 'performance_test_suite',
},
{
'isolate': 'load_library_perf_tests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
},
{
'isolate': 'components_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
},
{
'isolate': 'media_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
}
],
'platform': 'win',
'target_bits': 32,
......@@ -633,41 +618,6 @@ BUILDERS = {
'--assert-gpu-compositing',
],
},
{
'isolate': 'load_library_perf_tests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
},
{
'isolate': 'angle_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
},
{
'isolate': 'media_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
},
{
'name': 'passthrough_command_buffer_perftests',
'isolate': 'command_buffer_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
'extra_args': [
'--use-cmd-decoder=passthrough',
'--use-angle=gl-null',
],
},
{
'name': 'validating_command_buffer_perftests',
'isolate': 'command_buffer_perftests',
'num_shards': 1,
'type': TEST_TYPES.GTEST,
'extra_args': [
'--use-cmd-decoder=validating',
'--use-stub',
],
},
],
'platform': 'win',
'target_bits': 64,
......@@ -946,6 +896,10 @@ def get_scheduled_non_telemetry_benchmarks(perf_waterfall_file):
'performance_weblayer_test_suite'):
test_names.add(name)
for platform in bot_platforms.ALL_PLATFORMS:
for executable in platform.executables:
test_names.add(executable.name)
return test_names
......
......@@ -45,10 +45,12 @@ class PerfDataGeneratorTest(unittest.TestCase):
try:
with open(fake_perf_waterfall_file, 'w') as f:
json.dump(data, f)
self.assertEquals(
perf_data_generator.get_scheduled_non_telemetry_benchmarks(
fake_perf_waterfall_file),
{'ninja_test', 'gun_slinger', 'test_dancing', 'test_singing'})
benchmarks = perf_data_generator.get_scheduled_non_telemetry_benchmarks(
fake_perf_waterfall_file)
self.assertIn('ninja_test', benchmarks)
self.assertIn('gun_slinger', benchmarks)
self.assertIn('test_dancing', benchmarks)
self.assertIn('test_singing', benchmarks)
finally:
os.remove(fake_perf_waterfall_file)
......@@ -83,8 +85,8 @@ class TestIsPerfBenchmarksSchedulingValid(unittest.TestCase):
valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
'dummy', self.test_stream)
self.assertEquals(valid, True)
self.assertEquals(self.test_stream.getvalue(), '')
self.assertEquals(valid, True)
def test_UnscheduledCppBenchmarks(self):
self.get_non_telemetry_benchmarks.return_value = {'honda'}
......
......@@ -56,15 +56,27 @@
"abridged": false
},
"loading.desktop": {
"end": 87,
"end": 86,
"abridged": false
}
},
"executables": {
"angle_perftests": {
"path": "angle_perftests",
"arguments": [
"--test-launcher-retry-limit=0",
"--test-launcher-jobs=1"
]
},
"load_library_perf_tests": {
"path": "load_library_perf_tests"
}
}
},
"1": {
"benchmarks": {
"loading.desktop": {
"begin": 87,
"begin": 86,
"abridged": false
},
"media.desktop": {
......@@ -86,15 +98,6 @@
"abridged": false
},
"rendering.desktop": {
"end": 197,
"abridged": false
}
}
},
"2": {
"benchmarks": {
"rendering.desktop": {
"begin": 197,
"abridged": false
},
"speedometer": {
......@@ -110,10 +113,36 @@
"abridged": false
},
"system_health.common_desktop": {
"end": 5,
"abridged": false
}
},
"executables": {
"media_perftests": {
"path": "media_perftests",
"arguments": [
"--single-process-tests",
"--test-launcher-retry-limit=0",
"--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::*unoptimized_aligned"
]
},
"passthrough_command_buffer_perftests": {
"path": "command_buffer_perftests",
"arguments": [
"--use-cmd-decoder=passthrough",
"--use-angle=gl-null"
]
}
}
},
"2": {
"benchmarks": {
"system_health.common_desktop": {
"begin": 5,
"abridged": false
},
"system_health.memory_desktop": {
"end": 21,
"end": 78,
"abridged": false
}
}
......@@ -121,7 +150,7 @@
"3": {
"benchmarks": {
"system_health.memory_desktop": {
"begin": 21,
"begin": 78,
"abridged": false
},
"tab_switching.typical_25": {
......@@ -131,15 +160,6 @@
"abridged": false
},
"v8.browsing_desktop": {
"end": 23,
"abridged": false
}
}
},
"4": {
"benchmarks": {
"v8.browsing_desktop": {
"begin": 23,
"abridged": false
},
"v8.browsing_desktop-future": {
......@@ -151,18 +171,26 @@
"webrtc": {
"abridged": false
}
},
"executables": {
"validating_command_buffer_perftests": {
"path": "command_buffer_perftests",
"arguments": [
"--use-cmd-decoder=validating",
"--use-stub"
]
}
}
},
"extra_infos": {
"num_stories": 1148,
"predicted_min_shard_time": 9648.0,
"predicted_min_shard_index": 0,
"predicted_max_shard_time": 9692.0,
"num_stories": 1170,
"predicted_min_shard_time": 15618.0,
"predicted_min_shard_index": 1,
"predicted_max_shard_time": 15708.0,
"predicted_max_shard_index": 2,
"shard #0": 9648.0,
"shard #1": 9686.0,
"shard #2": 9692.0,
"shard #3": 9670.0,
"shard #4": 9652.0
"shard #0": 15668.0,
"shard #1": 15618.0,
"shard #2": 15708.0,
"shard #3": 15638.0
}
}
\ No newline at end of file
......@@ -56,15 +56,26 @@
"abridged": false
},
"loading.desktop": {
"end": 60,
"end": 77,
"abridged": false
}
},
"executables": {
"components_perftests": {
"path": "components_perftests",
"arguments": [
"--xvfb"
]
},
"load_library_perf_tests": {
"path": "load_library_perf_tests"
}
}
},
"1": {
"benchmarks": {
"loading.desktop": {
"begin": 60,
"begin": 77,
"abridged": false
},
"media.desktop": {
......@@ -98,27 +109,28 @@
"abridged": false
},
"system_health.common_desktop": {
"end": 12,
"abridged": false
}
}
},
"2": {
"benchmarks": {
"system_health.common_desktop": {
"begin": 12,
"abridged": false
},
"system_health.memory_desktop": {
"end": 45,
"end": 39,
"abridged": false
}
},
"executables": {
"media_perftests": {
"path": "media_perftests",
"arguments": [
"--single-process-tests",
"--test-launcher-retry-limit=0",
"--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::*unoptimized_aligned"
]
}
}
},
"3": {
"2": {
"benchmarks": {
"system_health.memory_desktop": {
"begin": 45,
"begin": 39,
"abridged": false
},
"tab_switching.typical_25": {
......@@ -131,15 +143,15 @@
"abridged": false
},
"v8.browsing_desktop-future": {
"end": 15,
"end": 23,
"abridged": false
}
}
},
"4": {
"3": {
"benchmarks": {
"v8.browsing_desktop-future": {
"begin": 15,
"begin": 23,
"abridged": false
},
"v8.runtime_stats.top_25": {
......@@ -151,15 +163,14 @@
}
},
"extra_infos": {
"num_stories": 902,
"predicted_min_shard_time": 8348.0,
"num_stories": 916,
"predicted_min_shard_time": 12204.0,
"predicted_min_shard_index": 2,
"predicted_max_shard_time": 8462.0,
"predicted_max_shard_index": 1,
"shard #0": 8426.0,
"shard #1": 8462.0,
"shard #2": 8348.0,
"shard #3": 8454.0,
"shard #4": 8450.0
"predicted_max_shard_time": 12310.0,
"predicted_max_shard_index": 3,
"shard #0": 12258.0,
"shard #1": 12236.0,
"shard #2": 12204.0,
"shard #3": 12310.0
}
}
\ No newline at end of file