Commit 8c2eb4f7 authored by Ned Nguyen, committed by Commit Bot

Always put Telemetry tests at the end to avoid starving fast tests

This also adds this check to the PRESUBMIT perf_json_config_validator script.

Bug: 873389
Cq-Include-Trybots: master.tryserver.chromium.perf:obbs_fyi
Change-Id: I16fd762e567d5a41d75a221456e12e14a03f0b85
Reviewed-on: https://chromium-review.googlesource.com/1171578
Reviewed-by: Stephen Martinis <martiniss@chromium.org>
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#582422}
parent 6d603296
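For illustration, here is a minimal sketch of the ordering rule this change enforces in the waterfall generator below; the function and field names are hypothetical, not the actual generator API:

def order_isolated_scripts(tests):
  # Partition a builder's test configs into gtest-based isolates and
  # Telemetry suites; entries default to telemetry=True when unspecified.
  telemetry_tests = [t for t in tests if t.get('telemetry', True)]
  gtest_tests = [t for t in tests if not t.get('telemetry', True)]
  # Emit gtests first and Telemetry suites last so the long-running
  # Telemetry shards cannot starve the fast tests (crbug.com/873389).
  return (sorted(gtest_tests, key=lambda t: t['name']) +
          sorted(telemetry_tests, key=lambda t: t['name']))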
@@ -184,13 +184,12 @@
     },
     {
       "args": [
-        "-v",
-        "--browser=release",
-        "--upload-results",
-        "--run-ref-build",
-        "--test-shard-map-filename=mac1012_5_shard_map.json"
+        "--gtest-benchmark-name",
+        "views_perftests",
+        "--non-telemetry=true",
+        "--migrated-test=true"
       ],
-      "isolate_name": "performance_test_suite",
+      "isolate_name": "views_perftests",
       "merge": {
         "args": [
           "--service-account-file",
@@ -198,9 +197,9 @@
         ],
         "script": "//tools/perf/process_perf_results.py"
       },
-      "name": "performance_test_suite",
+      "name": "views_perftests",
       "override_compile_targets": [
-        "performance_test_suite"
+        "views_perftests"
       ],
       "swarming": {
         "can_use_on_swarming_builders": true,
@@ -215,7 +214,7 @@
         "hard_timeout": 25200,
         "ignore_task_failure": false,
         "io_timeout": 1800,
-        "shards": 5,
+        "shards": 1,
         "upload_test_results": true
       },
       "trigger_script": {
@@ -228,12 +227,13 @@
     },
     {
       "args": [
-        "--gtest-benchmark-name",
-        "views_perftests",
-        "--non-telemetry=true",
-        "--migrated-test=true"
+        "-v",
+        "--browser=release",
+        "--upload-results",
+        "--run-ref-build",
+        "--test-shard-map-filename=mac1012_5_shard_map.json"
       ],
-      "isolate_name": "views_perftests",
+      "isolate_name": "performance_test_suite",
       "merge": {
         "args": [
           "--service-account-file",
@@ -241,9 +241,9 @@
         ],
         "script": "//tools/perf/process_perf_results.py"
       },
-      "name": "views_perftests",
+      "name": "performance_test_suite",
       "override_compile_targets": [
-        "views_perftests"
+        "performance_test_suite"
       ],
       "swarming": {
         "can_use_on_swarming_builders": true,
@@ -258,7 +258,7 @@
         "hard_timeout": 25200,
         "ignore_task_failure": false,
         "io_timeout": 1800,
-        "shards": 1,
+        "shards": 5,
         "upload_test_results": true
       },
       "trigger_script": {
...
This diff is collapsed.
@@ -70,14 +70,6 @@ NEW_PERF_RECIPE_FYI_TESTERS = {
   'testers' : {
     'OBBS Mac 10.12 Perf': {
       'tests': [
-        {
-          'isolate': 'performance_test_suite',
-          'extra_args': [
-            '--run-ref-build',
-            '--test-shard-map-filename=mac1012_5_shard_map.json',
-          ],
-          'num_shards': 5
-        },
         {
           'isolate': 'net_perftests',
           'num_shards': 1,
@@ -87,6 +79,14 @@ NEW_PERF_RECIPE_FYI_TESTERS = {
           'isolate': 'views_perftests',
           'num_shards': 1,
           'telemetry': False,
+        },
+        {
+          'isolate': 'performance_test_suite',
+          'extra_args': [
+            '--run-ref-build',
+            '--test-shard-map-filename=mac1012_5_shard_map.json',
+          ],
+          'num_shards': 5
         }
       ],
       'platform': 'mac',
@@ -949,11 +949,20 @@ def load_and_update_fyi_json(fyi_waterfall_file):
 def generate_telemetry_tests(testers, tests):
   for tester, tester_config in testers['testers'].iteritems():
-    isolated_scripts = []
+    telemetry_tests = []
+    gtest_tests = []
     for test in tester_config['tests']:
-      isolated_scripts.append(generate_performance_test(tester_config, test))
+      generated_script = generate_performance_test(tester_config, test)
+      if test.get('telemetry', True):
+        telemetry_tests.append(generated_script)
+      else:
+        gtest_tests.append(generated_script)
+    telemetry_tests.sort(key=lambda x: x['name'])
+    gtest_tests.sort(key=lambda x: x['name'])
     tests[tester] = {
-        'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
+        # Put Telemetry tests at the end since they tend to run longer, to
+        # avoid starving gtests (see crbug.com/873389).
+        'isolated_scripts': gtest_tests + telemetry_tests
     }
...
@@ -101,7 +101,9 @@ def _ValidateBrowserType(builder_name, test_config):
 def ValidateTestingBuilder(builder_name, builder_data):
   isolated_scripts = builder_data['isolated_scripts']
+  test_names = []
   for test_config in isolated_scripts:
+    test_names.append(test_config['name'])
     _ValidateSwarmingDimension(
         builder_name,
         swarming_dimensions=test_config['swarming'].get('dimension_sets', {}))
@@ -110,6 +112,17 @@ def ValidateTestingBuilder(builder_name, builder_data):
     _ValidateShardingData(builder_name, test_config)
     _ValidateBrowserType(builder_name, test_config)
+  if ('performance_test_suite' in test_names or
+      'performance_webview_test_suite' in test_names):
+    if test_names[-1] not in ('performance_test_suite',
+                              'performance_webview_test_suite'):
+      raise ValueError(
+          'performance_test_suite or performance_webview_test_suite must run '
+          'at the end of builder %s to avoid starving other test steps '
+          '(see crbug.com/873389). Instead found %s' % (
+              repr(builder_name), test_names[-1]))

 def _IsBuilderName(name):
   return not name.startswith('AAA')
...
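As a rough illustration of what the new PRESUBMIT check amounts to (a simplified sketch with hypothetical names, not the validator's actual API):

PERF_SUITES = ('performance_test_suite', 'performance_webview_test_suite')

def check_perf_suite_is_last(builder_name, test_names):
  # If a builder runs one of the big Telemetry suites, it must be the
  # last isolated script so it does not starve the other test steps.
  if any(name in test_names for name in PERF_SUITES):
    if test_names[-1] not in PERF_SUITES:
      raise ValueError(
          'performance suites must run at the end of builder %r '
          '(see crbug.com/873389); instead found %s'
          % (builder_name, test_names[-1]))

# Example: this ordering would be rejected because the Telemetry suite
# is not last.
# check_perf_suite_is_last(
#     'OBBS Mac 10.12 Perf',
#     ['performance_test_suite', 'net_perftests', 'views_perftests'])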