Commit 18d42d9e authored by Emily Hanley, committed by Commit Bot

Auto-generating new perf recipe json.

Bug: 757933
Change-Id: I20f5b6f70d125fc9aa6c5d7f541d378454a33445
Reviewed-on: https://chromium-review.googlesource.com/960935
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#543320}
parent 3760b8c8
@@ -406,7 +406,7 @@
       "args": [
         "-v",
         "--xvfb",
-        "--browser=release_x64"
+        "--browser=release"
       ],
       "isolate_name": "performance_test_suite",
       "merge": {
@@ -430,14 +430,16 @@
       "expiration": 36000,
       "hard_timeout": 36000,
       "ignore_task_failure": false,
-      "io_timeout": 3600,
+      "io_timeout": 1800,
       "shards": 26,
       "upload_test_results": true
     },
     "trigger_script": {
       "args": [
         "--multiple-trigger-configs",
-        "[{\"id\": \"build195-a9\"}, {\"id\": \"build196-a9\"}, {\"id\": \"build197-a9\"}, {\"id\": \"build198-a9\"}, {\"id\": \"build199-a9\"}, {\"id\": \"build200-a9\"}, {\"id\": \"build201-a9\"}, {\"id\": \"build202-a9\"}, {\"id\": \"build203-a9\"}, {\"id\": \"build204-a9\"}, {\"id\": \"build205-a9\"}, {\"id\": \"build206-a9\"}, {\"id\": \"build207-a9\"}, {\"id\": \"build208-a9\"}, {\"id\": \"build209-a9\"}, {\"id\": \"build210-a9\"}, {\"id\": \"build211-a9\"}, {\"id\": \"build212-a9\"}, {\"id\": \"build213-a9\"}, {\"id\": \"build214-a9\"}, {\"id\": \"build215-a9\"}, {\"id\": \"build216-a9\"}, {\"id\": \"build217-a9\"}, {\"id\": \"build218-a9\"}, {\"id\": \"build219-a9\"}, {\"id\": \"build220-a9\"}]"
+        "[{\"id\": \"build195-a9\"}, {\"id\": \"build196-a9\"}, {\"id\": \"build197-a9\"}, {\"id\": \"build198-a9\"}, {\"id\": \"build199-a9\"}, {\"id\": \"build200-a9\"}, {\"id\": \"build201-a9\"}, {\"id\": \"build202-a9\"}, {\"id\": \"build203-a9\"}, {\"id\": \"build204-a9\"}, {\"id\": \"build205-a9\"}, {\"id\": \"build206-a9\"}, {\"id\": \"build207-a9\"}, {\"id\": \"build208-a9\"}, {\"id\": \"build209-a9\"}, {\"id\": \"build210-a9\"}, {\"id\": \"build211-a9\"}, {\"id\": \"build212-a9\"}, {\"id\": \"build213-a9\"}, {\"id\": \"build214-a9\"}, {\"id\": \"build215-a9\"}, {\"id\": \"build216-a9\"}, {\"id\": \"build217-a9\"}, {\"id\": \"build218-a9\"}, {\"id\": \"build219-a9\"}, {\"id\": \"build220-a9\"}]",
+        "--multiple-dimension-script-verbose",
+        "True"
       ],
       "script": "//testing/trigger_scripts/perf_device_trigger.py"
     }
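The hunk above now hands the trigger script one swarming dimension dict per pinned bot, plus a verbose flag. As a minimal sketch of how a trigger script could consume these arguments (the standalone parser and function name here are illustrative assumptions, not the actual perf_device_trigger.py implementation):

import argparse
import json

def parse_trigger_args(argv):
  parser = argparse.ArgumentParser()
  # JSON list of swarming dimension dicts, one per bot, e.g.
  # '[{"id": "build195-a9"}, {"id": "build196-a9"}]'
  parser.add_argument('--multiple-trigger-configs', type=json.loads)
  parser.add_argument('--multiple-dimension-script-verbose', default='False')
  return parser.parse_args(argv)

if __name__ == '__main__':
  args = parse_trigger_args([
      '--multiple-trigger-configs',
      '[{"id": "swarm846-c4"}, {"id": "swarm847-c4"}]',
      '--multiple-dimension-script-verbose', 'True'])
  for config in args.multiple_trigger_configs:
    print('would trigger one swarming shard pinned to bot %s' % config['id'])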
@@ -489,8 +491,6 @@
     "isolate_name": "telemetry_perf_tests_experimental",
     "merge": {
       "args": [
-        "--configuration-name",
-        "buildbot-test",
         "--service-account-file",
         "/creds/service_accounts/service-account-chromium-perf-histograms.json"
       ],
@@ -510,14 +510,16 @@
       "expiration": 36000,
       "hard_timeout": 36000,
       "ignore_task_failure": false,
-      "io_timeout": 3600,
-      "shards": 2,
+      "io_timeout": 1800,
+      "shards": 3,
       "upload_test_results": true
     },
     "trigger_script": {
       "args": [
         "--multiple-trigger-configs",
-        "[{\"id\": \"swarm846-c4\"}, {\"id\": \"swarm847-c4\"}]"
+        "[{\"id\": \"swarm823-c4\"}, {\"id\": \"swarm846-c4\"}, {\"id\": \"swarm847-c4\"}]",
+        "--multiple-dimension-script-verbose",
+        "True"
       ],
       "script": "//testing/trigger_scripts/perf_device_trigger.py"
     }
@@ -13,5 +13,12 @@
       "memory.top_10_mobile",
       "scheduler.tough_scheduling_cases"
     ]
-  }
+  },
+  "2": {
+    "benchmarks": [
+      "blink_perf.paint",
+      "rasterize_and_record_micro.partial_invalidation",
+      "smoothness.tough_pinch_zoom_cases"
+    ]
+  }
 }
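This shard map assigns each swarming shard index its own benchmark list; the new "2" entry supplies benchmarks for the third shard added above. A rough sketch of how a runner could look up its slice of the map follows; the file name and helper are hypothetical, only the map shape comes from the diff:

import json
import os

def benchmarks_for_shard(shard_map_path, shard_index):
  # Keys are shard indices serialized as strings: "0", "1", "2", ...
  with open(shard_map_path) as f:
    shard_map = json.load(f)
  return shard_map[str(shard_index)]['benchmarks']

if __name__ == '__main__':
  # Swarming exposes the 0-based shard number through GTEST_SHARD_INDEX.
  index = int(os.environ.get('GTEST_SHARD_INDEX', '0'))
  for benchmark in benchmarks_for_shard('benchmark_shard_map.json', index):
    print(benchmark)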
@@ -1050,6 +1050,145 @@ def validate_tests(waterfall, waterfall_file, benchmark_file):
   return up_to_date
 
 
+# This section is how we will generate json with the new perf recipe.
+# We will only be generating one entry per isolate in the new world.
+# Right now this is simply adding and/or updating chromium.perf.fyi.json
+# until migration is complete. See crbug.com/757933 for more info.
+NEW_PERF_RECIPE_FYI_TESTERS = {
+  'testers': {
+    'One Buildbot Step Test Builder': {
+      'isolate': 'telemetry_perf_tests_experimental',
+      'platform': 'linux',
+      'testing': True,
+      'device_ids': [
+        'swarm823-c4',
+        'swarm846-c4',
+        'swarm847-c4'
+      ],
+    },
+    'Mac 10.12 Laptop Low End': {
+      'isolate': 'performance_test_suite',
+      'platform': 'mac',
+      'device_ids': [
+        'build195-a9', 'build196-a9', 'build197-a9', 'build198-a9',
+        'build199-a9', 'build200-a9', 'build201-a9', 'build202-a9',
+        'build203-a9', 'build204-a9', 'build205-a9', 'build206-a9',
+        'build207-a9', 'build208-a9', 'build209-a9', 'build210-a9',
+        'build211-a9', 'build212-a9', 'build213-a9', 'build214-a9',
+        'build215-a9', 'build216-a9', 'build217-a9', 'build218-a9',
+        'build219-a9', 'build220-a9'
+      ],
+    }
+  }
+}
+
+
+def add_common_test_properties(test, tester_config):
+  dimensions = []
+  for device_id in tester_config['device_ids']:
+    dimensions.append({'id': device_id})
+  test['trigger_script'] = {
+    'script': '//testing/trigger_scripts/perf_device_trigger.py',
+    'args': [
+      '--multiple-trigger-configs',
+      json.dumps(dimensions),
+      '--multiple-dimension-script-verbose',
+      'True'
+    ],
+  }
+  test['merge'] = {
+    'script': '//tools/perf/process_perf_results.py',
+    'args': [
+      '--service-account-file',
+      '/creds/service_accounts/service-account-chromium-perf-histograms.json'
+    ],
+  }
+  # One swarming shard is triggered per configured device, so the shard
+  # count is the number of device ids.
+  return len(dimensions)
+
+
+def generate_performance_test_suite(tester_config):
+  # First determine the browser we need based on the tester.
+  browser_name = ''
+  # For trybot testing we always use the reference build.
+  if tester_config.get('testing', False):
+    browser_name = 'reference'
+  elif tester_config['platform'] == 'android':
+    if tester_config.get('replace_system_webview', False):
+      browser_name = 'android-webview'
+    else:
+      browser_name = 'android-chromium'
+  elif (tester_config['platform'] == 'win'
+        and tester_config['target_bits'] == 64):
+    browser_name = 'release_x64'
+  else:
+    browser_name = 'release'
+
+  test_args = [
+    '-v',
+    '--xvfb',
+    '--browser=%s' % browser_name
+  ]
+  # Append --testing=true if we only want to run a subset of benchmarks
+  # for quicker testing.
+  if tester_config.get('testing', False):
+    test_args.append('--testing=true')
+
+  isolate_name = tester_config['isolate']
+  if browser_name == 'android-webview':
+    test_args.append(
+        '--webview-embedder-apk=../../out/Release/apks/SystemWebViewShell.apk')
+    isolate_name = 'telemetry_perf_webview_tests'
+
+  result = {
+    'args': test_args,
+    'isolate_name': isolate_name,
+    'name': isolate_name,
+    'override_compile_targets': [
+      isolate_name
+    ]
+  }
+  shards = add_common_test_properties(result, tester_config)
+  result['swarming'] = {
+    # Always say this is true regardless of whether the tester
+    # supports swarming. It doesn't hurt.
+    'can_use_on_swarming_builders': True,
+    'expiration': 10 * 60 * 60,  # 10 hour timeout
+    'hard_timeout': 10 * 60 * 60,  # 10 hours for full suite
+    'ignore_task_failure': False,
+    'io_timeout': 30 * 60,  # 30 minutes
+    'dimension_sets': [
+      {
+        'pool': 'Chrome-perf-fyi',
+      }
+    ],
+    'upload_test_results': True,
+    'shards': shards,
+  }
+  return result
+
+
+def load_and_update_new_recipe_json():
+  tests = {}
+  filename = 'chromium.perf.fyi.json'
+  buildbot_dir = os.path.join(
+      path_util.GetChromiumSrcDir(), 'testing', 'buildbot')
+  fyi_filepath = os.path.join(buildbot_dir, filename)
+  with open(fyi_filepath) as fp_r:
+    tests = json.load(fp_r)
+  with open(fyi_filepath, 'w') as fp:
+    # We have loaded what is there; now update or add the testers
+    # listed in NEW_PERF_RECIPE_FYI_TESTERS.
+    testers = NEW_PERF_RECIPE_FYI_TESTERS
+    for tester, tester_config in testers['testers'].iteritems():
+      isolated_scripts = [generate_performance_test_suite(tester_config)]
+      tests[tester] = {
+        'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
+      }
+    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
+    fp.write('\n')
+
+
 def main(args):
   parser = argparse.ArgumentParser(
       description=('Generate perf test\' json config and benchmark.csv. '
@@ -1079,6 +1218,7 @@ def main(args):
            'configs and benchmark.csv.') % sys.argv[0]
     return 1
   else:
+    load_and_update_new_recipe_json()
     update_all_tests(get_waterfall_config(), waterfall_file)
     update_benchmark_csv(benchmark_file)
   return 0
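Taken together, the generator emits one isolated_scripts entry per tester and derives the shard count from the pinned device list. A condensed standalone sketch of that flow, trimmed to the experimental tester (names local to this example; the real output carries the full args, merge, swarming, and trigger_script blocks shown in the diff):

import json

TESTERS = {
  'One Buildbot Step Test Builder': {
    'isolate': 'telemetry_perf_tests_experimental',
    'device_ids': ['swarm823-c4', 'swarm846-c4', 'swarm847-c4'],
  },
}

def build_entry(config):
  dimensions = [{'id': device_id} for device_id in config['device_ids']]
  return {
    'isolate_name': config['isolate'],
    'shards': len(dimensions),  # one shard per pinned device
    'trigger_configs': json.dumps(dimensions),
  }

for name, config in sorted(TESTERS.items()):
  print(name)
  print(json.dumps(build_entry(config), indent=2, sort_keys=True))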