Commit 80c50c4b authored by Jamie Madill, committed by Commit Bot

Add perf_tests_with_args test type.

These are command line tests that may use the same isolate and binary but run
with different permutations of command line arguments. This allows us to run
the same test suite with multiple sets of arguments, which is useful for
Chromium-based tests that don't want to launch child processes from a single
test but instead run different permutations of the same test binary. A short
sketch of the two entry formats follows below.

New tests are added for the new functionality and for the existing "c plus plus"
isolate test functionality.

BUG=angleproject:2188

Change-Id: I97434db67ffbd9fc75edb712f6706543c7a5af54
Reviewed-on: https://chromium-review.googlesource.com/743683
Commit-Queue: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Cr-Commit-Position: refs/heads/master@{#512675}
parent 2ec883ba
@@ -61,6 +61,14 @@ def add_builder(waterfall, name, additional_compile_targets=None):
 def add_tester(waterfall, name, perf_id, platform, target_bits=64,
                num_host_shards=1, num_device_shards=1, swarming=None,
                replace_system_webview=False):
+  """ Adds tester named |name| to |waterfall|.
+
+  Tests can be added via 'perf_tests', which expects a 2-element tuple of
+  (isolate_name, shard), or via 'perf_tests_with_args', which allows you
+  to specify command line arguments for the tests. 'perf_tests_with_args'
+  expects a tuple of 4 elements: (name, shard, test_args, isolate_name).
+  'test_args' is a list of strings passed via the test's command line.
+  """
   del perf_id # this will be needed
   waterfall['testers'][name] = {
     'platform': platform,
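For context only (not part of this change), a hypothetical call site could
register a tester whose swarming dimension carries the new entry type. The
builder name, perf_id, and hardware values below are made up, and it is assumed
that the swarming= list becomes the tester's 'swarming_dimensions':

# Hypothetical registration sketch; names and dimension values are examples.
add_tester(
    waterfall, 'Example Win Perf Tester', 'example-win-perf', 'win',
    swarming=[{
        'gpu': '10de:104a',
        'os': 'Windows-2008ServerR2-SP1',
        'pool': 'Chrome-perf',
        'device_ids': ['build94-m1'],
        'perf_tests_with_args': [
            ('passthrough_command_buffer_perftests', 'build94-m1',
             ['--use-cmd-decoder=passthrough', '--use-angle=gl-null'],
             'command_buffer_perftests'),
        ],
    }])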
@@ -697,15 +705,30 @@ def get_swarming_dimension(dimension, device_id):
   return complete_dimension


+def generate_cplusplus_isolate_script_entry(
+    dimension, name, shard, test_args, isolate_name):
+  return generate_isolate_script_entry(
+      [get_swarming_dimension(dimension, shard)], test_args, isolate_name,
+      name, ignore_task_failure=False)
+
+
 def generate_cplusplus_isolate_script_test(dimension):
   return [
-    generate_isolate_script_entry(
-        [get_swarming_dimension(dimension, shard)], [], name,
-        name, ignore_task_failure=False)
+    generate_cplusplus_isolate_script_entry(
+        dimension, name, shard, [], name)
     for name, shard in dimension['perf_tests']
   ]
+
+
+def generate_cplusplus_isolate_script_test_with_args(dimension):
+  return [
+    generate_cplusplus_isolate_script_entry(
+        dimension, name, shard, test_args, isolate_name)
+    for name, shard, test_args, isolate_name
+    in dimension['perf_tests_with_args']
+  ]


 def ShouldBenchmarksBeScheduled(
     benchmark, name, os_name, browser_name):
   # StoryExpectations uses finder_options.browser_type, platform.GetOSName,
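Based on the unit tests added below, each generated entry is a dict with at
least 'name', 'isolate_name', and 'args' keys. A rough sketch of the expected
shape for one 'perf_tests_with_args' tuple; the 'swarming' key is an assumption
inferred from the get_swarming_dimension call, not asserted by the tests:

# Sketch of one generated isolated-script entry; only 'name', 'isolate_name'
# and the presence of the flags in 'args' are asserted by the new tests.
expected_entry = {
    'name': 'passthrough_command_buffer_perftests',
    'isolate_name': 'command_buffer_perftests',
    # 'args' contains at least the test_args passed in; the generator may
    # append other flags.
    'args': ['--use-cmd-decoder=passthrough', '--use-angle=gl-null'],
    # Assumed: the complete swarming dimension for the shard, as returned by
    # get_swarming_dimension(dimension, 'build94-m1').
    'swarming': {},
}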
@@ -908,6 +931,9 @@ def generate_all_tests(waterfall):
     if config['swarming_dimensions'][0].get('perf_tests', False):
       isolated_scripts += generate_cplusplus_isolate_script_test(
           config['swarming_dimensions'][0])
+    if config['swarming_dimensions'][0].get('perf_tests_with_args', False):
+      isolated_scripts += generate_cplusplus_isolate_script_test_with_args(
+          config['swarming_dimensions'][0])
   isolated_scripts, devices_to_test_skipped = remove_blacklisted_device_tests(
       isolated_scripts, BLACKLISTED_DEVICES)
...
@@ -344,3 +344,44 @@ class PerfDataGeneratorTest(unittest.TestCase):
     for key in keys:
       lst = getattr(perf_data_generator, key)
       self.assertEqual(sorted(lst), lst, 'please sort %s' % key)
+
+  def testGenerateCplusplusIsolateScriptTest(self):
+    dimension = {
+        'gpu': '10de:104a',
+        'os': 'Windows-2008ServerR2-SP1',
+        'pool': 'Chrome-perf',
+        'device_ids': [
+            'build92-m1', 'build93-m1',
+            'build94-m1', 'build95-m1', 'build96-m1'
+        ],
+        'perf_tests': [
+            ('angle_perftests', 'build94-m1'),
+        ],
+    }
+    test = perf_data_generator.generate_cplusplus_isolate_script_test(dimension)
+    test = test[0]
+
+    self.assertEqual(test['name'], 'angle_perftests')
+    self.assertEqual(test['isolate_name'], 'angle_perftests')
+
+  def testGenerateCplusplusIsolateScriptTestWithArgs(self):
+    dimension = {
+        'gpu': '10de:104a',
+        'os': 'Windows-2008ServerR2-SP1',
+        'pool': 'Chrome-perf',
+        'device_ids': [
+            'build92-m1', 'build93-m1',
+            'build94-m1', 'build95-m1', 'build96-m1'
+        ],
+        'perf_tests_with_args': [
+            ('passthrough_command_buffer_perftests', 'build94-m1',
+             ['--use-cmd-decoder=passthrough', '--use-angle=gl-null'],
+             'command_buffer_perftests')
+        ]
+    }
+    test = perf_data_generator.generate_cplusplus_isolate_script_test_with_args(
+        dimension)
+    test = test[0]
+
+    self.assertEqual(test['name'], 'passthrough_command_buffer_perftests')
+    self.assertEqual(test['isolate_name'], 'command_buffer_perftests')
+    self.assertTrue('--use-cmd-decoder=passthrough' in test['args'])
+    self.assertTrue('--use-angle=gl-null' in test['args'])