Commit 8a0edf5c authored by Emily Hanley, committed by Commit Bot

Add the ability to trigger a single benchmark in the performance_test_suite

Bug: 836037
Change-Id: I9a927fe4655261a77965ca27cdfcf1055a9a0f1d
Reviewed-on: https://chromium-review.googlesource.com/1030773
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Reviewed-by: David Tu <dtu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#554597}
parent 7785880b
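
For context, the sketch below (not part of the commit) illustrates the new code path this change introduces: a comma-separated --benchmarks value bypasses the shard-map lookup and runs only the named benchmarks. The benchmark names and the stub execute_benchmark used here are illustrative assumptions, not taken from the change.

import argparse

def execute_benchmark(benchmark):
  # Stand-in for the real execute_benchmark(); only prints for illustration.
  print('would run %s' % benchmark)
  return 0

parser = argparse.ArgumentParser()
parser.add_argument('--benchmarks', required=False,
                    help='Comma separated list of benchmark names')
# Hypothetical invocation; real runs pass many more flags.
args = parser.parse_args(['--benchmarks', 'speedometer2,blink_perf.layout'])

return_code = 0
if args.benchmarks:
  for benchmark in args.benchmarks.split(','):
    return_code = execute_benchmark(benchmark) or return_code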
@@ -118,22 +118,14 @@ def write_results(
 def execute_benchmark(benchmark, isolated_out_dir,
                       args, rest_args, is_reference):
   # While we are between chartjson and histogram set we need
-  # to determine which output format to look for.
-  # We need to append this both to the args and the per benchmark
-  # args so the run_benchmark call knows what format it is
-  # as well as triggers the benchmark correctly.
-  output_format = None
-  is_histograms = False
-  if benchmark in BENCHMARKS_TO_OUTPUT_HISTOGRAMS:
-    output_format = '--output-format=histograms'
-    is_histograms = True
-  else:
-    output_format = '--output-format=chartjson'
-  # Need to run the benchmark twice on browser and reference build
+  # to determine which output format to look for, or see if it was
+  # already passed in, in which case that format applies to all benchmarks
+  # in this run.
+  is_histograms = append_output_format(benchmark, args, rest_args)
   # Insert benchmark name as first argument to run_benchmark call
-  # Need to append output format.
-  per_benchmark_args = (rest_args[:1] + [benchmark]
-                        + rest_args[1:] + [output_format])
+  # which is the first argument in the rest_args. Also need to append
+  # output format.
+  per_benchmark_args = (rest_args[:1] + [benchmark] + rest_args[1:])
   benchmark_name = benchmark
   if is_reference:
     # Need to parse out the browser to replace browser flag with
@@ -161,6 +153,30 @@ def execute_benchmark(benchmark, isolated_out_dir,
   return rc
 
 
+def append_output_format(benchmark, args, rest_args):
+  # We need to determine if the output format is already passed in
+  # or if we need to define it for this benchmark.
+  perf_output_specified = False
+  is_histograms = False
+  if args.output_format:
+    for output_format in args.output_format:
+      if 'histograms' in output_format:
+        perf_output_specified = True
+        is_histograms = True
+      if 'chartjson' in output_format:
+        perf_output_specified = True
+      rest_args.append('--output-format=' + output_format)
+  # When crbug.com/744736 is resolved we no longer have to check
+  # the type of format per benchmark and can rely on it being passed
+  # in as an arg, as all benchmarks will output the same format.
+  if not perf_output_specified:
+    if benchmark in BENCHMARKS_TO_OUTPUT_HISTOGRAMS:
+      rest_args.append('--output-format=histograms')
+      is_histograms = True
+    else:
+      rest_args.append('--output-format=chartjson')
+  return is_histograms
+
+
 def main():
   parser = argparse.ArgumentParser()
   parser.add_argument(
@@ -177,15 +193,19 @@ def main():
   parser.add_argument(
       '--isolated-script-test-filter', type=str, required=False)
   parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
+  # TODO(eyaich) We could potentially assume this based on shards == 1 since
+  # benchmarks will always have multiple shards.
   parser.add_argument('--non-telemetry',
                       help='Type of perf test', type=bool, default=False)
-  parser.add_argument('--testing', help='Testing instance',
+  parser.add_argument('--testing', help='Test run, execute subset of tests',
                       type=bool, default=False)
+  parser.add_argument('--benchmarks',
+                      help='Comma separated list of benchmark names'
+                      ' to run in lieu of indexing into our benchmark bot maps',
+                      required=False)
+  parser.add_argument('--output-format', action='append')
 
   args, rest_args = parser.parse_known_args()
   isolated_out_dir = os.path.dirname(args.isolated_script_test_output)
+  return_code = 0
 
   if args.non_telemetry:
     # For non telemetry tests the benchmark name is the name of the executable.
@@ -195,34 +215,42 @@ def main():
     write_results(benchmark_name, charts, output_json, isolated_out_dir, True)
   else:
-    # First determine what shard we are running on to know how to
-    # index into the bot map to get list of benchmarks to run.
-    total_shards = None
-    shard_index = None
-
-    env = os.environ.copy()
-    if 'GTEST_TOTAL_SHARDS' in env:
-      total_shards = env['GTEST_TOTAL_SHARDS']
-    if 'GTEST_SHARD_INDEX' in env:
-      shard_index = env['GTEST_SHARD_INDEX']
-
-    if not (total_shards or shard_index):
-      raise Exception('Shard indicators must be present for perf tests')
-
-    sharding_map_path = get_sharding_map_path(
-        total_shards, args.testing or False)
-    with open(sharding_map_path) as f:
-      sharding_map = json.load(f)
-    sharding = None
-    sharding = sharding_map[shard_index]['benchmarks']
-    return_code = 0
-
-    for benchmark in sharding:
-      return_code = (execute_benchmark(
-          benchmark, isolated_out_dir, args, rest_args, False) or return_code)
-      # We ignore the return code of the reference build since we do not
-      # monitor it.
-      execute_benchmark(benchmark, isolated_out_dir, args, rest_args, True)
+    # If the user has supplied a list of benchmark names, execute those instead
+    # of the entire suite of benchmarks.
+    if args.benchmarks:
+      benchmarks = args.benchmarks.split(',')
+      for benchmark in benchmarks:
+        return_code = (execute_benchmark(
+            benchmark, isolated_out_dir, args, rest_args, False) or return_code)
+    else:
+      # First determine what shard we are running on to know how to
+      # index into the bot map to get the list of benchmarks to run.
+      total_shards = None
+      shard_index = None
+
+      env = os.environ.copy()
+      if 'GTEST_TOTAL_SHARDS' in env:
+        total_shards = env['GTEST_TOTAL_SHARDS']
+      if 'GTEST_SHARD_INDEX' in env:
+        shard_index = env['GTEST_SHARD_INDEX']
+
+      if not (total_shards or shard_index):
+        raise Exception('Shard indicators must be present for perf tests')
+
+      sharding_map_path = get_sharding_map_path(
+          total_shards, args.testing or False)
+      with open(sharding_map_path) as f:
+        sharding_map = json.load(f)
+      sharding = None
+      sharding = sharding_map[shard_index]['benchmarks']
+
+      for benchmark in sharding:
+        # Need to run the benchmark twice on browser and reference build
+        return_code = (execute_benchmark(
            benchmark, isolated_out_dir, args, rest_args, False) or return_code)
+        # We ignore the return code of the reference build since we do not
+        # monitor it.
+        execute_benchmark(benchmark, isolated_out_dir, args, rest_args, True)
 
   return return_code
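
The new append_output_format helper decides the output format once per benchmark: an explicit --output-format argument is forwarded as-is and applies to every benchmark in the run, otherwise membership in BENCHMARKS_TO_OUTPUT_HISTOGRAMS picks histograms or chartjson. The self-contained sketch below demonstrates that behavior; the Args class and the benchmark set are assumptions made purely for illustration.

BENCHMARKS_TO_OUTPUT_HISTOGRAMS = {'example.histogram_benchmark'}  # Assumed contents.

class Args(object):
  def __init__(self, output_format=None):
    self.output_format = output_format  # List of formats, or None.

def append_output_format(benchmark, args, rest_args):
  # Mirrors the helper added above: prefer an explicitly passed format,
  # otherwise fall back to a per-benchmark default.
  perf_output_specified = False
  is_histograms = False
  if args.output_format:
    for output_format in args.output_format:
      if 'histograms' in output_format:
        perf_output_specified = True
        is_histograms = True
      if 'chartjson' in output_format:
        perf_output_specified = True
      rest_args.append('--output-format=' + output_format)
  if not perf_output_specified:
    if benchmark in BENCHMARKS_TO_OUTPUT_HISTOGRAMS:
      rest_args.append('--output-format=histograms')
      is_histograms = True
    else:
      rest_args.append('--output-format=chartjson')
  return is_histograms

# An explicit format applies regardless of the benchmark:
rest = []
print(append_output_format('some_benchmark', Args(['histograms']), rest))  # True
print(rest)  # ['--output-format=histograms']

# With no explicit format, the per-benchmark default (chartjson here) is used:
rest = []
print(append_output_format('some_benchmark', Args(), rest))  # False
print(rest)  # ['--output-format=chartjson']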
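
In the default path, main() still indexes a JSON sharding map by the GTEST_SHARD_INDEX environment value to find the benchmarks for the current shard. A minimal sketch of that lookup follows; the map contents are made up for illustration, and the real map comes from whatever file get_sharding_map_path() returns.

import json
import os

# Assumed shape of the sharding map: shard indices (as strings, since
# GTEST_SHARD_INDEX arrives via the environment) map to benchmark lists.
example_map = json.loads("""
{
  "0": {"benchmarks": ["speedometer2", "blink_perf.layout"]},
  "1": {"benchmarks": ["example.histogram_benchmark"]}
}
""")

shard_index = os.environ.get('GTEST_SHARD_INDEX', '0')
print(example_map[shard_index]['benchmarks'])
# With GTEST_SHARD_INDEX unset, this prints the benchmarks for shard "0".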