Commit 0b18efa1 authored by Stephen Martinis, committed by Commit Bot

Add new telemetry_perf_tests isolate and script

This CL adds a new isolate called telemetry_perf_tests_new. This
is a temporary name, so that we can test it out while keeping the
old isolate name. Eventually, this isolate will probably replace the
existing isolate.

The main difference between this and the current telemetry_perf_tests
is the script it runs, run_multiple_telemetry_benchmarks_as_googletest.py.
As the name suggests, the script runs multiple telemetry benchmarks in a
single invocation, which is how we plan to structure a single swarming
task in the future.
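
For context, the script looks up the benchmarks to run by builder and bot in
//tools/perf/benchmarks/benchmark_sharding_map.json. A minimal sketch of that
map's assumed shape, expressed as a Python literal (the benchmark names here
are purely illustrative; only the builder/bot keys and the 'benchmarks' field
come from the script's lookup):

  # Hypothetical contents; the script reads
  # sharding_map[args.builder][args.bot]['benchmarks'].
  SHARDING_MAP_SKETCH = {
      'Android Nexus5 Perf': {
          'build13-b1--device2': {
              'benchmarks': [
                  'smoothness.top_25_smooth',  # illustrative benchmark name
                  'memory.top_10_mobile',      # illustrative benchmark name
              ],
          },
      },
  }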

Bug: 758630
Change-Id: If3a8afbb22ae2dbf0ce9e69f9036449d47914d03
Reviewed-on: https://chromium-review.googlesource.com/668111
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Emily Hanley <eyaich@chromium.org>
Commit-Queue: Stephen Martinis <martiniss@chromium.org>
Cr-Commit-Position: refs/heads/master@{#504196}
parent c5ea9b95
@@ -2952,6 +2952,9 @@ group("telemetry_perf_unittests") {

    # For smoke testing run_telemetry_benchmark_as_googletest
    "//testing/scripts/run_telemetry_benchmark_as_googletest.py",

    # For smoke testing run_multiple_telemetry_benchmarks_as_googletest
    "//testing/scripts/run_multiple_telemetry_benchmarks_as_googletest.py",
  ]

  if (enable_package_mash_services) {
@@ -2986,6 +2989,16 @@ group("telemetry_perf_webview_tests") {
  ]
}

# New target for new script we're using to execute perf tests. Will replace the
# current version, once testing has been done. See
# //testing/buildbot/gn_isolate_map.pyl for differences.
group("telemetry_perf_tests_new") {
  testonly = true
  deps = [
    "//chrome/test:telemetry_perf_tests",
  ]
}

group("angle_perftests") {
  testonly = true
  if (is_win || is_linux) {
......
@@ -55,6 +55,34 @@
      }
    ]
  },
  "One Buildbot Step Test Builder": {
    "isolated_scripts": [
      {
        "args": [
          "--bot",
          "build13-b1--device2",
          "--builder",
          "Android Nexus5 Perf",
          "-v",
          "--output-format=histograms",
          "--output-format=json-test-results",
          "--browser=release"
        ],
        "isolate_name": "telemetry_perf_tests_new",
        "name": "benchmarks for bot 1",
        "override_compile_targets": [
          "telemetry_perf_tests"
        ],
        "swarming": {
          "can_use_on_swarming_builders": true,
          "expiration": 36000,
          "hard_timeout": 10800,
          "ignore_task_failure": false,
          "io_timeout": 3600
        }
      }
    ]
  },
  "Win 10 4 Core Low-End Perf Tests": {
    "isolated_scripts": [
      {
......
@@ -941,6 +941,14 @@
      "../../tools/perf/run_benchmark",
    ],
  },
  "telemetry_perf_tests_new": {
    "label": "//chrome/test:telemetry_perf_tests",
    "type": "script",
    "script": "//testing/scripts/run_multiple_telemetry_benchmarks_as_googletest.py",
    "args": [
      "../../tools/perf/run_benchmark",
    ],
  },
  "telemetry_perf_webview_tests": {
    "label": "//chrome/test:telemetry_perf_webview_tests",
    "type": "script",
......
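
Taken together, the isolate map entry above and the per-builder args in the
buildbot config above determine what the telemetry_perf_tests_new isolate
actually executes. A rough, illustrative sketch of the composed invocation
(argument ordering and the --isolated-script-test-output path are placeholders;
that flag is supplied by the caller, per the script's docstring below):

  # Illustrative only; exact paths and ordering come from the isolate and
  # recipe layers, not from this CL.
  cmd = [
      'testing/scripts/run_multiple_telemetry_benchmarks_as_googletest.py',
      '../../tools/perf/run_benchmark',   # "args" from gn_isolate_map.pyl
      '--bot', 'build13-b1--device2',     # per-builder args from the json above
      '--builder', 'Android Nexus5 Perf',
      '-v',
      '--output-format=histograms',
      '--output-format=json-test-results',
      '--browser=release',
      '--isolated-script-test-output=/tmp/output.json',  # placeholder path
  ]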
@@ -14,4 +14,6 @@ per-file run_gpu*=nednguyen@google.com
per-file run_telemetry*=kbr@chromium.org
per-file run_telemetry*=nednguyen@google.com
per-file run_telemetry*=eyaich@chromium.org
per-file run_multiple_telemetry*=martiniss@chromium.org
per-file run_multiple_telemetry*=eyaich@chromium.org
per-file run_gtest*=eyaich@chromium.org
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs several telemetry benchmarks.

This script attempts to emulate the contract of gtest-style tests
invoked via recipes. The main contract is that the caller passes the
argument:

  --isolated-script-test-output=[FILENAME]

json is written to that file in the format detailed here:
https://www.chromium.org/developers/the-json-test-results-format

This script is intended to be the base command invoked by the isolate,
followed by a subsequent Python script. It could be generalized to
invoke an arbitrary executable.

It currently runs several benchmarks. The benchmarks it will execute are
determined by --bot and the sharding map location (see sharding_map_path()).

The results of running the benchmarks are put in separate directories per
benchmark. Two files will be present in each directory: perf_results.json,
which is the perf-specific results (with unenforced format; could be
histogram, legacy, or chartjson), and test_results.json, which is a JSON Test
Results format file
(https://www.chromium.org/developers/the-json-test-results-format).

This script was derived from run_telemetry_benchmark_as_googletest, and calls
into that script.
"""

import argparse
import json
import os
import shutil
import sys
import tempfile
import traceback

import common

import run_telemetry_benchmark_as_googletest

def sharding_map_path():
  return os.path.join(
      os.path.dirname(__file__), '..', '..', 'tools', 'perf', 'benchmarks',
      'benchmark_sharding_map.json')

def main():
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--isolated-script-test-output', type=argparse.FileType('w'),
      required=True)
  parser.add_argument(
      '--isolated-script-test-chartjson-output', required=False)
  parser.add_argument(
      '--isolated-script-test-perf-output', required=False)
  parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
  parser.add_argument('--output-format', action='append')
  parser.add_argument('--builder', required=True)
  parser.add_argument('--bot', required=True,
                      help='Bot ID to use to determine which tests to run. Will'
                           ' use //tools/perf/core/benchmark_sharding_map.json'
                           ' with this as a key to determine which benchmarks'
                           ' to execute')
  args, rest_args = parser.parse_known_args()
  for output_format in args.output_format:
    rest_args.append('--output-format=' + output_format)

  # --isolated-script-test-output is parsed as a FileType, so use its .name to
  # locate the directory that per-benchmark results are written under.
  isolated_out_dir = os.path.dirname(args.isolated_script_test_output.name)

  with open(sharding_map_path()) as f:
    sharding_map = json.load(f)
  sharding = sharding_map[args.builder][args.bot]['benchmarks']
  return_code = 0

  for benchmark in sharding:
    per_benchmark_args = [benchmark] + rest_args[:]
    # We don't care exactly what these are. In particular, the perf results
    # could be any format (chartjson, legacy, histogram). We just pass these
    # through, and expose these as results for this task.
    rc, perf_results, json_test_results = (
        run_telemetry_benchmark_as_googletest.run_benchmark(
            args, per_benchmark_args))
    return_code = return_code or rc

    # Ensure the per-benchmark output directory exists, then write the results
    # files in write mode so json.dump can serialize into them.
    benchmark_path = os.path.join(isolated_out_dir, benchmark)
    if not os.path.exists(benchmark_path):
      os.makedirs(benchmark_path)
    with open(os.path.join(benchmark_path, 'perf_results.json'), 'w') as f:
      json.dump(perf_results, f)
    with open(os.path.join(benchmark_path, 'test_results.json'), 'w') as f:
      json.dump(json_test_results, f)
  return return_code

# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
  json.dump([], args.output)


if __name__ == '__main__':
  # Conform minimally to the protocol defined by ScriptTest.
  if 'compile_targets' in sys.argv:
    funcs = {
        'run': None,
        'compile_targets': main_compile_targets,
    }
    sys.exit(common.run_script(sys.argv[1:], funcs))
  sys.exit(main())
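
To make the docstring's output contract concrete, here is a sketch of the
layout the script writes under the isolated output directory, assuming two
hypothetical benchmarks were assigned to this bot:

  <isolated_out_dir>/
    smoothness.top_25_smooth/   (illustrative benchmark name)
      perf_results.json         (perf output; format unenforced: histogram, legacy, or chartjson)
      test_results.json         (JSON Test Results format)
    memory.top_10_mobile/       (illustrative benchmark name)
      perf_results.json
      test_results.json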
@@ -53,6 +53,26 @@ def main():
  args, rest_args = parser.parse_known_args()
  for output_format in args.output_format:
    rest_args.append('--output-format=' + output_format)
  rc, chartresults, json_test_results = run_benchmark(args, rest_args)

  if chartresults:
    if args.isolated_script_test_perf_output:
      filename = args.isolated_script_test_perf_output
    elif args.isolated_script_test_chartjson_output:
      filename = args.isolated_script_test_chartjson_output
    else:
      filename = None

    if filename is not None:
      with open(filename, 'w') as chartjson_output_file:
        json.dump(chartresults, chartjson_output_file)

  json.dump(json_test_results, args.isolated_script_test_output)
  return rc


def run_benchmark(args, rest_args):
  env = os.environ.copy()
  # Assume we want to set up the sandbox environment variables all the
  # time; doing so is harmless on non-Linux platforms and is needed
@@ -105,20 +125,7 @@ def main():
  if rc == 0:
    rc = 1  # Signal an abnormal exit.

  if chartjson_results_present:
    if args.isolated_script_test_perf_output:
      filename = args.isolated_script_test_perf_output
    elif args.isolated_script_test_chartjson_output:
      filename = args.isolated_script_test_chartjson_output
    else:
      filename = None

    if filename is not None:
      with open(filename, 'w') as chartjson_output_file:
        json.dump(chartresults, chartjson_output_file)

  json.dump(json_test_results, args.isolated_script_test_output)
  return rc
  return rc, chartresults, json_test_results
# This is not really a "script test" so does not need to manually add
......
@@ -56,5 +56,31 @@
        }
      }
    ]
  },
  "One Buildbot Step Test Builder": {
    "isolated_scripts": [
      {
        "args": [
          "--bot", "build13-b1--device2",
          "--builder", "Android Nexus5 Perf",
          "-v",
          "--output-format=histograms",
          "--output-format=json-test-results",
          "--browser=release"
        ],
        "isolate_name": "telemetry_perf_tests_new",
        "name": "benchmarks for bot 1",
        "override_compile_targets": [
          "telemetry_perf_tests"
        ],
        "swarming": {
          "can_use_on_swarming_builders": true,
          "expiration": 36000,
          "hard_timeout": 10800,
          "ignore_task_failure": false,
          "io_timeout": 3600
        }
      }
    ]
  }
}