Commit 76044ee0 authored by Caleb Rouleau

Reland "Cleanup run_performance_tests.py's output files handling"

Relanding https://chromium-review.googlesource.com/c/1449151 with fixes.

Patchset 1 is https://chromium-review.googlesource.com/c/1449151

Patchset 2 is https://chromium-review.googlesource.com/c/1464228

Patchset 3 is the new fix. When you run a telemetry benchmark as a
reference, you are supposed to run
`run_benchmark benchmark_name --browser=reference`,
but I thought you were supposed to run
`run_benchmark benchmark_name.reference --browser=reference`
because I misread the earlier code.
I also added an end-to-end test for this behavior, which covers the
sharding code as well.
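
(For illustration only, not part of this CL: a minimal sketch of that
ordering. It assumes Telemetry honors the last --browser flag it is given;
`build_reference_command` and its arguments are made-up names, not the
actual run_performance_tests.py code.)

```python
# Hypothetical sketch, not the real implementation: the benchmark name is
# passed through unchanged, --browser=reference is appended after the normal
# passthrough arguments (assuming the last --browser flag wins), and only the
# output directory gets the ".reference" suffix, as the sharded end-to-end
# test below expects.
def build_reference_command(benchmark, passthrough_args):
  cmd = ['../../tools/perf/run_benchmark', benchmark] + list(passthrough_args)
  cmd.append('--browser=reference')  # must come after the original --browser
  return cmd

# build_reference_command('dummy_benchmark', ['--browser=release_x64', '-v'])
# -> ['../../tools/perf/run_benchmark', 'dummy_benchmark',
#     '--browser=release_x64', '-v', '--browser=reference']
```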

Patchset 5 is a unittest for
https://chromium-review.googlesource.com/c/1464228

Bug: 930879, 928928, 927507, 923628, 920002, 744736
Change-Id: If73d929b8e1b32c33d2bed7d2298b64d205658c1
Reviewed-on: https://chromium-review.googlesource.com/c/1467907
Reviewed-by: Emily Hanley <eyaich@chromium.org>
Reviewed-by: John Chen <johnchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#632333}
parent e96af993

tools/perf/scripts_smoke_unittest.py
@@ -13,16 +13,22 @@ import unittest
 from telemetry import decorators
 from telemetry.testing import options_for_unittests
 
+RUNNER_SCRIPTS_DIR = os.path.join(os.path.dirname(__file__),
+                                  '..', '..', 'testing', 'scripts')
+sys.path.append(RUNNER_SCRIPTS_DIR)
+import run_performance_tests  # pylint: disable=wrong-import-position,import-error
+
 
 class ScriptsSmokeTest(unittest.TestCase):
 
   perf_dir = os.path.dirname(__file__)
 
-  def RunPerfScript(self, command):
+  def RunPerfScript(self, command, env=None):
     main_command = [sys.executable]
     args = main_command + command.split(' ')
     proc = subprocess.Popen(args, stdout=subprocess.PIPE,
-                            stderr=subprocess.STDOUT, cwd=self.perf_dir)
+                            stderr=subprocess.STDOUT, cwd=self.perf_dir,
+                            env=env)
     stdout = proc.communicate()[0]
     return_code = proc.returncode
     return return_code, stdout
@@ -59,7 +65,7 @@ class ScriptsSmokeTest(unittest.TestCase):
     self.assertIn('kraken', stdout)
 
   @decorators.Disabled('chromeos')  # crbug.com/754913
-  def testRunTelemetryBenchmarkAsGoogletest(self):
+  def testRunPerformanceTestsTelemetry_end2end(self):
     options = options_for_unittests.GetCopy()
     browser_type = options.browser_type
     tempdir = tempfile.mkdtemp()
@@ -88,7 +94,138 @@ class ScriptsSmokeTest(unittest.TestCase):
         test_repeats = test_results['num_failures_by_type']['PASS']
         self.assertEqual(
             test_repeats, 2, '--isolated-script-test-repeat=2 should work.')
+      with open(os.path.join(tempdir, benchmark, 'perf_results.json')) as f:
+        perf_results = json.load(f)
+        self.assertIsNotNone(
+            perf_results, 'json perf results should be populated: ' + stdout)
     except IOError as e:
       self.fail('json_test_results should be populated: ' + stdout + str(e))
     finally:
       shutil.rmtree(tempdir)
+
+  @decorators.Disabled('chromeos')  # crbug.com/754913
+  def testRunPerformanceTestsTelemetrySharded_end2end(self):
+    options = options_for_unittests.GetCopy()
+    browser_type = options.browser_type
+    tempdir = tempfile.mkdtemp()
+    env = os.environ.copy()
+    env['GTEST_SHARD_INDEX'] = '0'
+    env['GTEST_TOTAL_SHARDS'] = '2'
+    return_code, stdout = self.RunPerfScript(
+        '../../testing/scripts/run_performance_tests.py '
+        '../../tools/perf/run_benchmark '
+        '--test-shard-map-filename=smoke_test_benchmark_shard_map.json '
+        '--browser=%s '
+        '--run-ref-build '
+        '--isolated-script-test-repeat=2 '
+        '--isolated-script-test-also-run-disabled-tests '
+        '--isolated-script-test-output=%s' % (
+            browser_type,
+            os.path.join(tempdir, 'output.json')
+        ), env=env)
+    self.assertEquals(return_code, 0, stdout)
+    try:
+      expected_benchmark_folders = (
+          'dummy_benchmark.stable_benchmark_1',
+          'dummy_benchmark.stable_benchmark_1.reference')
+      for folder in expected_benchmark_folders:
+        with open(os.path.join(tempdir, folder, 'test_results.json')) as f:
+          test_results = json.load(f)
+          self.assertIsNotNone(
+              test_results, 'json test results should be populated: ' + stdout)
+          test_repeats = test_results['num_failures_by_type']['PASS']
+          self.assertEqual(
+              test_repeats, 2, '--isolated-script-test-repeat=2 should work.')
+        with open(os.path.join(tempdir, folder, 'perf_results.json')) as f:
+          perf_results = json.load(f)
+          self.assertIsNotNone(
+              perf_results, 'json perf results should be populated: ' + stdout)
+    except IOError as e:
+      self.fail('IOError: ' + stdout + str(e))
+    except KeyError as e:
+      self.fail('KeyError: ' + stdout + str(e))
+    finally:
+      shutil.rmtree(tempdir)
+
+  @decorators.Disabled('win')  # ".exe" is auto-added which breaks Windows.
+  def testRunPerformanceTestsGtest_end2end(self):
+    tempdir = tempfile.mkdtemp()
+    benchmark = 'dummy_gtest'
+    return_code, stdout = self.RunPerfScript(
+        '../../testing/scripts/run_performance_tests.py ' +
+        os.path.join('..', '..', 'tools', 'perf', 'testdata', 'dummy_gtest') +
+        ' --non-telemetry=true '
+        '--this-arg=passthrough '
+        '--gtest-benchmark-name dummy_gtest '
+        '--isolated-script-test-output=/x/y/z/output.json '
+        '--isolated-script-test-output=%s' % (
+            os.path.join(tempdir, 'output.json')
+        ))
+    self.assertEquals(return_code, 0, stdout)
+    try:
+      # By design, run_performance_tests.py does not output test results
+      # to the location passed in by --isolated-script-test-output. Instead
+      # it uses the directory of that file and puts stuff in its own
+      # subdirectories for the purposes of merging later.
+      with open(os.path.join(tempdir, benchmark, 'test_results.json')) as f:
+        test_results = json.load(f)
+        self.assertIsNotNone(
+            test_results, 'json_test_results should be populated: ' + stdout)
+      with open(os.path.join(tempdir, benchmark, 'perf_results.json')) as f:
+        perf_results = json.load(f)
+        self.assertIsNotNone(
+            perf_results, 'json perf results should be populated: ' + stdout)
+    except IOError as e:
+      self.fail('json_test_results should be populated: ' + stdout + str(e))
+    finally:
+      shutil.rmtree(tempdir)
+
+  def testRunPerformanceTestsTelemetryArgsParser(self):
+    options = run_performance_tests.parse_arguments([
+        '../../tools/perf/run_benchmark', '-v', '--browser=release_x64',
+        '--upload-results', '--run-ref-build',
+        '--test-shard-map-filename=win-10-perf_map.json',
+        '--assert-gpu-compositing',
+        r'--isolated-script-test-output=c:\a\b\c\output.json',
+        r'--isolated-script-test-perf-output=c:\a\b\c\perftest-output.json',
+        '--passthrough-arg=--a=b',
+    ])
+    self.assertIn('--assert-gpu-compositing', options.passthrough_args)
+    self.assertIn('--browser=release_x64', options.passthrough_args)
+    self.assertIn('-v', options.passthrough_args)
+    self.assertIn('--a=b', options.passthrough_args)
+    self.assertEqual(options.executable, '../../tools/perf/run_benchmark')
+    self.assertEqual(options.isolated_script_test_output,
+                     r'c:\a\b\c\output.json')
+
+  def testRunPerformanceTestsTelemetryCommandGenerator_ReferenceBrowserComeLast(self):
+    """This tests for crbug.com/928928."""
+    options = run_performance_tests.parse_arguments([
+        '../../tools/perf/run_benchmark', '--browser=release_x64',
+        '--run-ref-build',
+        '--test-shard-map-filename=win-10-perf_map.json',
+        r'--isolated-script-test-output=c:\a\b\c\output.json',
+    ])
+    self.assertIn('--browser=release_x64', options.passthrough_args)
+    command = run_performance_tests.TelemetryCommandGenerator(
+        'fake_benchmark_name', options, is_reference=True).generate(
+            'fake_output_dir')
+    original_browser_arg_index = command.index('--browser=release_x64')
+    reference_browser_arg_index = command.index('--browser=reference')
+    self.assertTrue(reference_browser_arg_index > original_browser_arg_index)
+
+  def testRunPerformanceTestsGtestArgsParser(self):
+    options = run_performance_tests.parse_arguments([
+        'media_perftests', '--non-telemetry=true', '--single-process-tests',
+        '--test-launcher-retry-limit=0',
+        '--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
+        '*unoptimized_aligned',
+        '--gtest-benchmark-name', 'media_perftests',
+        '--isolated-script-test-output=/x/y/z/output.json',
+    ])
+    self.assertIn('--single-process-tests', options.passthrough_args)
+    self.assertIn('--test-launcher-retry-limit=0', options.passthrough_args)
+    self.assertEqual(options.executable, 'media_perftests')
+    self.assertEqual(options.isolated_script_test_output,
+                     r'/x/y/z/output.json')

tools/perf/testdata/dummy_gtest (new file)
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""This is a fake version of a non-Telemetry performance test (gtest_perf_test).

It is used by ../scripts_smoke_unittest.py.
"""

# This is just a random line of output that can be converted into graph_json.
print r"*RESULT clockless_video_playback_vp8: bear_silent.webm= 53.406108056578425 runs/s"