Allow pyauto performance tests in perf.py to run as an autotest.

This change adds autotest control files and an associated pyauto suite for
running the tests in perf.py as an autotest.  It only enables execution as an
autotest; it does not yet make the tests run continuously in the autotest lab.
That will be done in a later CL.
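
For reference, one way to kick off the new test on a device that already has
the autotest client installed is sketched below.  This is only an illustration:
the autotest install path and control-file location shown are assumptions, not
something added or required by this change.

  # Hedged sketch: invoke the autotest client directly on the device with the
  # new control file.  Both paths are assumed examples and may differ per setup.
  import subprocess

  AUTOTEST_BIN = '/usr/local/autotest/bin/autotest'  # assumed install location
  CONTROL_FILE = ('/usr/local/autotest/tests/'
                  'desktopui_PyAutoPerfTests/control')  # assumed test location

  subprocess.check_call([AUTOTEST_BIN, CONTROL_FILE])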

I also modified perf.py so that the v8 benchmark test runs for only a single
iteration, and so that, for the other tests, the number of iterations can
optionally be overridden by setting the NUM_ITERATIONS environment variable
(useful when running the perf tests manually).
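
For example, a manual run could override the default iteration count by
exporting NUM_ITERATIONS before launching a test.  A minimal sketch follows
(the exact entry point and test name are illustrative, not mandated by this
change):

  # Hedged sketch: run one perf test with 10 iterations instead of the default
  # 50 by setting NUM_ITERATIONS in the child process environment.
  import os
  import subprocess

  env = dict(os.environ, NUM_ITERATIONS='10')
  subprocess.check_call(
      ['python', 'chrome/test/functional/perf.py', 'PerfTest.testNewTab'],
      env=env)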

BUG=chromium-os:18187,chromium-os:18185,chromium-os:18458
TEST=None

Review URL: http://codereview.chromium.org/7617014

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@96461 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

AUTHOR = "Chrome OS Team"
NAME = "desktopui_PyAutoPerfTests"
PURPOSE = "PyAuto-based Chrome performance tests."
CRITERIA = "This test will fail if running Chrome returns a command error."
TIME = "LONG"
TEST_CATEGORY = "Functional"
TEST_CLASS = "desktopui"
TEST_TYPE = "client"

DOC = """
This is a wrapper test for Chrome pyauto-based performance tests.
http://dev.chromium.org/developers/testing/pyauto
"""

job.run_test('desktopui_PyAutoPerfTests')
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import pwd
import shutil
import subprocess

from autotest_lib.client.bin import utils
from autotest_lib.client.cros import constants, chrome_test, cros_ui, login


class desktopui_PyAutoPerfTests(chrome_test.ChromeTestBase):
    """Wrapper for running Chrome's PyAuto-based performance tests.

    Performs all setup and fires off the CHROMEOS_PERF PyAuto suite.
    """
    version = 1

    def initialize(self):
        chrome_test.ChromeTestBase.initialize(self)
        assert os.geteuid() == 0, 'Need superuser privileges'
        deps_dir = os.path.join(self.autodir, 'deps')
        subprocess.check_call(['chown', '-R', 'chronos', self.cr_source_dir])

        # Set up the suid python binary, which can enable the Chrome testing
        # interface.
        suid_python = os.path.join(self.test_binary_dir, 'suid-python')
        py_path = subprocess.Popen(['which', 'python'],
                                   stdout=subprocess.PIPE).communicate()[0]
        py_path = py_path.strip()
        assert os.path.exists(py_path), 'Could not find python'
        if os.path.islink(py_path):
            linkto = os.readlink(py_path)
            py_path = os.path.join(os.path.dirname(py_path), linkto)
        shutil.copy(py_path, suid_python)
        os.chown(suid_python, 0, 0)
        os.chmod(suid_python, 04755)

        # User chronos should own the current directory.
        chronos_id = pwd.getpwnam('chronos')
        os.chown(os.getcwd(), chronos_id.pw_uid, chronos_id.pw_gid)

        # Make sure Chrome minidumps are written locally.
        minidumps_file = '/mnt/stateful_partition/etc/enable_chromium_minidumps'
        if not os.path.exists(minidumps_file):
            open(minidumps_file, 'w').close()

        # Allow browser restart by its babysitter (session_manager).
        if os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE):
            os.remove(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)
        login.nuke_login_manager()
        assert os.path.exists(minidumps_file)

        # Set up /tmp/disable_chrome_restart to disallow further browser
        # restarts by the babysitter.
        if not os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE):
            open(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE, 'w').close()
        assert os.path.exists(constants.DISABLE_BROWSER_RESTART_MAGIC_FILE)

    def run_once(self):
        """Runs the PyAuto performance tests."""
        # Enable the Chrome testing interface and log in to a default account.
        deps_dir = os.path.join(self.autodir, 'deps')
        pyautolib_dir = os.path.join(self.cr_source_dir,
                                     'chrome', 'test', 'pyautolib')
        login_cmd = cros_ui.xcommand_as(
            'python %s chromeos_utils.ChromeosUtils.LoginToDefaultAccount '
            '-v --no-http-server' %
            os.path.join(pyautolib_dir, 'chromeos', 'chromeos_utils.py'))
        utils.system(login_cmd)

        # Run the PyAuto performance tests.
        functional_cmd = cros_ui.xcommand_as(
            '%s/chrome_test/test_src/chrome/test/functional/'
            'pyauto_functional.py --suite=CHROMEOS_PERF -v' % deps_dir)
        utils.system(functional_cmd)
@@ -465,6 +465,13 @@
     ],
   },
 
+  # ChromeOS performance tests.
+  'CHROMEOS_PERF': {
+    'chromeos': [
+      'perf',
+    ],
+  },
+
   # HTML5 media performance tests.
   'AV_PERF': {
     'linux': [
@@ -3,6 +3,16 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Basic pyauto performance tests.
+
+For tests that need to be run for multiple iterations (e.g., so that average
+and standard deviation values can be reported), the default number of iterations
+run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
+That value can optionally be tweaked by setting an environment variable
+'NUM_ITERATIONS' to a positive integer, representing the number of iterations
+to run.
 """
 
 import logging
 import math
 import os
@@ -15,7 +25,20 @@ import pyauto
 
 class PerfTest(pyauto.PyUITest):
   """Basic performance tests."""
-  _NUM_ITERATIONS = 50
+  _DEFAULT_NUM_ITERATIONS = 50
+
+  def setUp(self):
+    """Performs necessary setup work before running each test."""
+    self._num_iterations = self._DEFAULT_NUM_ITERATIONS
+    if 'NUM_ITERATIONS' in os.environ:
+      try:
+        self._num_iterations = int(os.environ['NUM_ITERATIONS'])
+        if self._num_iterations <= 0:
+          raise ValueError('Environment variable NUM_ITERATIONS must be an '
+                           'integer > 0.')
+      except ValueError, e:
+        self.fail('Error processing environment variable: %s' % e)
+    pyauto.PyUITest.setUp(self)
 
   def _MeasureElapsedTime(self, python_command, num_invocations):
     """Measures time (in msec) to execute a python command one or more times.
@@ -49,23 +72,23 @@
     std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
     return avg, std_dev
 
-  def _PrintSummaryResults(self, first_val, num_iter, values, units):
+  def _PrintSummaryResults(self, first_val, units, values=[]):
     """Logs summary measurement information.
 
     Args:
       first_val: A numeric measurement value for a single initial trial.
-      num_iter: An integer number of iterations used for multiple trials.
-      values: A list of numeric value measurements.
       units: A string specifying the units for the specified measurements.
+      values: A list of numeric value measurements.
     """
-    avg, std_dev = self._AvgAndStdDev(values)
-    logging.debug('First trial: %.2f %s', first_val, units)
-    logging.info('Number of iterations: %d', num_iter)
-    for val in values:
-      logging.info(' %.2f %s', val, units)
-    logging.info(' --------------------------')
-    logging.info(' Average: %.2f %s', avg, units)
-    logging.info(' Std dev: %.2f %s', std_dev, units)
+    logging.debug('Single trial: %.2f %s', first_val, units)
+    if values:
+      avg, std_dev = self._AvgAndStdDev(values)
+      logging.info('Number of iterations: %d', len(values))
+      for val in values:
+        logging.info(' %.2f %s', val, units)
+      logging.info(' --------------------------')
+      logging.info(' Average: %.2f %s', avg, units)
+      logging.info(' Std dev: %.2f %s', std_dev, units)
 
   def _RunNewTabTest(self, open_tab_command, num_tabs=1):
     """Runs a perf test that involves opening new tab(s).
@@ -87,7 +110,7 @@
       self.GetBrowserWindow(0).GetTab(1).Close(True)
 
     timings = []
-    for _ in range(self._NUM_ITERATIONS):
+    for _ in range(self._num_iterations):
       elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
       self.assertEqual(1 + num_tabs, self.GetTabCount(),
                        msg='Did not open %d new tab(s).' % num_tabs)
@@ -95,7 +118,7 @@
         self.GetBrowserWindow(0).GetTab(1).Close(True)
       timings.append(elapsed)
 
-    self._PrintSummaryResults(orig_elapsed, self._NUM_ITERATIONS, timings, 'ms')
+    self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
 
   def testNewTab(self):
     """Measures time to open a new tab."""
@@ -118,39 +141,20 @@
   def testV8BenchmarkSuite(self):
     """Measures score from online v8 benchmark suite."""
-    def _RunSingleV8BenchmarkSuite():
-      """Runs a single v8 benchmark suite test and returns the final score.
-
-      Returns:
-        The integer score computed from running the v8 benchmark suite.
-      """
-      url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
-      self.AppendTab(pyauto.GURL(url))
-      js = """
-          var val = document.getElementById("status").innerHTML;
-          window.domAutomationController.send(val);
-      """
-      self.WaitUntil(
-          lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300,
-          expect_retval=True)
-      val = self.ExecuteJavascript(js, 0, 1)
-      score = val[val.find(':') + 2:]
-      self.GetBrowserWindow(0).GetTab(1).Close(True)
-      return int(score)
-
-    orig_score = _RunSingleV8BenchmarkSuite()
-    self.assertEqual(1, self.GetTabCount(),
-                     msg='Did not clean up after running benchmark suite.')
-
-    scores = []
-    for _ in range(self._NUM_ITERATIONS):
-      score = _RunSingleV8BenchmarkSuite()
-      self.assertEqual(1, self.GetTabCount(),
-                       msg='Did not clean up after running benchmark suite.')
-      scores.append(score)
-
-    self._PrintSummaryResults(orig_score, self._NUM_ITERATIONS, scores, 'score')
+    url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
+    self.AppendTab(pyauto.GURL(url))
+    js = """
+        var val = document.getElementById("status").innerHTML;
+        window.domAutomationController.send(val);
+    """
+    self.assertTrue(
+        self.WaitUntil(
+            lambda: 'Score:' in self.ExecuteJavascript(js, 0, 1), timeout=300,
+            expect_retval=True),
+        msg='Timed out when waiting for v8 benchmark score.')
+    val = self.ExecuteJavascript(js, 0, 1)
+    score = int(val[val.find(':') + 2:])
+    self._PrintSummaryResults(score, 'score')
 
 
 if __name__ == '__main__':