PyAuto performance tests now output perf data so that autotest can graph it.

BUG=chromium-os:18185
TEST=None

Review URL: http://codereview.chromium.org/7745007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@98517 0039d316-1c4b-4281-b951-d872f2087c98
parent d6e2849d
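
The mechanism this change introduces: each PyAuto perf test prints its measurement between sentinel markers, and the autotest wrapper scrapes those marked lines from the captured test output and hands them to write_perf_keyval() for graphing. A minimal illustrative sketch of that round trip (the key name and value are hypothetical; the marker strings are the ones added in the diff below):

import re

PRE, POST = '_PERF_PRE_', '_PERF_POST_'

# PyAuto side: emit the measurement between sentinel markers.
sample_line = "%s('milliseconds_NewTabPage', 123.45)%s" % (PRE, POST)

# Autotest side: pick out marked lines and eval them into a keyval dict.
marker_re = re.compile('%s(.+)%s' % (PRE, POST))
perf_lines = [line for line in [sample_line] if marker_re.match(line)]
perf_dict = dict([eval(marker_re.match(line).group(1)) for line in perf_lines])
print(perf_dict)  # {'milliseconds_NewTabPage': 123.45}

The eval() of the marked substring is acceptable here only because the tuple text comes from the test's own print statement.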
@@ -4,10 +4,12 @@
import os
import pwd
import re
import shutil
import subprocess
from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import constants, chrome_test, cros_ui, login
@@ -16,6 +18,9 @@ class desktopui_PyAutoPerfTests(chrome_test.ChromeTestBase):
Performs all setup and fires off the CHROMEOS_PERF PyAuto suite.
"""
_PERF_MARKER_PRE = '_PERF_PRE_'
_PERF_MARKER_POST = '_PERF_POST_'
version = 1
def initialize(self):
@@ -74,4 +79,19 @@ class desktopui_PyAutoPerfTests(chrome_test.ChromeTestBase):
functional_cmd = cros_ui.xcommand_as(
'%s/chrome_test/test_src/chrome/test/functional/'
'pyauto_functional.py --suite=CHROMEOS_PERF -v' % deps_dir)
utils.system(functional_cmd)
proc = subprocess.Popen(
functional_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = proc.communicate()[0]
if proc.returncode != 0:
raise error.TestFail(
'Unexpected return code from pyauto_functional.py when running '
'with the CHROMEOS_PERF suite: %d' % proc.returncode)
re_compiled = re.compile('%s(.+)%s' % (self._PERF_MARKER_PRE,
self._PERF_MARKER_POST))
perf_lines = [line for line in output.split('\n')
if re_compiled.match(line)]
if perf_lines:
perf_dict = dict([eval(re_compiled.match(line).group(1))
for line in perf_lines])
self.write_perf_keyval(perf_dict)
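
For reference, the capture-and-check pattern used above in isolation (a sketch; the no-op shell command 'true' stands in for the real pyauto_functional.py invocation):

import subprocess

# Run a child process, capture combined stdout/stderr, and fail loudly on a
# non-zero exit code. 'true' is a stand-in, not the real test runner.
proc = subprocess.Popen('true', shell=True,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0]  # Blocks until the child exits.
if proc.returncode != 0:
    raise RuntimeError('Command failed with return code %d' % proc.returncode)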
@@ -16,7 +16,7 @@ to run.
import logging
import math
import os
import time
import timeit
import pyauto_functional # Must be imported before pyauto.
import pyauto
@@ -26,6 +26,8 @@ class PerfTest(pyauto.PyUITest):
"""Basic performance tests."""
_DEFAULT_NUM_ITERATIONS = 50
_PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
_PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
def setUp(self):
"""Performs necessary setup work before running each test."""
@@ -40,6 +42,9 @@ class PerfTest(pyauto.PyUITest):
self.fail('Error processing environment variable: %s' % e)
pyauto.PyUITest.setUp(self)
# TODO(dennisjeffrey): Reorganize the code to create a base PerfTest class
# to separate out common functionality, then create specialized subclasses
# such as TabPerfTest that implement the test-specific functionality.
def _MeasureElapsedTime(self, python_command, num_invocations):
"""Measures time (in msec) to execute a python command one or more times.
@@ -52,11 +57,11 @@
times, in milliseconds as a float.
"""
assert callable(python_command)
start_time = time.time()
for _ in range(num_invocations):
python_command()
stop_time = time.time()
return (stop_time - start_time) * 1000 # Convert to milliseconds.
def RunCommand():
for _ in range(num_invocations):
python_command()
timer = timeit.Timer(stmt=lambda: RunCommand())
return timer.timeit(number=1) * 1000 # Convert seconds to milliseconds.
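
The timeit-based measurement above as a standalone sketch (_measure_elapsed_ms is a hypothetical free function mirroring _MeasureElapsedTime):

import timeit

def _measure_elapsed_ms(python_command, num_invocations):
    # Times num_invocations calls of a no-argument callable, in milliseconds.
    def run_command():
        for _ in range(num_invocations):
            python_command()
    # timeit turns off garbage collection during the measurement by default.
    return timeit.Timer(stmt=run_command).timeit(number=1) * 1000

print(_measure_elapsed_ms(lambda: sum(range(1000)), 10))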
def _AvgAndStdDev(self, values):
"""Computes the average and standard deviation of a set of values.
@@ -67,19 +72,38 @@ class PerfTest(pyauto.PyUITest):
Returns:
A 2-tuple of floats (average, standard_deviation).
"""
avg = float(sum(values)) / len(values)
temp_vals = [math.pow(x - avg, 2) for x in values]
std_dev = math.sqrt(sum(temp_vals) / len(temp_vals))
avg = 0.0
std_dev = 0.0
if values:
avg = float(sum(values)) / len(values)
if len(values) > 1:
temp_vals = [math.pow(x - avg, 2) for x in values]
std_dev = math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))
return avg, std_dev
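
The revised code above computes the sample standard deviation (dividing by n - 1, i.e. with Bessel's correction) instead of the population standard deviation, and guards against empty or single-element inputs. A quick sketch of the difference on made-up numbers:

import math

values = [10.0, 12.0, 14.0]                        # Hypothetical timings.
avg = sum(values) / len(values)                    # 12.0
sq_diffs = [(x - avg) ** 2 for x in values]
population_std = math.sqrt(sum(sq_diffs) / len(values))       # Divide by n.
sample_std = math.sqrt(sum(sq_diffs) / (len(values) - 1))     # Divide by n - 1.
print(population_std, sample_std)                  # ~1.63 vs. 2.0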
def _PrintSummaryResults(self, first_val, units, values=[]):
def _OutputPerfGraphValue(self, description, value):
"""Outputs a performance value to have it graphed on the performance bots.
Only used for ChromeOS.
Args:
description: A string description of the performance value.
value: A numeric value representing a single performance measurement.
"""
if self.IsChromeOS():
print '\n%s(\'%s\', %.2f)%s' % (self._PERF_OUTPUT_MARKER_PRE, description,
value, self._PERF_OUTPUT_MARKER_POST)
def _PrintSummaryResults(self, description, first_val, units, values=[]):
"""Logs summary measurement information.
Args:
description: A string description for the specified results.
first_val: A numeric measurement value for a single initial trial.
units: A string specifying the units for the specified measurements.
values: A list of numeric value measurements.
"""
logging.info('Results for: ' + description)
logging.debug('Single trial: %.2f %s', first_val, units)
if values:
avg, std_dev = self._AvgAndStdDev(values)
@@ -89,8 +113,11 @@ class PerfTest(pyauto.PyUITest):
logging.info(' --------------------------')
logging.info(' Average: %.2f %s', avg, units)
logging.info(' Std dev: %.2f %s', std_dev, units)
self._OutputPerfGraphValue('%s_%s' % (units, description), avg)
else:
self._OutputPerfGraphValue('%s_%s' % (units, description), first_val)
def _RunNewTabTest(self, open_tab_command, num_tabs=1):
def _RunNewTabTest(self, description, open_tab_command, num_tabs=1):
"""Runs a perf test that involves opening new tab(s).
This helper function can be called from different tests to do perf testing
@@ -98,11 +125,14 @@ class PerfTest(pyauto.PyUITest):
will open up a single tab.
Args:
description: A string description of the associated tab test.
open_tab_command: A callable that will open a single tab.
num_tabs: The number of tabs to open, i.e., the number of times to invoke
the |open_tab_command|.
"""
assert callable(open_tab_command)
# TODO(dennisjeffrey): Consider not taking an initial sample here.
orig_elapsed = self._MeasureElapsedTime(open_tab_command, num_tabs)
self.assertEqual(1 + num_tabs, self.GetTabCount(),
msg='Did not open %d new tab(s).' % num_tabs)
@@ -118,25 +148,30 @@ class PerfTest(pyauto.PyUITest):
self.GetBrowserWindow(0).GetTab(1).Close(True)
timings.append(elapsed)
self._PrintSummaryResults(orig_elapsed, 'ms', values=timings)
self._PrintSummaryResults(description, orig_elapsed, 'milliseconds',
values=timings)
def testNewTab(self):
"""Measures time to open a new tab."""
self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
self._RunNewTabTest('NewTabPage',
lambda: self.AppendTab(pyauto.GURL('chrome://newtab')))
def testNewTabPdf(self):
"""Measures time to open a new tab navigated to a PDF file."""
url = self.GetFileURLForDataPath('pyauto_private', 'pdf', 'TechCrunch.pdf')
self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
self._RunNewTabTest('NewTabPdfPage',
lambda: self.AppendTab(pyauto.GURL(url)))
def testNewTabFlash(self):
"""Measures time to open a new tab navigated to a flash page."""
url = self.GetFileURLForDataPath('plugin', 'flash.swf')
self._RunNewTabTest(lambda: self.AppendTab(pyauto.GURL(url)))
self._RunNewTabTest('NewTabFlashPage',
lambda: self.AppendTab(pyauto.GURL(url)))
def test20Tabs(self):
"""Measures time to open 20 tabs."""
self._RunNewTabTest(
'20TabsNewTabPage',
lambda: self.AppendTab(pyauto.GURL('chrome://newtab')), num_tabs=20)
def testV8BenchmarkSuite(self):
@@ -154,7 +189,7 @@ class PerfTest(pyauto.PyUITest):
msg='Timed out when waiting for v8 benchmark score.')
val = self.ExecuteJavascript(js, 0, 1)
score = int(val[val.find(':') + 2:])
self._PrintSummaryResults(score, 'score')
self._PrintSummaryResults('V8Benchmark', score, 'score')
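
The score is extracted from the benchmark's status text with a simple string slice; a quick sketch on a hypothetical status string (the real text comes from the page's DOM):

val = 'Score: 4321'                     # Hypothetical benchmark status text.
score = int(val[val.find(':') + 2:])    # Skip the ': ' after the label.
print(score)                            # 4321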
if __name__ == '__main__':