Commit 37a6c9fc authored by Mehrdad Hessar, committed by Commit Bot

This CL adds webdriver tests for TFLite experiments.

Change-Id: I2eff6643d4a9e1c1faace2d1bd4babb8b475ec24
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2363949
Commit-Queue: Mehrdad Hessar <mehrdadh@google.com>
Reviewed-by: Sophie Chang <sophiechang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#803305}
parent b3ce7eb6
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import os
import json
import statistics
import sys
from common import TestDriver
from common import IntegrationTest
# Browser histograms sampled after each page-load session. The timing
# histograms among these are aggregated into mean/std-dev log entries;
# the rest are logged raw for debugging.
HISTOGRAMS = [
    'TFLiteExperiment.Observer.TFLitePredictor.Null',
    'TFLiteExperiment.Observer.TFLitePredictor.EvaluationRequested',
    'TFLiteExperiment.Observer.Finish',
    'TFLiteExperiment.Observer.TFLitePredictor.InputSetTime',
    'TFLiteExperiment.Observer.TFLitePredictor.EvaluationTime',
    'PageLoad.PaintTiming.NavigationToFirstContentfulPaint',
    'PageLoad.PaintTiming.NavigationToLargestContentfulPaint',
]
# Number of times each URL is re-opened in a fresh tab per session.
URL_LOOP_NUM = 10
class TFLiteKeyedServiceTest(IntegrationTest):
    """Webdriver tests measuring page-load metrics for the TFLite
    Keyed Service experiment.

    Each test reads URLs from --url_list, loads every URL in a fresh
    browser, reopens it URL_LOOP_NUM times in a new tab, and logs the
    histograms in HISTOGRAMS for later CSV aggregation.
    """

    # Maps a histogram name to the label prefix used when logging its
    # mean and standard deviation. The resulting '<prefix> Std' /
    # '<prefix> Mean' strings are consumed by the log parser, so they
    # must not change.
    _METRIC_PREFIX = {
        'TFLiteExperiment.Observer.TFLitePredictor.EvaluationTime':
            'Evaluation',
        'TFLiteExperiment.Observer.TFLitePredictor.InputSetTime':
            'InputSet',
        'PageLoad.PaintTiming.NavigationToFirstContentfulPaint':
            'FirstContent',
        'PageLoad.PaintTiming.NavigationToLargestContentfulPaint':
            'LargestContent',
    }

    def _set_tflite_experiment_config(self, test_driver):
        """Forwards the TFLite experiment flags to the Chrome command line."""
        if test_driver._flags.tflite_model:
            test_driver.AddChromeArg("--tflite-model-path=%s" %
                                     test_driver._flags.tflite_model)
        if test_driver._flags.tflite_experiment_log:
            test_driver.AddChromeArg("--tflite-experiment-log-path=%s" %
                                     test_driver._flags.tflite_experiment_log)
        test_driver.AddChromeArg('--tflite-predictor-num-threads=%s' %
                                 str(test_driver._flags.tflite_num_threads))

    # Log histogram for url using test_driver logger.
    def _log_histogram(self, test_driver, histogram, url, is_tflite):
        """Logs mean and standard deviation of |histogram| for |url|.

        Args:
            test_driver: TestDriver whose browser histograms are read.
            histogram: Name of the browser histogram to sample.
            url: The URL the samples belong to.
            is_tflite: Whether TFLite was enabled for this session.
        """
        log_dict = {
            'url': url,
            'tflite': 'true' if is_tflite else 'false',
        }
        histogram_value = test_driver.GetBrowserHistogram(histogram)
        # Always log the raw histogram first for debugging.
        test_driver._logger.info(histogram_value)
        if not histogram_value:
            return
        # Expand each bucket into its midpoint value, repeated per count.
        bucket_list = []
        for bucket in histogram_value['buckets']:
            midpoint = (bucket['low'] + bucket['high']) / 2
            bucket_list.extend([midpoint] * bucket['count'])
        if not bucket_list:
            # No samples recorded; nothing to aggregate.
            return
        mean_val = statistics.mean(bucket_list)
        # statistics.stdev requires at least two samples; report 0.0 for
        # a single sample instead of raising StatisticsError.
        std_val = (statistics.stdev(bucket_list)
                   if len(bucket_list) > 1 else 0.0)
        prefix = self._METRIC_PREFIX.get(histogram)
        if not prefix:
            # Histograms without a metric label are only logged raw above.
            return
        for suffix, value in (('Std', std_val), ('Mean', mean_val)):
            log_dict['name'] = '%s %s' % (prefix, suffix)
            log_dict['value'] = value
            test_driver._logger.info(log_dict)

    def _get_url_list(self, test_driver):
        """Returns the URLs listed in the --url_list file, or [] if unset.

        Lines are returned verbatim, including trailing newlines; the
        downstream log parser strips them when aggregating.
        """
        if not test_driver or not test_driver._flags.url_list:
            return []
        with open(test_driver._flags.url_list, 'r') as f:
            return list(f)

    def _open_new_tab(self, test_driver, url):
        """Opens |url| in a new tab, then closes the previously open tab."""
        driver = test_driver._driver
        driver.execute_script('window.open();')
        driver.switch_to.window(driver.window_handles[1])
        driver.get(url)
        # Close the original tab and continue in the remaining one.
        driver.switch_to.window(driver.window_handles[0])
        driver.close()
        driver.switch_to.window(driver.window_handles[0])

    def _run_url_session(self, url, tflite_enabled):
        """Runs one measurement session for |url| in a fresh browser.

        Loads the URL once, reopens it URL_LOOP_NUM times in a new tab,
        then logs every histogram in HISTOGRAMS.
        """
        with TestDriver() as test_driver:
            if tflite_enabled:
                self._set_tflite_experiment_config(test_driver)
            test_driver.ClearCache()
            test_driver.LoadURL(url, timeout=120)
            for _ in range(URL_LOOP_NUM):
                self._open_new_tab(test_driver, url)
            for histogram in HISTOGRAMS:
                self._log_histogram(test_driver,
                                    histogram,
                                    url,
                                    is_tflite=tflite_enabled)

    # Records content load timing.
    def _keyed_service_test(self, tflite_enabled):
        """Runs a measurement session for every URL in --url_list."""
        # The driver used to read the flags is context-managed so it is
        # shut down before the per-URL sessions start.
        with TestDriver() as test_driver:
            url_list = self._get_url_list(test_driver)
        for url in url_list:
            self._run_url_session(url, tflite_enabled)

    # Records content load timing when TFLite Keyed Service is enabled.
    def test_with_tflite(self):
        self._keyed_service_test(tflite_enabled=True)

    # Records content load timing when TFLite Keyed Service is disabled.
    def test_no_tflite(self):
        self._keyed_service_test(tflite_enabled=False)

    # Records content load timing with and without TFLite, back to back
    # for each URL.
    def test_in_order(self):
        # Context-manage the driver used only for reading the URL list;
        # previously it was constructed inline and never cleaned up.
        with TestDriver() as test_driver:
            url_list = self._get_url_list(test_driver)
        for url in url_list:
            # Test with TFLite.
            self._run_url_session(url, tflite_enabled=True)
            # Test without TFLite.
            self._run_url_session(url, tflite_enabled=False)
if __name__ == '__main__':
    # Run every test in this file through the shared webdriver harness.
    IntegrationTest.RunAllTests()
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
import json
import csv
class LogParser():
    """The histogram log parser.

    This class is the tool that is used to convert histogram logs
    of the experiments to presentable formats like CSV.

    Attributes:
        _flags: A Namespace object from the call to parse_flags()
    """

    def __init__(self):
        self._flags = parse_flags()

    # Generate CSV file from recorded histograms.
    def generate_csv(self):
        """Converts the log lines in --log_file into a CSV at --csv_file."""
        if not self._flags.log_file or not self._flags.csv_file:
            return
        # Collect metric entries keyed by URL:
        # {url: {'<name>,<tflite>': value}}.
        url_list = {}
        with open(self._flags.log_file, 'r') as f:
            for line in f:
                # Skip lines that are not histogram log records.
                if ('url' not in line or 'name' not in line
                        or 'value' not in line or 'tflite' not in line):
                    continue
                # The payload starts right after the "INFO: " marker
                # (6 characters from the start of "INFO").
                start = line.find('INFO')
                data_string = line[start + 6:]
                # The logger writes Python dict reprs; turn the single
                # quotes into double quotes so json can parse them.
                # Strip CR and LF together so CRLF logs are handled too.
                data_string = data_string.replace("'", '"').rstrip('\r\n')
                data = json.loads(data_string)
                url = data['url'].rstrip('\n').rstrip('\r')
                if url not in url_list:
                    url_list[url] = {}
                name = data['name'] + ',' + str(data['tflite'])
                url_list[url][name] = data['value']
        with open(self._flags.csv_file, 'w') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')
            # Header columns come from the first URL's metric names.
            csv_header = ['url']
            for metrics in url_list.values():
                csv_header.extend(metrics.keys())
                break
            csv_writer.writerow(csv_header)
            # One data row per URL, sorted by URL; metrics missing for a
            # URL default to 0. (dict.items() replaces the Python-2-only
            # iteritems(), and sorting is applied to the iteration order
            # instead of being computed and discarded.)
            for url, metrics in sorted(url_list.items()):
                csv_data = [url]
                for column in csv_header[1:]:
                    csv_data.append(metrics.get(column, 0))
                csv_writer.writerow(csv_data)
def parse_flags():
    """Parses the given command line arguments.

    Returns:
        A new Namespace object with class properties for each argument added below.
        See pydoc for argparse.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_file',
                        type=str,
                        help='Path to the input log file.')
    parser.add_argument('--csv_file',
                        type=str,
                        # Fixed help-text typo: 'outut' -> 'output'.
                        help='Path to the output CSV file.')
    return parser.parse_args(sys.argv[1:])
if __name__ == '__main__':
    # Convert the histogram log named by --log_file into a CSV file.
    parser = LogParser()
    parser.generate_csv()
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import test_setup
# Make the chrome_proxy webdriver framework importable before
# 'import common' below.
sys.path.append(
    os.path.join(os.path.dirname(__file__, ), os.pardir, os.pardir, os.pardir,
                 os.pardir, 'tools', 'chrome_proxy', 'webdriver'))
import common
# Replace the framework's flag parser with the TFLite-specific one so
# TestDriver sees the experiment flags.
common.ParseFlags = test_setup.parse_flags
if __name__ == "__main__":
    results = test_setup.RunAllTests(run_all_tests=True)
    # Exit with a non-zero status so the harness registers failures.
    if results.errors or results.failures:
        sys.exit(2)
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import re
import argparse
# Make the chrome_proxy webdriver framework importable before
# 'import common' below.
sys.path.append(
    os.path.join(os.path.dirname(__file__, ), os.pardir, os.pardir, os.pardir,
                 os.pardir, 'tools', 'chrome_proxy', 'webdriver'))
import common
def parse_flags():
    """Parses the given command line arguments.

    Returns:
        A new Namespace object with class properties for each argument added below.
        See pydoc for argparse.
    """
    def TestFilter(v):
        try:
            # The filtering here allows for any number of * wildcards with a required
            # . separator between classname and methodname, but no other special
            # characters.
            return re.match(r'^([A-Za-z_0-9\*]+\.)?[A-Za-z_0-9\*]+$',
                            v).group(0)
        # re.match returns None on no match (AttributeError on .group);
        # narrowed from a bare except.
        except (AttributeError, TypeError):
            raise argparse.ArgumentTypeError(
                'Test filter "%s" is not a valid filter' % v)

    parser = argparse.ArgumentParser()
    parser.add_argument('--browser_args',
                        type=str,
                        help='Override browser flags '
                        'in code with these flags')
    parser.add_argument('--via_header_value',
                        default='1.1 Chrome-Compression-Proxy',
                        help='What the via should match to '
                        'be considered valid')
    parser.add_argument('--android',
                        help='If given, attempts to run the test on '
                        'Android via adb. Ignores usage of --chrome_exec',
                        action='store_true')
    parser.add_argument('--android_package',
                        default='com.android.chrome',
                        help='Set the android package for Chrome')
    parser.add_argument('--chrome_exec',
                        type=str,
                        help='The path to '
                        'the Chrome or Chromium executable')
    parser.add_argument(
        'chrome_driver',
        type=str,
        help='The path to '
        'the ChromeDriver executable. If not given, the default system chrome '
        'will be used.')
    parser.add_argument(
        '--disable_buffer',
        help='Causes stdout and stderr from '
        'tests to output normally. Otherwise, the standard output and standard '
        'error streams are buffered during the test run, and output from a '
        'passing test is discarded. Output will always be echoed normally on test '
        'fail or error and is added to the failure messages.',
        action='store_true')
    parser.add_argument(
        '-c',
        '--catch',
        help='Control-C during the test run '
        'waits for the current test to end and then reports all the results so '
        'far. A second Control-C raises the normal KeyboardInterrupt exception.',
        action='store_true')
    parser.add_argument('-f',
                        '--failfast',
                        help='Stop the test run on the first '
                        'error or failure.',
                        action='store_true')
    parser.add_argument(
        '--test_filter',
        '--gtest_filter',
        type=TestFilter,
        help='The filter to use when discovering tests to run, in the form '
        '<class name>.<method name> Wildcards (*) are accepted. Default=*',
        default='*')
    parser.add_argument(
        '--logging_level',
        choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRIT'],
        default='WARN',
        help='The logging verbosity for log '
        'messages, printed to stderr. To see stderr logging output during a '
        # Help text now matches the actual default above (was 'Default=ERROR').
        'successful test run, also pass --disable_buffer. Default=WARN')
    parser.add_argument('--log_file',
                        help='If given, write logging statements '
                        'to the given file instead of stderr.')
    parser.add_argument('--chrome_log',
                        help='If given, write logging chrome statements '
                        'to the given file.')
    parser.add_argument('--skip_slow',
                        action='store_true',
                        help='If set, tests '
                        'marked as slow will be skipped.',
                        default=False)
    parser.add_argument(
        '--chrome_start_time',
        type=int,
        default=0,
        help='The '
        'number of attempts to check if Chrome has fetched a proxy client config '
        'before starting the test. Each check takes about one second.')
    parser.add_argument(
        '--ignore_logging_prefs_w3c',
        action='store_true',
        help='If given, use the loggingPrefs capability instead of the W3C '
        'standard goog:loggingPrefs capability.')
    parser.add_argument('--tflite_model',
                        type=str,
                        help='The path to '
                        'the TFLite model')
    parser.add_argument('--tflite_experiment_log',
                        type=str,
                        help='The path to the TFLite experiment log file')
    parser.add_argument('--tflite_num_threads',
                        type=int,
                        help='Number of threads for TFLite predictor.',
                        default=4)
    parser.add_argument(
        '--url_list',
        type=str,
        help='The path to the URL list file for TFLite experiment.')
    return parser.parse_args(sys.argv[1:])
# Override parse flag method in common so the webdriver framework's
# TestDriver picks up the TFLite-specific flags defined above.
common.ParseFlags = parse_flags
def RunAllTests(run_all_tests=False):
    """A simple helper method to run all tests using unittest.main().

    Args:
        run_all_tests: If True, all tests in the directory will be run, Otherwise
            only the tests in the file given on the command line will be run.
    Returns:
        the TestResult object from the test runner
    """
    flags = parse_flags()
    logger = common.GetLogger()
    logger.debug('Command line args: %s', str(sys.argv))
    logger.info('sys.argv parsed to %s', str(flags))
    if flags.catch:
        common.unittest.installHandler()
    # Use python's test discovery to locate tests files that have subclasses of
    # unittest.TestCase and methods beginning with 'test'.
    if run_all_tests:
        pattern = '*.py'
    else:
        pattern = os.path.basename(sys.argv[0])
    discovered = common.unittest.TestLoader().discover(
        os.path.dirname(__file__), pattern=pattern)
    # Translate the --test_filter glob into a regular expression and keep
    # only the tests whose '<classname>.<methodname>' id matches it.
    filter_re = flags.test_filter.replace('.', r'\.').replace('*', '.*')
    selected = common.unittest.TestSuite()
    for suite in discovered:
        for case in suite:
            for test in case:
                # Drop the leading '<filename>.' from the id, which has the
                # form <filename>.<classname>.<methodname>.
                full_id = test.id()
                if re.match(filter_re, full_id[full_id.find('.') + 1:]):
                    selected.addTest(test)
    runner = common.unittest.runner.TextTestRunner(
        verbosity=2,
        failfast=flags.failfast,
        buffer=(not flags.disable_buffer))
    return runner.run(selected)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment