Commit 6b7b079c authored by tzik, committed by Commit bot

Revert of Switch telemetry over to use typ to run the unit tests. (patchset #9 id:170001 of https://codereview.chromium.org/659293003/)

Reason for revert:
This CL causes telemetry_perf_unittests failures on the following bots:
http://build.chromium.org/p/chromium.linux/buildstatus?builder=Linux%20Tests&number=15802
http://build.chromium.org/p/chromium.mac/buildstatus?builder=Mac%2010.6%20Tests%20%28dbg%29%281%29&number=54625

The stack trace was:
Traceback (most recent call last):
  <module> at tools/telemetry/telemetry/unittest/run_tests.py:179
    RunTestsCommand.main()
  main at tools/telemetry/telemetry/unittest/run_tests.py:85
    return obj.Run(options)
  Run at tools/telemetry/telemetry/unittest/run_tests.py:126
    ret, _, _ = runner.run()
  run at third_party/typ/typ/runner.py:187
    upload_ret = self._upload(full_results)
  _upload at third_party/typ/typ/runner.py:599
    full_results)
  make_upload_request at third_party/typ/typ/json_results.py:113
    content_type, data = _encode_multipart_form_data(attrs, full_results)
  _encode_multipart_form_data at third_party/typ/typ/json_results.py:183
    body = CRLF.join(lines)
TypeError: sequence item 3: expected string, NoneType found
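
For context, str.join() requires every element to be a string, so the failure above is consistent with one of the multipart form-data values (for example an unset builder or master name) being None when the request body is assembled. A minimal sketch of that failure mode, with illustrative field names only:

CRLF = '\r\n'
lines = [
    '--BOUNDARY',
    'Content-Disposition: form-data; name="builder"',
    '',
    None,  # attribute value that was never populated on the bot (assumed)
]
body = CRLF.join(lines)
# -> TypeError: sequence item 3: expected string, NoneType found (Python 2)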

Original issue's description:
> Switch telemetry over to use typ to run the unit tests.
>
> Using typ allows us to run the tests in parallel and share
> the logic for parsing the results and uploading them to the
> flakiness dashboard with other python test steps.
>
> R=dtu@chromium.org, tonyg@chromium.org, nduca@chromium.org
> BUG=402172, 388256
>
> Committed: https://crrev.com/3e7c2a6ee80e05fc6b8090bee850c40b8a2a3810
> Cr-Commit-Position: refs/heads/master@{#302570}
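
For reference, the typ runner that the original CL wired in is driven roughly as sketched below. The attribute names come from the call sites visible in run_tests.py and run_tests_unittest.py in this diff; the directory handling and result unpacking are illustrative assumptions, not a verbatim excerpt.

import os
import typ  # vendored under third_party/typ by the original CL

telemetry_dir = os.path.dirname(os.path.realpath(__file__))
runner = typ.Runner()
runner.top_level_dir = telemetry_dir
# Point the runner at the test files; runner.host provides path helpers.
runner.args.tests = [runner.host.join(telemetry_dir, 'telemetry', 'unittest')]
# run() discovers and executes the tests (typ can run them in parallel) and
# returns the exit code plus the aggregated results that typ's json_results
# module uploads to the flakiness dashboard.
ret, full_results, _ = runner.run()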

TBR=dtu@chromium.org,tonyg@chromium.org,nduca@chromium.org,achuith@chromium.org,dpranke@chromium.org
NOTREECHECKS=true
NOTRY=true
BUG=402172, 388256

Review URL: https://codereview.chromium.org/700703003

Cr-Commit-Position: refs/heads/master@{#302575}
parent 0167b486
@@ -9,22 +9,17 @@ This script DOES NOT run benchmarks. run_benchmark does that.
"""
import os
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
if __name__ == '__main__':
perf_dir = os.path.dirname(os.path.realpath(__file__))
telemetry_dir = os.path.realpath(os.path.join(perf_dir, '..', 'telemetry'))
from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
env = os.environ.copy()
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir
else:
env['PYTHONPATH'] = telemetry_dir
path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest',
'run_tests.py')
argv = ['--top-level-dir', perf_dir] + sys.argv[1:]
sys.exit(subprocess.call([sys.executable, path_to_run_tests] + argv,
env=env))
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.realpath(__file__))
progress_reporters = [
gtest_progress_reporter.GTestProgressReporter(sys.stdout)]
run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters)
sys.exit(run_tests.RunTestsCommand.main())
@@ -33,9 +33,7 @@ def _CommonChecks(input_api, output_api):
return results
def GetPathsToPrepend(input_api):
return [input_api.PresubmitLocalPath(),
os.path.join(input_api.PresubmitLocalPath(), os.path.pardir,
os.path.pardir, 'third_party', 'typ')]
return [input_api.PresubmitLocalPath()]
def RunWithPrependedPath(prepended_path, fn, *args):
old_path = sys.path
@@ -5,23 +5,36 @@
# This file specifies dependencies required to bootstrap Telemetry. It is in a
# minimal version of the format used by other DEPS files that gclient can read,
# but it should only be used to bootstrap Telemetry *outside* of a normal
# Chrome checkout. In particular, the normal 'value' part of the python
# dict is not used and hence does not contain real URLs for the repos.
# Chrome checkout.
deps = {
"src/tools/telemetry": "",
"src/build/android": "",
"src/build/util": "",
"src/chrome/test/data/extensions/profiles": "",
"src/third_party/android_testrunner": "",
"src/third_party/android_tools/sdk/platform-tools": "",
"src/third_party/chromite/ssh_keys": "",
"src/third_party/flot/jquery.flot.min.js": "",
"src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js": "",
"src/third_party/WebKit/PerformanceTests/resources/statistics.js": "",
"src/third_party/webpagereplay": "",
"src/third_party/trace-viewer": "",
"src/third_party/typ": "",
"src/tools/crx_id": "",
"src/tools/perf/unit-info.json": "",
"src/tools/telemetry":
"https://src.chromium.org/chrome/trunk/src/tools/telemetry",
"src/build/android":
"https://src.chromium.org/chrome/trunk/src/build/android",
"src/build/util":
"https://src.chromium.org/chrome/trunk/src/build/util",
"src/chrome/test/data/extensions/profiles":
"https://src.chromium.org/chrome/trunk/src/chrome/test/data/extensions/profiles",
"src/third_party/android_testrunner":
"https://src.chromium.org/chrome/trunk/src/third_party/android_testrunner",
"src/third_party/android_tools/sdk/platform-tools":
"https://src.chromium.org/chrome/trunk/src/third_party/android_tools/sdk/platform-tools",
"src/third_party/chromite/ssh_keys":
"https://src.chromium.org/chrome/trunk/src/third_party/chromite/ssh_keys",
"src/third_party/flot/jquery.flot.min.js":
"https://src.chromium.org/chrome/trunk/src/third_party/flot/jquery.flot.min.js",
"src/third_party/WebKit/PerformanceTests/resources/jquery.tablesorter.min.js":
"https://src.chromium.org/blink/trunk/PerformanceTests/resources/jquery.tablesorter.min.js",
"src/third_party/WebKit/PerformanceTests/resources/statistics.js":
"https://src.chromium.org/blink/trunk/PerformanceTests/resources/statistics.js",
"src/third_party/webpagereplay":
"https://web-page-replay.googlecode.com/svn/trunk",
"src/third_party/trace-viewer":
"https://trace-viewer.googlecode.com/svn/trunk",
"src/tools/crx_id":
"https://src.chromium.org/chrome/trunk/src/tools/crx_id",
"src/tools/perf/unit-info.json":
"https://src.chromium.org/chrome/trunk/src/tools/perf/unit-info.json"
}
@@ -5,20 +5,14 @@
import os
import sys
import subprocess
from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
if __name__ == '__main__':
telemetry_dir = os.path.dirname(os.path.realpath(__file__))
env = os.environ.copy()
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + telemetry_dir
else:
env['PYTHONPATH'] = telemetry_dir
path_to_run_tests = os.path.join(telemetry_dir, 'telemetry', 'unittest',
'run_tests.py')
argv = ['--top-level-dir', telemetry_dir] + sys.argv[1:]
sys.exit(subprocess.call([sys.executable, path_to_run_tests] + argv,
env=env))
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.realpath(__file__))
progress_reporters = [
gtest_progress_reporter.GTestProgressReporter(sys.stdout)]
run_tests.config = run_tests.Config(base_dir, [base_dir], progress_reporters)
sys.exit(run_tests.RunTestsCommand.main())
@@ -92,31 +92,6 @@ def Enabled(*args):
return _Enabled
# TODO(dpranke): Remove if we don't need this.
def Isolated(*args):
"""Decorator for noting that tests must be run in isolation.
The test will be run by itself (not concurrently with any other tests)
if ANY of the args match the browser type, OS name, or OS version."""
def _Isolated(func):
if not isinstance(func, types.FunctionType):
func._isolated_strings = isolated_strings
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
wrapper._isolated_strings = isolated_strings
return wrapper
if len(args) == 1 and callable(args[0]):
isolated_strings = []
return _Isolated(args[0])
isolated_strings = list(args)
for isolated_string in isolated_strings:
# TODO(tonyg): Validate that these strings are recognized.
assert isinstance(isolated_string, str), 'Isolated accepts a list of strs'
return _Isolated
def IsEnabled(test, possible_browser):
"""Returns True iff |test| is enabled given the |possible_browser|.
@@ -127,11 +102,6 @@ def IsEnabled(test, possible_browser):
_enabled_strings attributes.
possible_browser: A PossibleBrowser to check whether |test| may run against.
"""
should_skip, _ = ShouldSkip(test, possible_browser)
return not should_skip
def ShouldSkip(test, possible_browser):
"""Returns whether the test should be skipped and the reason for it."""
platform_attributes = [a.lower() for a in [
possible_browser.browser_type,
possible_browser.platform.GetOSName(),
@@ -150,46 +120,28 @@ def ShouldSkip(test, possible_browser):
if hasattr(test, '_disabled_strings'):
disabled_strings = test._disabled_strings
if not disabled_strings:
return True, '' # No arguments to @Disabled means always disable.
return False # No arguments to @Disabled means always disable.
for disabled_string in disabled_strings:
if disabled_string in platform_attributes:
return (True,
'Skipping %s because it is disabled for %s. '
'You are running %s.' % (name,
' and '.join(disabled_strings),
' '.join(platform_attributes)))
print (
'Skipping %s because it is disabled for %s. '
'You are running %s.' % (name,
' and '.join(disabled_strings),
' '.join(platform_attributes)))
return False
if hasattr(test, '_enabled_strings'):
enabled_strings = test._enabled_strings
if not enabled_strings:
return False, None # No arguments to @Enabled means always enable.
return True # No arguments to @Enabled means always enable.
for enabled_string in enabled_strings:
if enabled_string in platform_attributes:
return False, None
return (True,
'Skipping %s because it is only enabled for %s. '
'You are running %s.' % (name,
' or '.join(enabled_strings),
' '.join(platform_attributes)))
return False, None
return False, None
def ShouldBeIsolated(test, possible_browser):
platform_attributes = [a.lower() for a in [
possible_browser.browser_type,
possible_browser.platform.GetOSName(),
possible_browser.platform.GetOSVersionName(),
]]
if possible_browser.supports_tab_control:
platform_attributes.append('has tabs')
if hasattr(test, '_isolated_strings'):
isolated_strings = test._isolated_strings
if not isolated_strings:
return True # No arguments to @Isolated means always isolate.
for isolated_string in isolated_strings:
if isolated_string in platform_attributes:
return True
print (
'Skipping %s because it is only enabled for %s. '
'You are running %s.' % (name,
' or '.join(enabled_strings),
' '.join(platform_attributes)))
return False
return False
return True
@@ -9,49 +9,28 @@ from telemetry.core import browser_finder
from telemetry.unittest import options_for_unittests
from telemetry.util import path
current_browser_options = None
current_browser = None
def teardown_browser():
global current_browser
global current_browser_options
if current_browser:
current_browser.Close()
current_browser = None
current_browser_options = None
class BrowserTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
global current_browser
global current_browser_options
options = options_for_unittests.GetCopy()
cls.CustomizeBrowserOptions(options.browser_options)
if not current_browser or (current_browser_options !=
options.browser_options):
if current_browser:
teardown_browser()
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
raise Exception('No browser found, cannot continue test.')
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
raise Exception('No browser found, cannot continue test.')
try:
current_browser = browser_to_create.Create(options)
current_browser_options = options.browser_options
except:
cls.tearDownClass()
raise
cls._browser = current_browser
cls._browser = None
try:
cls._browser = browser_to_create.Create(options)
except:
cls.tearDownClass()
raise
@classmethod
def tearDownClass(cls):
pass
if cls._browser:
cls._browser.Close()
cls._browser = None
@classmethod
def CustomizeBrowserOptions(cls, options):
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import json
import re
import time
import unittest
import urllib2
# TODO(dpranke): This code is largely cloned from, and redundant with,
# src/mojo/tools/run_mojo_python_tests.py, and also duplicates logic
# in test-webkitpy and run-webkit-tests. We should consolidate the
# python TestResult parsing/converting/uploading code as much as possible.
def AddOptions(parser):
parser.add_option('--metadata', action='append', default=[],
help=('optional key=value metadata that will be stored '
'in the results files (can be used for revision '
'numbers, etc.)'))
parser.add_option('--write-full-results-to', metavar='FILENAME',
action='store',
help='The path to write the list of full results to.')
parser.add_option('--builder-name',
help='The name of the builder as shown on the waterfall.')
parser.add_option('--master-name',
help='The name of the buildbot master.')
parser.add_option("--test-results-server", default="",
help=('If specified, upload full_results.json file to '
'this server.'))
parser.add_option('--test-type',
help=('Name of test type / step on the waterfall '
'(e.g., "telemetry_unittests").'))
def ValidateArgs(parser, args):
for val in args.metadata:
if '=' not in val:
parser.error('Error: malformed metadata "%s"' % val)
if (args.test_results_server and
(not args.builder_name or not args.master_name or not args.test_type)):
parser.error('Error: --builder-name, --master-name, and --test-type '
'must be specified along with --test-result-server.')
def WriteFullResultsIfNecessary(args, full_results):
if not args.write_full_results_to:
return
with open(args.write_full_results_to, 'w') as fp:
json.dump(full_results, fp, indent=2)
fp.write("\n")
def UploadFullResultsIfNecessary(args, full_results):
if not args.test_results_server:
return False, ''
url = 'http://%s/testfile/upload' % args.test_results_server
attrs = [('builder', args.builder_name),
('master', args.master_name),
('testtype', args.test_type)]
content_type, data = _EncodeMultiPartFormData(attrs, full_results)
return _UploadData(url, data, content_type)
TEST_SEPARATOR = '.'
def FullResults(args, suite, results):
"""Convert the unittest results to the Chromium JSON test result format.
This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
"""
full_results = {}
full_results['interrupted'] = False
full_results['path_delimiter'] = TEST_SEPARATOR
full_results['version'] = 3
full_results['seconds_since_epoch'] = time.time()
full_results['builder_name'] = args.builder_name or ''
for md in args.metadata:
key, val = md.split('=', 1)
full_results[key] = val
all_test_names = AllTestNames(suite)
sets_of_passing_test_names = map(PassingTestNames, results)
sets_of_failing_test_names = map(functools.partial(FailedTestNames, suite),
results)
# TODO(crbug.com/405379): This handles tests that are skipped via the
# unittest skip decorators (like skipUnless). The tests that are skipped via
# telemetry's decorators package are not included in the test suite at all so
# we need those to be passed in in order to include them.
skipped_tests = (set(all_test_names) - sets_of_passing_test_names[0]
- sets_of_failing_test_names[0])
num_tests = len(all_test_names)
num_failures = NumFailuresAfterRetries(suite, results)
num_skips = len(skipped_tests)
num_passes = num_tests - num_failures - num_skips
full_results['num_failures_by_type'] = {
'FAIL': num_failures,
'PASS': num_passes,
'SKIP': num_skips,
}
full_results['tests'] = {}
for test_name in all_test_names:
if test_name in skipped_tests:
value = {
'expected': 'SKIP',
'actual': 'SKIP',
}
else:
value = {
'expected': 'PASS',
'actual': ActualResultsForTest(test_name,
sets_of_failing_test_names,
sets_of_passing_test_names),
}
if value['actual'].endswith('FAIL'):
value['is_unexpected'] = True
_AddPathToTrie(full_results['tests'], test_name, value)
return full_results
def ActualResultsForTest(test_name, sets_of_failing_test_names,
sets_of_passing_test_names):
actuals = []
for retry_num in range(len(sets_of_failing_test_names)):
if test_name in sets_of_failing_test_names[retry_num]:
actuals.append('FAIL')
elif test_name in sets_of_passing_test_names[retry_num]:
assert ((retry_num == 0) or
(test_name in sets_of_failing_test_names[retry_num - 1])), (
'We should not have run a test that did not fail '
'on the previous run.')
actuals.append('PASS')
assert actuals, 'We did not find any result data for %s.' % test_name
return ' '.join(actuals)
def ExitCodeFromFullResults(full_results):
return 1 if full_results['num_failures_by_type']['FAIL'] else 0
def AllTestNames(suite):
test_names = []
# _tests is protected pylint: disable=W0212
for test in suite._tests:
if isinstance(test, unittest.suite.TestSuite):
test_names.extend(AllTestNames(test))
else:
test_names.append(test.id())
return test_names
def NumFailuresAfterRetries(suite, results):
return len(FailedTestNames(suite, results[-1]))
def FailedTestNames(suite, result):
failed_test_names = set()
for test, error in result.failures + result.errors:
if isinstance(test, unittest.TestCase):
failed_test_names.add(test.id())
elif isinstance(test, unittest.suite._ErrorHolder): # pylint: disable=W0212
# If there's an error in setUpClass or setUpModule, unittest gives us an
# _ErrorHolder object. We can parse the object's id for the class or
# module that failed, then find all tests in that class or module.
match = re.match('setUp[a-zA-Z]+ \\((.+)\\)', test.id())
assert match, "Don't know how to retry after this error:\n%s" % error
module_or_class = match.groups()[0]
failed_test_names |= _FindChildren(module_or_class, AllTestNames(suite))
else:
assert False, 'Unknown test type: %s' % test.__class__
return failed_test_names
def _FindChildren(parent, potential_children):
children = set()
parent_name_parts = parent.split('.')
for potential_child in potential_children:
child_name_parts = potential_child.split('.')
if parent_name_parts == child_name_parts[:len(parent_name_parts)]:
children.add(potential_child)
return children
def PassingTestNames(result):
return set(test.id() for test in result.successes)
def _AddPathToTrie(trie, path, value):
if TEST_SEPARATOR not in path:
trie[path] = value
return
directory, rest = path.split(TEST_SEPARATOR, 1)
if directory not in trie:
trie[directory] = {}
_AddPathToTrie(trie[directory], rest, value)
def _EncodeMultiPartFormData(attrs, full_results):
# Cloned from webkitpy/common/net/file_uploader.py
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for key, value in attrs:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="file"; '
'filename="full_results.json"')
lines.append('Content-Type: application/json')
lines.append('')
lines.append(json.dumps(full_results))
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def _UploadData(url, data, content_type):
request = urllib2.Request(url, data, {'Content-Type': content_type})
try:
response = urllib2.urlopen(request)
if response.code == 200:
return False, ''
return True, ('Uploading the JSON results failed with %d: "%s"' %
(response.code, response.read()))
except Exception as e:
return True, 'Uploading the JSON results raised "%s"\n' % str(e)
@@ -3,7 +3,9 @@
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util
@@ -12,29 +14,35 @@ def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
stream = _LoggingOutputStream()
error_string = ''
if unit_tests:
logging.info('Running telemetry unit tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'telemetry', unit_tests, stream)
if ret:
error_string += 'The unit tests failed.\n'
logging.info('Running telemetry unit tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'telemetry',
os.path.join('telemetry', 'telemetry'),
unit_tests, stream)
if ret:
error_string += 'The unit tests failed.\n'
if perf_tests:
logging.info('Running telemetry perf tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'perf', perf_tests, stream)
if ret:
error_string = 'The perf tests failed.\n'
logging.info('Running telemetry perf tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
if ret:
error_string = 'The perf tests failed.\n'
return error_string
def _RunOneSetOfTests(browser_type, dir_name, tests, stream):
top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', dir_name)
args = ['--browser', browser_type,
'--top-level-dir', top_level_dir,
'--jobs', '1'] + tests
return run_tests.RunTestsCommand.main(args, stream=stream)
def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
if not tests:
return
top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
sys.path.append(top_level_dir)
output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
output_formatters)
return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)
class _LoggingOutputStream(object):
@@ -4,17 +4,11 @@
import unittest
from telemetry import decorators
from telemetry.core import util
from telemetry.unittest import run_tests
class MockArgs(object):
def __init__(self):
self.positional_args = []
self.exact_test_filter = True
self.run_disabled_tests = False
class MockPossibleBrowser(object):
def __init__(self, browser_type, os_name, os_version_name,
supports_tab_control):
@@ -37,19 +31,25 @@ class MockPlatform(object):
class RunTestsUnitTest(unittest.TestCase):
def setUp(self):
self.suite = unittest.TestSuite()
self.suite.addTests(run_tests.Discover(
util.GetTelemetryDir(), util.GetTelemetryDir(), 'disabled_cases.py'))
def _GetEnabledTests(self, browser_type, os_name, os_version_name,
supports_tab_control):
runner = run_tests.typ.Runner()
host = runner.host
runner.top_level_dir = util.GetTelemetryDir()
runner.args.tests = [host.join(util.GetTelemetryDir(),
'telemetry', 'unittest', 'disabled_cases.py')]
possible_browser = MockPossibleBrowser(
browser_type, os_name, os_version_name, supports_tab_control)
runner.classifier = run_tests.GetClassifier(MockArgs(), possible_browser)
_, test_set = runner.find_tests(runner.args)
return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
# pylint: disable=W0212
def MockPredicate(test):
method = getattr(test, test._testMethodName)
return decorators.IsEnabled(method, MockPossibleBrowser(
browser_type, os_name, os_version_name, supports_tab_control))
enabled_tests = set()
for i in run_tests.FilterSuite(self.suite, MockPredicate)._tests:
for j in i:
for k in j:
enabled_tests.add(k._testMethodName)
return enabled_tests
def testSystemMacMavericks(self):
self.assertEquals(
@@ -15,7 +15,6 @@ from telemetry.timeline import model as model_module
from telemetry.timeline import async_slice
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.unittest import browser_test_case
from telemetry.value import scalar
from telemetry.web_perf import timeline_based_measurement as tbm_module
from telemetry.web_perf import timeline_interaction_record as tir_module
@@ -202,12 +201,9 @@ class TestTimelinebasedMeasurementPage(page_module.Page):
class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
def setUp(self):
browser_test_case.teardown_browser()
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
# This test is flaky when run in parallel on the mac: crbug.com/426676
@benchmark.Disabled('mac')
def testSmoothnessTimelineBasedMeasurementForSmoke(self):
ps = self.CreateEmptyPageSet()
ps.AddPage(TestTimelinebasedMeasurementPage(
@@ -225,8 +221,6 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
'DrawerAnimation-frame_time_discrepancy')
self.assertEquals(len(v), 1)
# This test is flaky when run in parallel on the mac: crbug.com/426676
@benchmark.Disabled('mac')
def testFastTimelineBasedMeasurementForSmoke(self):
ps = self.CreateEmptyPageSet()
ps.AddPage(TestTimelinebasedMeasurementPage(
@@ -262,8 +256,7 @@ class TimelineBasedMeasurementTest(page_test_test_case.PageTestTestCase):
self.assertGreaterEqual(v[0].value, 200.0)
# Disabled since mainthread_jank metric is not supported on windows platform.
# Also, flaky on the mac when run in parallel: crbug.com/426676
@benchmark.Disabled('win', 'mac')
@benchmark.Disabled('win')
def testMainthreadJankTimelineBasedMeasurement(self):
ps = self.CreateEmptyPageSet()
ps.AddPage(TestTimelinebasedMeasurementPage(