Commit 4317a101 authored by Max Moroz, committed by Commit Bot

Blink web tests: add --no-expectations option for running arbitrary testcases.

https://chromium-review.googlesource.com/c/chromium/src/+/1906472 has broken the
ability to run arbitrary testcases without any expectations. This CL introduces
an explicit option --no-expectations that disables expectation parsing and test
results post-processing. This mode is useful for generating code coverage
reports, as it may work with any arbitrary testcases (passed via --test-list=)
and should also run faster than the default mode, though likely not
significantly faster, hence I did not measure that aspect.

Bug: 957581
Change-Id: I3c91cb55ba0cad73fa6b39d386c1b37949332ef4
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1974699
Reviewed-by: Quinten Yearsley <qyearsley@chromium.org>
Commit-Queue: Max Moroz <mmoroz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#726190}
parent 2165c926
......@@ -112,8 +112,9 @@ class Manager(object):
test_names.sort()
random.Random(self._options.seed).shuffle(test_names)
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
......@@ -158,6 +159,9 @@ class Manager(object):
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results, all_retry_results)
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update('Looking for new crash logs ...')
......@@ -302,6 +306,9 @@ class Manager(object):
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
expectations = self._expectations.model().get_expectations(test_file)
return (test_expectations.SLOW in expectations or
self._port.is_slow_wpt_test(test_file))
......
......@@ -44,8 +44,9 @@ _log = logging.getLogger(__name__)
def run_single_test(port, options, results_directory, worker_name, driver, test_input):
runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input)
try:
test_result = runner.run()
test_result.create_artifacts()
test_result = runner.run()
if not options.no_expectations:
test_result.create_artifacts()
return test_result
except DeviceFailure as error:
_log.error('device failed: %s', error)
......
......@@ -162,6 +162,10 @@ class WebTestFinder(object):
return line
def skip_tests(self, paths, all_tests_list, expectations, http_tests):
if self._options.no_expectations:
# do not skip anything.
return []
all_tests = set(all_tests_list)
tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
......
......@@ -92,7 +92,8 @@ class WebTestRunner(object):
self._printer.num_completed = 0
if retry_attempt < 1:
self._printer.print_expected(test_run_results, self._expectations.get_tests_with_result_type)
if self._expectations:
self._printer.print_expected(test_run_results, self._expectations.get_tests_with_result_type)
for test_name in set(tests_to_skip):
result = test_results.TestResult(test_name)
......@@ -191,6 +192,9 @@ class WebTestRunner(object):
test_run_results.unexpected_crashes, test_run_results.unexpected_timeouts))
def _update_summary_with_result(self, test_run_results, result):
if not self._expectations:
return
expected = self._expectations.matches_an_expected_result(result.test_name, result.type)
expectation_string = self._expectations.get_expectations_string(result.test_name)
actual_string = self._expectations.expectation_to_string(result.type)
......
......@@ -177,6 +177,12 @@ def parse_args(args):
'--ignore-default-expectations',
action='store_true',
help=('Do not use the default set of TestExpectations files.')),
optparse.make_option(
'--no-expectations',
action='store_true',
help=('Do not use TestExpectations, only run the tests without '
'reporting any results. Useful for generating code '
'coverage reports.')),
optparse.make_option(
'--additional-platform-directory',
action='append',
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment