Commit 0b460e2f authored by Robert Ma, committed by Commit Bot

[run_web_tests] Fix --reset-results for testharness.js tests

In https://crrev.com/c/833751 we stopped creating new baselines for
testharness.js tests without existing baselines. This behaviour is in
fact undesirable: if a testharness.js test used to pass (and hence
didn't have a baseline), we'd like to be able to create a new baseline
for it using --reset-results if it starts to fail.

The code comment is also improved so that this is hopefully clear
enough.

Bug: 864410
Change-Id: I9b21a6e8d909585f8eb44e53dd90b488f383ea1a
Reviewed-on: https://chromium-review.googlesource.com/1179363
Commit-Queue: Robert Ma <robertma@chromium.org>
Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#584069}
parent 30f9c8aa
@@ -179,24 +179,34 @@ class SingleTestRunner(object):
         if (test_failures.has_failure_type(test_failures.FailureTimeout, failures) or
                 test_failures.has_failure_type(test_failures.FailureCrash, failures)):
             return
+        # We usually don't want to create a new baseline if there isn't one
+        # existing (which usually means this baseline isn't necessary, e.g.
+        # an image-first test without text expectation files). However, in the
+        # following cases, we do:
+        # 1. The failure is MISSING; a baseline is apparently needed.
+        # 2. A testharness.js test fails assertions: testharness.js tests
+        #    without baselines are implicitly expected to pass all assertions;
+        #    if there are failed assertions we need to create a new baseline.
+        #    Note that the created baseline might be redundant, but users can
+        #    optimize them later with optimize-baselines.
         self._save_baseline_data(driver_output.text, '.txt',
-                                 test_failures.has_failure_type(test_failures.FailureMissingResult, failures))
+                                 test_failures.has_failure_type(test_failures.FailureMissingResult, failures) or
+                                 test_failures.has_failure_type(test_failures.FailureTestHarnessAssertion, failures))
         self._save_baseline_data(driver_output.audio, '.wav',
                                  test_failures.has_failure_type(test_failures.FailureMissingAudio, failures))
         self._save_baseline_data(driver_output.image, '.png',
                                  test_failures.has_failure_type(test_failures.FailureMissingImage, failures))
 
-    def _save_baseline_data(self, data, extension, is_missing):
+    def _save_baseline_data(self, data, extension, force_create_new_baseline):
         if data is None:
             return
         port = self._port
         fs = self._filesystem
-        # It's OK that a testharness test or image-first test doesn't have the
-        # particular baseline, as long as |is_missing| is False.
+        # Do not create a new baseline unless we are specifically told so.
         current_expected_path = port.expected_filename(self._test_name, extension)
-        if not fs.exists(current_expected_path) and not is_missing:
+        if not fs.exists(current_expected_path) and not force_create_new_baseline:
             return
         flag_specific_dir = port.baseline_flag_specific_dir()
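
In isolation, the new logic reduces to a small predicate. The sketch
below is illustrative only: should_write_text_baseline and its boolean
parameters are invented names, not part of the Blink source, but the
condition mirrors the test_failures.has_failure_type checks built up in
the hunk above.

def should_write_text_baseline(baseline_exists, is_missing_failure,
                               failed_testharness_assertion):
    # Timeout and crash failures are filtered out before this point,
    # as in the hunk above.
    force_create_new_baseline = (is_missing_failure or
                                 failed_testharness_assertion)
    return baseline_exists or force_create_new_baseline

# The case this CL fixes: a testharness.js test with no baseline (it
# used to pass implicitly) that now fails assertions gets a baseline.
assert should_write_text_baseline(False, False, True)
# A passing testharness.js test without a baseline still gets none.
assert not should_write_text_baseline(False, False, False)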
@@ -1273,7 +1273,8 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
             expected_extensions=['.txt'])
 
     def test_reset_results_testharness_no_baseline(self):
-        # Tests that we don't create new result for a testharness test without baselines.
+        # Tests that we create a new result for a failing testharness test
+        # without baselines, but don't create one for a passing one.
         host = MockHost()
         details, log_stream, _ = logging_run(
             [
@@ -1284,8 +1285,8 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
             tests_included=True, host=host)
         file_list = host.filesystem.written_files.keys()
         self.assertEqual(details.exit_code, 0)
-        self.assertEqual(len(file_list), 5)
-        self.assert_baselines(file_list, log_stream, 'failures/unexpected/testharness', [])
+        self.assertEqual(len(file_list), 6)
+        self.assert_baselines(file_list, log_stream, 'failures/unexpected/testharness', ['.txt'])
         self.assert_baselines(file_list, log_stream, 'passes/testharness', [])
 
     def test_reset_results_testharness_existing_baseline(self):
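
The updated test relies on Blink-only helpers (MockHost, logging_run,
assert_baselines) that exist only inside the Chromium tree. As a
self-contained approximation of what it now asserts, here is a toy
sketch; fake_reset_results and its (name, has_baseline,
failed_assertion) tuples are invented for illustration:

import unittest


def fake_reset_results(tests):
    """Toy stand-in for a --reset-results run over testharness.js tests.

    Each test is a (name, has_baseline, failed_assertion) tuple; returns
    the names of the baseline files that would be written.
    """
    written = []
    for name, has_baseline, failed_assertion in tests:
        # Mirrors the new rule: rewrite an existing baseline, or create
        # one when a testharness assertion failed.
        if has_baseline or failed_assertion:
            written.append(name + '-expected.txt')
    return written


class ResetResultsSketch(unittest.TestCase):
    def test_failing_test_without_baseline_gets_one(self):
        written = fake_reset_results([
            ('failures/unexpected/testharness', False, True),
            ('passes/testharness', False, False),
        ])
        self.assertIn('failures/unexpected/testharness-expected.txt', written)
        self.assertNotIn('passes/testharness-expected.txt', written)


if __name__ == '__main__':
    unittest.main()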