Commit db12f918 authored by Chromium WPT Sync, committed by Commit Bot

Import wpt@68e4c96059aa7d8c4bc6c521b6e2e405377a581b

Using wpt-import in Chromium 35a1546b.
With Chromium commits locally applied on WPT:
d5cd6b98 "SharedWorker: Assign unique names to SharedWorkers to avoid unintentional matching"


Note to sheriffs: This CL imports external tests and adds
expectations for those tests; if this CL is large and causes
a few new failures, please fix the failures by adding new
lines to TestExpectations rather than reverting. See:
https://chromium.googlesource.com/chromium/src/+/master/docs/testing/web_platform_tests.md

Directory owners for changes in this CL:
foolip@chromium.org, lpz@chromium.org, robertma@chromium.org:
  external/wpt/tools
nsatragno@chromium.org:
  external/wpt/webauthn

NOAUTOREVERT=true
TBR=smcgruer

No-Export: true
Change-Id: I87e113530a9b1106b00170ecafe976535872f063
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2083691
Reviewed-by: WPT Autoroller <wpt-autoroller@chops-service-accounts.iam.gserviceaccount.com>
Commit-Queue: WPT Autoroller <wpt-autoroller@chops-service-accounts.iam.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#746116}
parent 704dc99b
......@@ -55,13 +55,25 @@ class ChromiumFormatter(base.BaseFormatter):
prefix += "%s: " % subtest
self.messages[test] += prefix + message + "\n"
def _store_test_result(self, name, actual, expected, message, subtest_failure=False):
def _append_artifact(self, cur_dict, artifact_name, artifact_value):
"""
Appends artifacts to the specified dictionary.
:param dict cur_dict: the test leaf dictionary to append to
:param str artifact_name: the name of the artifact
:param str artifact_value: the value of the artifact
"""
if "artifacts" not in cur_dict.keys():
cur_dict["artifacts"] = {}
cur_dict["artifacts"][artifact_name] = artifact_value
def _store_test_result(self, name, actual, expected, message, wpt_actual, subtest_failure):
"""
Stores the result of a single test in |self.tests|
:param str name: name of the test.
:param str actual: actual status of the test.
:param str expected: expected statuses of the test.
:param str message: test output, such as status, subtest, errors etc.
:param str wpt_actual: actual status reported by wpt, may differ from |actual|.
:param bool subtest_failure: whether this test failed because of subtests
"""
# The test name can contain a leading / which will produce an empty
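Note: the following is a minimal standalone sketch of the _append_artifact behaviour introduced above; the leaf dictionary and the concrete values are illustrative and not part of this change.

# Sketch only: mirrors _append_artifact outside the ChromiumFormatter class.
def append_artifact(cur_dict, artifact_name, artifact_value):
    # Lazily create the "artifacts" sub-dictionary, then store the value.
    if "artifacts" not in cur_dict:
        cur_dict["artifacts"] = {}
    cur_dict["artifacts"][artifact_name] = artifact_value

leaf = {"actual": "FAIL", "expected": "PASS"}
append_artifact(leaf, "wpt_subtest_failure", "true")
append_artifact(leaf, "log", "[FAIL expected PASS] t1_a: t1_a_message\n")
# leaf now carries both entries under a single "artifacts" key:
# {"actual": "FAIL", "expected": "PASS",
#  "artifacts": {"wpt_subtest_failure": "true",
#                "log": "[FAIL expected PASS] t1_a: t1_a_message\n"}}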
......@@ -73,12 +85,12 @@ class ChromiumFormatter(base.BaseFormatter):
             cur_dict = cur_dict.setdefault(name_part, {})
         cur_dict["actual"] = actual
         cur_dict["expected"] = expected
-        if subtest_failure or message:
-            cur_dict["artifacts"] = {"log": ""}
-            if subtest_failure:
-                cur_dict["artifacts"]["log"] += "subtest_failure\n"
-            if message != "":
-                cur_dict["artifacts"]["log"] += message
+        if subtest_failure:
+            self._append_artifact(cur_dict, "wpt_subtest_failure", "true")
+        if wpt_actual != actual:
+            self._append_artifact(cur_dict, "wpt_actual_status", wpt_actual)
+        if message != "":
+            self._append_artifact(cur_dict, "log", message)
         # Figure out if there was a regression or unexpected status. This only
         # happens for tests that were run
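With this change each artifact lives under its own key in the test leaf instead of being folded into one concatenated "log" string. The shape below is a sketch of such a leaf for a test whose subtests failed; the values mirror the unit test expectations further down and are illustrative only.

# Illustrative test-leaf layout produced by _store_test_result after this change.
t1_leaf = {
    "actual": "FAIL",
    "expected": "PASS",
    "artifacts": {
        "wpt_subtest_failure": "true",   # present only when a subtest failed
        "wpt_actual_status": "PASS",     # present only when it differs from "actual"
        "log": "[FAIL expected PASS] t1_a: t1_a_message\n",
    },
}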
......@@ -108,7 +120,7 @@ class ChromiumFormatter(base.BaseFormatter):
return "SKIP"
if status == "EXTERNAL-TIMEOUT":
return "TIMEOUT"
if status in ("ERROR", "CRASH"):
if status in ("ERROR", "CRASH", "PRECONDITION_FAILED"):
# CRASH in WPT means a browser crash, which Chromium treats as a
# test failure.
return "FAIL"
......@@ -161,20 +173,28 @@ class ChromiumFormatter(base.BaseFormatter):
     def test_end(self, data):
         test_name = data["test"]
-        actual_status = self._map_status_name(data["status"])
+        # Save the status reported by WPT since we might change it when reporting
+        # to Chromium.
+        wpt_actual_status = data["status"]
+        actual_status = self._map_status_name(wpt_actual_status)
         expected_statuses = self._get_expected_status_from_data(actual_status, data)
         subtest_failure = False
-        if actual_status == "PASS" and test_name in self.tests_with_subtest_fails:
-            # This test passed but it has failing subtests, so we flip the status
-            # to FAIL.
-            actual_status = "FAIL"
+        if test_name in self.tests_with_subtest_fails:
             subtest_failure = True
             # Clean up the test list to avoid accumulating too many.
             self.tests_with_subtest_fails.remove(test_name)
+            # This test passed but it has failing subtests. Since we can only
+            # report a single status to Chromium, we choose FAIL to indicate
+            # that something about this test did not run correctly.
+            if actual_status == "PASS":
+                actual_status = "FAIL"
         if "message" in data:
-            self._append_test_message(test_name, None, actual_status, expected_statuses, data["message"])
-        self._store_test_result(test_name, actual_status, expected_statuses, self.messages[test_name], subtest_failure)
+            self._append_test_message(test_name, None, actual_status,
+                                      expected_statuses, data["message"])
+        self._store_test_result(test_name, actual_status, expected_statuses,
+                                self.messages[test_name], wpt_actual_status,
+                                subtest_failure)
         # Remove the test from messages dict to avoid accumulating too many.
         self.messages.pop(test_name)
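In short, test_end now keeps the raw WPT status around, flips a passing harness status to FAIL whenever the test had failing subtests, and hands both values to _store_test_result. A condensed sketch of that decision, with the bookkeeping collection passed in explicitly instead of read from self; names are illustrative.

# Condensed sketch of the test_end decision above.
def summarize_test_end(actual_status, test_name, tests_with_subtest_fails):
    subtest_failure = test_name in tests_with_subtest_fails
    if subtest_failure:
        tests_with_subtest_fails.remove(test_name)
        # A passing harness status still becomes FAIL when subtests failed.
        if actual_status == "PASS":
            actual_status = "FAIL"
    return actual_status, subtest_failure

assert summarize_test_end("PASS", "t1", {"t1"}) == ("FAIL", True)
assert summarize_test_end("FAIL", "t2", set()) == ("FAIL", False)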
......
......@@ -156,13 +156,13 @@ def test_subtest_messages(capfd):
     output.seek(0)
     output_json = json.load(output)
-    t1_log = output_json["tests"]["t1"]["artifacts"]["log"]
-    assert t1_log == "subtest_failure\n" \
-                     "[FAIL expected PASS] t1_a: t1_a_message\n" \
-                     "[PASS] t1_b: t1_b_message\n"
-    t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
-    assert t2_log == "[TIMEOUT expected PASS] t2_message\n"
+    t1_artifacts = output_json["tests"]["t1"]["artifacts"]
+    assert t1_artifacts["log"] == "[FAIL expected PASS] t1_a: t1_a_message\n" \
+                                  "[PASS] t1_b: t1_b_message\n"
+    assert t1_artifacts["wpt_subtest_failure"] == "true"
+    t2_artifacts = output_json["tests"]["t2"]["artifacts"]
+    assert t2_artifacts["log"] == "[TIMEOUT expected PASS] t2_message\n"
+    assert "wpt_subtest_failure" not in t2_artifacts.keys()
 def test_subtest_failure(capfd):
......@@ -203,13 +203,15 @@ def test_subtest_failure(capfd):
     output_json = json.load(output)
     test_obj = output_json["tests"]["t1"]
-    t1_log = test_obj["artifacts"]["log"]
-    assert t1_log == "subtest_failure\n" \
-                     "[FAIL expected PASS] t1_a: t1_a_message\n" \
-                     "[PASS] t1_b: t1_b_message\n" \
-                     "[TIMEOUT expected PASS] t1_c: t1_c_message\n"
+    t1_artifacts = test_obj["artifacts"]
+    assert t1_artifacts["log"] == "[FAIL expected PASS] t1_a: t1_a_message\n" \
+                                  "[PASS] t1_b: t1_b_message\n" \
+                                  "[TIMEOUT expected PASS] t1_c: t1_c_message\n"
+    assert t1_artifacts["wpt_subtest_failure"] == "true"
     # The status of the test in the output is a failure because subtests failed,
-    # despite the harness reporting that the test passed.
+    # despite the harness reporting that the test passed. But the harness status
+    # is logged as an artifact.
+    assert t1_artifacts["wpt_actual_status"] == "PASS"
     assert test_obj["actual"] == "FAIL"
     assert test_obj["expected"] == "PASS"
     # Also ensure that the formatter cleaned up its internal state
......@@ -300,10 +302,12 @@ def test_unexpected_subtest_pass(capfd):
     output_json = json.load(output)
     test_obj = output_json["tests"]["t1"]
-    t1_log = test_obj["artifacts"]["log"]
-    assert t1_log == "subtest_failure\n" \
-                     "[PASS expected FAIL] t1_a: t1_a_message\n"
-    # Since the subtest status is unexpected, we fail the test.
+    t1_artifacts = test_obj["artifacts"]
+    assert t1_artifacts["log"] == "[PASS expected FAIL] t1_a: t1_a_message\n"
+    assert t1_artifacts["wpt_subtest_failure"] == "true"
+    # Since the subtest status is unexpected, we fail the test. But we report
+    # wpt_actual_status as an artifact
+    assert t1_artifacts["wpt_actual_status"] == "PASS"
     assert test_obj["actual"] == "FAIL"
     assert test_obj["expected"] == "PASS"
     # Also ensure that the formatter cleaned up its internal state
......@@ -448,3 +452,37 @@ def test_flaky_test_unexpected(capfd):
     # one of the expected ones
     assert test_obj["is_regression"] is True
     assert test_obj["is_unexpected"] is True
+def test_precondition_failed(capfd):
+    # Check that a failed precondition gets properly handled.
+    # set up the handler.
+    output = StringIO()
+    logger = structuredlog.StructuredLogger("test_a")
+    logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+    # Run a test with a precondition failure
+    logger.suite_start(["t1"], run_info={}, time=123)
+    logger.test_start("t1")
+    logger.test_end("t1", status="PRECONDITION_FAILED", expected="OK")
+    logger.suite_end()
+    # check nothing got output to stdout/stderr
+    # (note that mozlog outputs exceptions during handling to stderr!)
+    captured = capfd.readouterr()
+    assert captured.out == ""
+    assert captured.err == ""
+    # check the actual output of the formatter
+    output.seek(0)
+    output_json = json.load(output)
+    test_obj = output_json["tests"]["t1"]
+    # The precondition failure should map to FAIL status, but we should also
+    # have an artifact containing the original PRECONDITION_FAILED status.
+    assert test_obj["actual"] == "FAIL"
+    assert test_obj["artifacts"]["wpt_actual_status"] == "PRECONDITION_FAILED"
+    # ...this is an unexpected regression because we expected a pass but failed
+    assert test_obj["is_regression"] is True
+    assert test_obj["is_unexpected"] is True
This is a testharness.js-based test.
PASS Set up the test environment
PASS Bad extensions: extensions is string
FAIL Bad extensions: extensions is null assert_unreached: Should have rejected: Expected bad parameters to fail Reached unreachable code
FAIL Bad extensions: extensions is empty Array assert_unreached: Should have rejected: Expected bad parameters to fail Reached unreachable code
......
......@@ -537,28 +537,22 @@ function validateAuthenticatorAssertionResponse(assert) {
 function standardSetup(cb) {
   // Setup an automated testing environment if available.
-  let authenticator;
-  promise_test(async t => {
-    try {
-      authenticator = await window.test_driver.add_virtual_authenticator({
-        protocol: "ctap1/u2f",
-        transport: "usb"
-      });
-    } catch (error) {
-      if (error !== "error: Action add_virtual_authenticator not implemented") {
-        throw error;
-      }
-      // The protocol is not available. Continue manually.
-    }
-  }, "Set up the test environment");
-  cb();
-  promise_test(t => {
-    if (authenticator) {
-      return window.test_driver.remove_virtual_authenticator(authenticator);
+  window.test_driver.add_virtual_authenticator({
+    protocol: "ctap1/u2f",
+    transport: "usb"
+  }).then(authenticator => {
+    cb();
+    // XXX add a subtest to clean up the virtual authenticator since
+    // testharness does not support waiting for promises on cleanup.
+    promise_test(() => window.test_driver.remove_virtual_authenticator(authenticator),
+                 "Clean up the test environment");
+  }).catch(error => {
+    if (error !== "error: Action add_virtual_authenticator not implemented") {
+      throw error;
     }
-  }, "Clean up the test environment");
+    // The protocol is not available. Continue manually.
+    cb();
+  });
 }
 /* JSHINT */
......