Commit 46e1c59d authored by Luke Zielinski, committed by Commit Bot

Add scripts to support running WPT on Chromium CI.

testing/scripts/run_wpt_tests.py adapts swarming flags to WPT flags
(e.g. for sharding) and also kicks off the update_wpt_output.py
cleanup step.
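
For illustration, a minimal standalone sketch of the flag translation
(to_wpt_flags is a hypothetical helper that mirrors the adapter methods in
run_wpt_tests.py; it is not part of this change):

def to_wpt_flags(output_path, total_shards, shard_index):
    # --isolated-script-test-output maps to --log-chromium; swarming's
    # --shard-index is 0-based while WPT's --this-chunk is 1-based.
    return ['--log-chromium', output_path,
            '--total-chunks=%d' % total_shards,
            '--this-chunk=%d' % (shard_index + 1)]

assert to_wpt_flags('out.json', 4, 0) == [
    '--log-chromium', 'out.json', '--total-chunks=4', '--this-chunk=1']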

t_p/b/t/build_wpt_metadata.py converts Chromium TestExpectations
files into WPT Metadata files for skipped tests.
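
For illustration, a minimal standalone sketch of that conversion
(skip_metadata_for is a hypothetical helper mirroring
WPTMetadataBuilder.get_metadata_filename_and_contents; the filenames and
contents match the unit tests below):

import os

def skip_metadata_for(test_name, output_dir=""):
    if not test_name.startswith("external/wpt"):
        return None, None
    parts = test_name.split("/")[2:]  # drop the 'external'/'wpt' prefix
    if test_name.endswith("/"):
        # A skipped directory gets one __dir__.ini disabling everything in it.
        return (os.path.join(os.path.join(output_dir, *parts), "__dir__.ini"),
                "disabled: build_wpt_metadata.py")
    test_filename = parts[-1]
    parts[-1] += ".ini"  # test.html -> test.html.ini
    return (os.path.join(output_dir, *parts),
            "[%s]\n disabled: build_wpt_metadata.py" % test_filename)

assert skip_metadata_for("external/wpt/test.html") == (
    "test.html.ini", "[test.html]\n disabled: build_wpt_metadata.py")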

t_p/b/t/update_wpt_output.py runs as a cleanup step on each shard.
It rewrites the output of a WPT run by inserting expected statuses
from the TestExpectations file, which adds support for flaky tests
(i.e. multiple expected statuses), something WPT doesn't currently
understand.
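
As a concrete example of the rewrite (a sketch of the per-test update;
apply_expected_statuses and the sample leaf are hypothetical, but the values
mirror the flake.html case in the unit tests below):

def apply_expected_statuses(test_leaf, expected_statuses):
    # Fold the statuses from TestExpectations into the WPT JSON leaf.
    expectation_string = ' '.join(expected_statuses)  # e.g. 'TIMEOUT FAIL'
    test_leaf['expected'] = expectation_string
    # Substring check: an actual TIMEOUT or FAIL counts as expected here.
    is_unexpected = test_leaf['actual'] not in expectation_string
    test_leaf['is_unexpected'] = is_unexpected
    test_leaf['is_regression'] = is_unexpected and test_leaf['actual'] != 'PASS'
    return test_leaf

leaf = apply_expected_statuses({'expected': 'PASS', 'actual': 'TIMEOUT'},
                               ['TIMEOUT', 'FAIL'])
assert leaf['expected'] == 'TIMEOUT FAIL'
assert not leaf['is_unexpected'] and not leaf['is_regression']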

Bug: 937369
Change-Id: Ic7d2c037ec545f38a1101759db6eb04db54057c6
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1660702
Commit-Queue: Luke Z <lpz@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#672059}
parent dbbe42a8
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs WPT as an isolate bundle.
This script maps flags supported by run_isolate_script_test.py to flags that are
understood by WPT.
Here's the mapping [isolate script flag] : [wpt flag]
--isolated-script-test-output : --log-chromium
--total-shards : --total-chunks
--shard-index : --this-chunk
"""
import json
import os
import sys
import common
class WPTTestAdapter(common.BaseIsolatedScriptArgsAdapter):
def generate_test_output_args(self, output):
return ['--log-chromium', output]
def generate_sharding_args(self, total_shards, shard_index):
return ['--total-chunks=%d' % total_shards,
# shard_index is 0-based but WPT expects this-chunk to be 1-based
'--this-chunk=%d' % (shard_index + 1)]
def add_extra_arguments(self, parser):
# These args are used to rewrite the output generated by WPT to include
# missing features, such as flakiness expectations.
parser.add_argument("--old-json-output-file-path")
parser.add_argument("--new-json-output-dir")
parser.add_argument("--new-json-output-filename")
def clean_up_after_test_run(self):
common.run_command([
sys.executable,
os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools',
'update_wpt_output.py'),
'--old-json-output-file-path',
self.options.old_json_output_file_path,
'--new-json-output-dir', self.options.new_json_output_dir,
'--new-json-output-filename', self.options.new_json_output_filename,
])
def main():
adapter = WPTTestAdapter()
return adapter.run_test()
# This is not really a "script test" so does not need to manually add
# any additional compile targets.
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
# Conform minimally to the protocol defined by ScriptTest.
if 'compile_targets' in sys.argv:
funcs = {
'run': None,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
sys.exit(main())
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Converts Chromium Test Expectations into WPT Metadata ini files.
This script loads TestExpectations for any WPT test and creates the metadata
files corresponding to the expectation. This script runs as a BUILD action rule.
The output is then bundled into the WPT isolate package to be shipped to bots
running the WPT test suite.
"""
import argparse
import logging
import os
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.web_tests.models import test_expectations
_log = logging.getLogger(__name__)
class WPTMetadataBuilder(object):
def __init__(self, expectations):
"""
Args:
expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
"""
self.expectations = expectations
self.metadata_output_dir = ""
def run(self, args=None):
"""Main entry point to parse flags and execute the script."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--metadata-output-dir",
help="The directory to output the metadata files into.")
parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
args = parser.parse_args(args)
log_level = logging.DEBUG if args.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
self.metadata_output_dir = args.metadata_output_dir
self._build_metadata_and_write()
return 0
def _build_metadata_and_write(self):
"""Build the metadata files and write them to disk."""
if os.path.exists(self.metadata_output_dir):
_log.warning("Output dir exists, deleting: %s",
self.metadata_output_dir)
import shutil
shutil.rmtree(self.metadata_output_dir)
for test_name in self.get_test_names_for_metadata():
filename, file_contents = self.get_metadata_filename_and_contents(test_name)
if not filename or not file_contents:
continue
# Write the contents to the metadata file
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, "w") as metadata_file:
metadata_file.write(file_contents)
def get_test_names_for_metadata(self):
"""Determines which tests in the expectation file need metadata.
Returns:
A list of test names that need metadata.
"""
return self.expectations.get_tests_with_result_type(
test_expectations.SKIP)
def get_metadata_filename_and_contents(self, test_name):
"""Determines the metadata filename and contents for the specified test.
The metadata filename is derived from the test name but will differ if
the expectation is for a single test or for a directory of tests. The
contents of the metadata file will also differ for those two cases.
Args:
test_name: A test name from the expectation file.
Returns:
A pair of strings, the first is the path to the metadata file and
the second is the contents to write to that file. Or None if the
test does not need a metadata file.
"""
# Ignore expectations for non-WPT tests
if not test_name or not test_name.startswith('external/wpt'):
return None, None
# Split the test name by directory. We omit the first 2 entries because
# they are 'external' and 'wpt', which don't exist in WPT's test
# names.
test_name_parts = test_name.split("/")[2:]
# Check if this is a test file or a test directory
is_test_dir = test_name.endswith("/")
metadata_filename = None
metadata_file_contents = None
if is_test_dir:
# A test directory gets one metadata file called __dir__.ini and all
# tests in that dir are skipped.
metadata_filename = os.path.join(self.metadata_output_dir,
*test_name_parts)
metadata_filename = os.path.join(metadata_filename, "__dir__.ini")
_log.debug("Creating a dir-wide ini file %s", metadata_filename)
metadata_file_contents = "disabled: build_wpt_metadata.py"
else:
# For individual tests, we create one file per test, with the name
# of the test in the file as well.
test_filename = test_name_parts[-1]
# Append `.ini` to the test filename to indicate it's the metadata
# file.
test_name_parts[-1] += ".ini"
metadata_filename = os.path.join(self.metadata_output_dir,
*test_name_parts)
_log.debug("Creating a test ini file %s", metadata_filename)
# The contents of the metadata file are two lines:
# 1. the test name inside square brackets
# 2. an indented line with the test status and reason
metadata_file_contents = ("[%s]\n disabled: build_wpt_metadata.py" % test_filename)
return metadata_filename, metadata_file_contents
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
import os
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.models.test_expectations import TestExpectations
from blinkpy.web_tests.port.factory_mock import MockPortFactory
from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder
def _make_expectation(port, test_name, test_statuses):
"""Creates an expectation object for a single test.
Args:
port: the port to run against
test_name: the name of the test
test_statuses: the statuses of the test
Returns:
An expectation object with the given test and statuses.
"""
expectation_dict = OrderedDict()
expectation_dict["expectations"] = "Bug(test) %s [ %s ]" % (test_name, test_statuses)
return TestExpectations(port, tests=[test_name], expectations_dict=expectation_dict)
class WPTMetadataBuilderTest(unittest.TestCase):
def setUp(self):
self.num = 2
self.host = MockHost()
self.host.port_factory = MockPortFactory(self.host)
self.port = self.host.port_factory.get()
def test_skipped_test(self):
"""A skipped WPT test should get a test-specific metadata file."""
test_name = "external/wpt/test.html"
expectations = _make_expectation(self.port, test_name, "SKIP")
metadata_builder = WPTMetadataBuilder(expectations)
filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
self.assertEqual("test.html.ini", filename)
self.assertEqual("[test.html]\n disabled: build_wpt_metadata.py", contents)
def test_skipped_directory(self):
"""A skipped WPT directory should get a dir-wide metadata file."""
test_name = "external/wpt/test_dir/"
expectations = _make_expectation(self.port, test_name, "SKIP")
metadata_builder = WPTMetadataBuilder(expectations)
filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
self.assertEqual(os.path.join("test_dir", "__dir__.ini"), filename)
self.assertEqual("disabled: build_wpt_metadata.py", contents)
def test_non_wpt_test(self):
"""A non-WPT test should not get any metadata."""
test_name = "some/other/test.html"
expectations = _make_expectation(self.port, test_name, "SKIP")
metadata_builder = WPTMetadataBuilder(expectations)
filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name)
self.assertIsNone(filename)
self.assertIsNone(contents)
def test_wpt_test_not_skipped(self):
"""A WPT test that is not skipped should not get any metadata."""
test_name = "external/wpt/test.html"
expectations = _make_expectation(self.port, test_name, "TIMEOUT")
metadata_builder = WPTMetadataBuilder(expectations)
test_names = metadata_builder.get_test_names_for_metadata()
self.assertFalse(test_names)
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the output of a WPT run.
The purpose of this script is to do any post-processing on the WPT output that
is necessary to support features unavailable in WPT. For example, TestExpectations
can list multiple possible statuses for a flaky test, but WPT only supports a
single expected status and so cannot express that a test is flaky.
This script takes two inputs:
1. The output JSON file of a WPT run
2. The set of test expectations files
It then creates a new output JSON file with any required adjustments (e.g. the
expected statuses of flaky tests filled in from the expectations file).
"""
import argparse
import json
import logging
import os
from blinkpy.common.system.log_utils import configure_logging
_log = logging.getLogger(__name__)
class WPTOutputUpdater(object):
def __init__(self, expectations):
"""
Args:
expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
"""
self.expectations = expectations
self.old_json_output_file_path = None
self.new_json_output_dir = None
self.new_json_output_filename = None
def run(self, args=None):
"""Main entry point to parse flags and execute the script."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--old-json-output-file-path",
help="The JSON output file to be updated, generated by WPT.")
parser.add_argument("--new-json-output-dir",
help="The directory to put the new JSON output file.")
parser.add_argument("--new-json-output-filename",
help="The name of the new JSON output file.")
parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
args = parser.parse_args(args)
log_level = logging.DEBUG if args.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
self.old_json_output_file_path = args.old_json_output_file_path
self.new_json_output_dir = args.new_json_output_dir
self.new_json_output_filename = args.new_json_output_filename
self._update_output_and_write()
return 0
def _update_output_and_write(self):
"""Updates the output JSON and writes it to disk."""
old_output_file = open(self.old_json_output_file_path, "r")
new_output_json = self.update_output_json(json.load(old_output_file))
# We've updated all the tests in this file; now write out the new JSON.
if not os.path.exists(self.new_json_output_dir):
os.makedirs(self.new_json_output_dir)
new_file_path = os.path.join(self.new_json_output_dir,
self.new_json_output_filename)
if os.path.exists(new_file_path):
_log.warning("Output file already exists, will be overwritten: %s",
new_file_path)
with open(new_file_path, "w") as new_output_file:
json.dump(new_output_json, new_output_file)
def update_output_json(self, output_json):
"""Updates the output JSON object in place.
Args:
output_json: a nested dict containing the JSON output from WPT.
Returns:
The updated JSON dictionary
"""
delim = output_json['path_delimiter']
# Go through each WPT expectation, try to find it in the output, and
# then update the expected statuses in the output file.
for e in self.expectations.expectations():
test_leaf = self._find_test_for_expectation(e, delim, output_json)
if test_leaf is not None:
self._update_output_for_test(e, test_leaf)
return output_json
def _find_test_for_expectation(self, exp, delim, output_json):
"""Finds the test output for the specified expectation.
We only handle expectations for WPT tests, so we skip non-WPT entries as
well as directory-wide expectations for WPT. We also want to ensure that
the test from the expectation was run by the specific shard that created
the output.
Args:
exp: an expectation object representing a line from the expectation
file.
delim: the delimiter character in the test names.
output_json: a nested dict containing the JSON output from WPT.
Returns:
The leaf node from the JSON output file for the test that needs to
be updated, which is a dictionary. Or None if this expectation is
not supposed to be handled.
"""
# Skip expectations for non-WPT tests
if not exp.name or not exp.name.startswith('external/wpt'):
return None
# Split the test name by the test delimiter. We omit the first 2 entries
# because they are 'external' and 'wpt' and these don't exist in the WPT
# run.
test_name_parts = exp.name.split(delim)[2:]
# Drill down through the JSON output file using the parts of the test
# name to find the leaf node containing the results for this test.
test_leaf = output_json['tests']
for name_part in test_name_parts:
# Since this script runs on each shard, it's possible that the test
# from the expectations file was not run in this shard. If we don't
# find the test in the WPT output, then skip the expectation.
if name_part not in test_leaf.keys():
_log.debug("Test was not run: %s", exp.name)
return None
test_leaf = test_leaf[name_part]
# Ensure that the expectation is for an actual test, not a directory. To
# do this we check that we're at a leaf, which should have 'actual' and
# 'expected' fields.
if 'actual' not in test_leaf or 'expected' not in test_leaf:
_log.debug("Expectation was not for a test, skipping: %s", exp.name)
return None
# If we get this far then we have an expectation for a single test that
# was actually run by this shard, so return the test leaf that needs to
# be updated in the JSON output.
return test_leaf
def _update_output_for_test(self, exp, test_leaf):
"""Updates the output of a specific test based on the expectations file.
Args:
exp: an expectation object representing a line from the expectation
file.
test_leaf: a dictionary containing the JSON output for a test.
"""
expectation_string = ' '.join(exp.expectations)
_log.info("Updating expectation for test %s from %s to %s",
exp.name, test_leaf['expected'], expectation_string)
test_leaf['expected'] = expectation_string
# Also update the "is_regression" and "is_unexpected" fields.
is_unexpected = test_leaf['actual'] not in expectation_string
test_leaf['is_unexpected'] = is_unexpected
test_leaf['is_regression'] = is_unexpected and test_leaf['actual'] != 'PASS'
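
For reference, a minimal standalone sketch of the name-to-leaf lookup above
(find_test_leaf is a hypothetical helper mirroring _find_test_for_expectation
without the blinkpy expectation object; the results dict is made up):

def find_test_leaf(output_json, test_name):
    # 'external/wpt/subdir/test.html' -> walk ['subdir', 'test.html'].
    delim = output_json['path_delimiter']
    node = output_json['tests']
    for part in test_name.split(delim)[2:]:
        if part not in node:
            return None  # the test was not run on this shard
        node = node[part]
    # Only leaves (real tests, not directories) carry actual/expected results.
    if 'actual' not in node or 'expected' not in node:
        return None
    return node

results = {'path_delimiter': '/',
           'tests': {'subdir': {'test.html': {'expected': 'PASS',
                                              'actual': 'FAIL'}}}}
assert find_test_leaf(results, 'external/wpt/subdir/test.html') == {
    'expected': 'PASS', 'actual': 'FAIL'}
assert find_test_leaf(results, 'external/wpt/missing.html') is None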
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
import json
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.models.test_expectations import TestExpectations
from blinkpy.web_tests.port.factory_mock import MockPortFactory
from blinkpy.w3c.wpt_output_updater import WPTOutputUpdater
_EXPECTATIONS_TEST_LIST = [
"some/test.html",
"external/wpt/expected_crash.html",
"external/wpt/flake.html",
"external/wpt/subdir/unexpected_failure.html",
"passed/test.html"
]
_EXPECTATIONS_FILE_STRING = """
Bug(test) some/test.html [ Failure ]
Bug(test) external/wpt/expected_crash.html [ Crash ]
Bug(test) external/wpt/flake.html [ Timeout Failure ]
Bug(test) external/wpt/subdir/unexpected_failure.html [ Timeout ]
"""
class WPTOutputUpdaterTest(unittest.TestCase):
def setUp(self):
self.host = MockHost()
self.host.port_factory = MockPortFactory(self.host)
self.port = self.host.port_factory.get()
expectations_dict = OrderedDict()
expectations_dict['expectations'] = _EXPECTATIONS_FILE_STRING
self.exp = TestExpectations(self.port, tests=_EXPECTATIONS_TEST_LIST, expectations_dict=expectations_dict)
def test_update_output_json(self):
"""Tests that output JSON is properly updated with expectations."""
# Create a WPTOutputUpdater that uses our test expectations.
output_updater = WPTOutputUpdater(self.exp)
# Note: this is the WPT output which omits the "external/wpt" root that
# is present in the expectations file. Also, the expected status is
# always PASS by default.
output_string = """
{
"path_delimiter": "/",
"tests": {
"some": {
"test.html": {
"expected": "PASS",
"actual": "PASS"
}
},
"expected_crash.html": {
"expected": "PASS",
"actual": "CRASH"
},
"flake.html": {
"expected": "PASS",
"actual": "TIMEOUT"
},
"subdir": {
"unexpected_failure.html": {
"expected": "PASS",
"actual": "FAIL"
}
}
}
}
"""
output_json = json.loads(output_string)
# A few simple assertions that the original JSON is formatted properly.
self.assertEqual("PASS", output_json["tests"]["expected_crash.html"]["expected"])
self.assertEqual("FAIL", output_json["tests"]["subdir"]["unexpected_failure.html"]["actual"])
# Run the output updater, and confirm the expected statuses are updated.
new_output_json = output_updater.update_output_json(output_json)
# some/test.html should not be updated since the expectation is not for
# external/wpt
cur_test = new_output_json["tests"]["some"]["test.html"]
self.assertEqual("PASS", cur_test["expected"])
# The expected_crash.html test crashed as expected. Its expected status
# should be updated but is_regression and is_unexpected are both False
# since this test ran as we expected.
cur_test = new_output_json["tests"]["expected_crash.html"]
self.assertEqual("CRASH", cur_test["expected"])
self.assertFalse(cur_test["is_regression"])
self.assertFalse(cur_test["is_unexpected"])
# The flake.html test ran as expected because its status was one of the
# ones from the expectation file.
cur_test = new_output_json["tests"]["flake.html"]
self.assertEqual("TIMEOUT FAIL", cur_test["expected"])
self.assertFalse(cur_test["is_regression"])
self.assertFalse(cur_test["is_unexpected"])
# The unexpected_failure.html test had a different status than expected,
# so is_unexpected is True. Since the actual status wasn't a PASS, it's
# also a regression.
cur_test = new_output_json["tests"]["subdir"]["unexpected_failure.html"]
self.assertEqual("TIMEOUT", cur_test["expected"])
self.assertTrue(cur_test["is_regression"])
self.assertTrue(cur_test["is_unexpected"])
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from blinkpy.common.host import Host
from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder
from blinkpy.web_tests.models.test_expectations import TestExpectations
def main(args):
host = Host()
port = host.port_factory.get()
expectations = TestExpectations(port)
metadata_builder = WPTMetadataBuilder(expectations)
sys.exit(metadata_builder.run(args))
if __name__ == '__main__':
main(sys.argv[1:])
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from blinkpy.common.host import Host
from blinkpy.w3c.wpt_output_updater import WPTOutputUpdater
from blinkpy.web_tests.models.test_expectations import TestExpectations
def main(args):
host = Host()
port = host.port_factory.get()
expectations = TestExpectations(port)
output_updater = WPTOutputUpdater(expectations)
sys.exit(output_updater.run(args))
if __name__ == '__main__':
main(sys.argv[1:])