Commit d35791ce authored by Stephen McGruer, committed by Commit Bot

Roll internal WPT tools

This rolls up to c808aa3d15a42648d8b25a838024813990959e37

Bug: None
Change-Id: I46407cabbe41fff8dc0b8aef766fb66baf59f9a3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2468897
Auto-Submit: Stephen McGruer <smcgruer@chromium.org>
Reviewed-by: Luke Z <lpz@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Commit-Queue: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#816991}
parent 2430468d
@@ -22,7 +22,7 @@ Local Modifications: None
 Name: web-platform-tests - Test Suites for Web Platform specifications
 Short Name: wpt
 URL: https://github.com/web-platform-tests/wpt/
-Version: 4b8a64638b4c06fb38eb562b754eae389b381ec0
+Version: c808aa3d15a42648d8b25a838024813990959e37
 License: LICENSES FOR W3C TEST SUITES (https://www.w3.org/Consortium/Legal/2008/03-bsd-license.html)
 License File: wpt/wpt/LICENSE.md
 Security Critical: no

@@ -9,7 +9,7 @@ cd $DIR
 TARGET_DIR=$DIR/wpt
 REMOTE_REPO="https://github.com/web-platform-tests/wpt.git"
-WPT_HEAD=4b8a64638b4c06fb38eb562b754eae389b381ec0
+WPT_HEAD=c808aa3d15a42648d8b25a838024813990959e37
 
 function clone {
   # Remove existing repo if already exists.

@@ -957,6 +957,8 @@ def create_parser():
                         "working directory, not just files that changed")
     parser.add_argument("--github-checks-text-file", type=ensure_text,
                         help="Path to GitHub checks output file for Taskcluster runs")
+    parser.add_argument("-j", "--jobs", type=int, default=0,
+                        help="Level to parallelism to use (defaults to 0, which detects the number of CPUs)")
     return parser
@@ -984,18 +986,23 @@ def main(**kwargs_str):
     github_checks_outputter = get_gh_checks_outputter(kwargs["github_checks_text_file"])
 
-    return lint(repo_root, paths, output_format, ignore_glob, github_checks_outputter)
+    jobs = kwargs.get("jobs", 0)
+    return lint(repo_root, paths, output_format, ignore_glob, github_checks_outputter, jobs)
 
 
 # best experimental guess at a decent cut-off for using the parallel path
 MIN_FILES_FOR_PARALLEL = 80
 
 
-def lint(repo_root, paths, output_format, ignore_glob=None, github_checks_outputter=None):
-    # type: (Text, List[Text], Text, Optional[List[Text]], Optional[GitHubChecksOutputter]) -> int
+def lint(repo_root, paths, output_format, ignore_glob=None, github_checks_outputter=None, jobs=0):
+    # type: (Text, List[Text], Text, Optional[List[Text]], Optional[GitHubChecksOutputter], int) -> int
     error_count = defaultdict(int)  # type: Dict[Text, int]
     last = None
 
+    if jobs == 0:
+        jobs = multiprocessing.cpu_count()
+
     with io.open(os.path.join(repo_root, "lint.ignore"), "r") as f:
         ignorelist, skipped_files = parse_ignorelist(f)
@@ -1053,8 +1060,8 @@ def lint(repo_root, paths, output_format, ignore_glob=None, github_checks_outputter=None):
     paths = [p for p in paths if p not in skip]
 
-    if len(to_check_content) >= MIN_FILES_FOR_PARALLEL:
-        pool = multiprocessing.Pool()
+    if jobs > 1 and len(to_check_content) >= MIN_FILES_FOR_PARALLEL:
+        pool = multiprocessing.Pool(jobs)
         # submit this job first, as it's the longest running
         all_paths_result = pool.apply_async(check_all_paths, (repo_root, paths))
         # each item tends to be quick, so pass things in large chunks to avoid too much IPC overhead
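
For readers unfamiliar with the lint change above: the new -j/--jobs flag defaults to 0, which is resolved to multiprocessing.cpu_count(), and the worker pool is only used when more than one job is requested and enough files are queued. A minimal, self-contained sketch of that pattern follows (the helper names check_file and run_checks are hypothetical, not part of wpt):

# Illustrative sketch, not part of the commit: a --jobs flag that falls back
# to the CPU count and only spins up a pool above a file-count threshold.
import argparse
import multiprocessing


MIN_FILES_FOR_PARALLEL = 80  # same experimental cut-off as the linter


def check_file(path):
    # Stand-in for the real per-file checks; returns an error count.
    return 0


def run_checks(paths, jobs=0):
    if jobs == 0:
        jobs = multiprocessing.cpu_count()
    if jobs > 1 and len(paths) >= MIN_FILES_FOR_PARALLEL:
        with multiprocessing.Pool(jobs) as pool:
            # Items are cheap, so a large chunksize keeps IPC overhead low.
            return sum(pool.imap(check_file, paths, chunksize=40))
    return sum(check_file(p) for p in paths)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-j", "--jobs", type=int, default=0,
                        help="0 means detect the number of CPUs")
    parser.add_argument("paths", nargs="*")
    args = parser.parse_args()
    print(run_checks(args.paths, args.jobs))
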
@@ -73,7 +73,9 @@ class WrapperHandler(object):
         self.handler(request, response)
 
     def handle_request(self, request, response):
-        for header_name, header_value in self.headers:
+        headers = self.headers + handlers.load_headers(
+            request, self._get_filesystem_path(request))
+        for header_name, header_value in headers:
             response.headers.set(header_name, header_value)
 
         self.check_exposure(request)
@@ -111,13 +113,17 @@ class WrapperHandler(object):
             path = replace_end(path, src, dest)
         return path
 
+    def _get_filesystem_path(self, request):
+        """Get the path of the underlying resource file on disk."""
+        return self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
+
     def _get_metadata(self, request):
         """Get an iterator over script metadata based on // META comments in the
         associated js file.
 
         :param request: The Request being processed.
         """
-        path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
+        path = self._get_filesystem_path(request)
         try:
             with open(path, "rb") as f:
                 for key, value in read_script_metadata(f, js_meta_re):
@@ -532,7 +538,7 @@ def start_servers(host, ports, paths, routes, bind_address, config, **kwargs):
         # If trying to start HTTP/2.0 server, check compatibility
         if scheme == 'h2' and not http2_compatible():
             logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
-                         'Requires Python 2.7.10+ (< 3.0) and OpenSSL 1.0.2+')
+                         'Requires Python 2.7.10+ or 3.6+ and OpenSSL 1.0.2+')
             continue
 
         for port in ports:
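
The WrapperHandler change above concatenates the handler's built-in headers with any headers loaded from *.headers files next to the wrapped resource, and applies them in that order. A rough sketch of why that ordering matters, assuming a set() call that replaces any earlier value for the same header name (ResponseHeaders below is a stand-in, not wptserve's actual class, and the header values are invented):

# Illustrative sketch, not wptserve code: defaults are applied first, headers
# loaded from a *.headers file second, so the file can override a default.
class ResponseHeaders:
    def __init__(self):
        self._items = {}

    def set(self, name, value):
        # Later calls replace earlier values for the same (case-insensitive)
        # name, which is what makes the "defaults + overrides" ordering work.
        self._items[name.lower()] = (name, value)

    def items(self):
        return list(self._items.values())


default_headers = [("Content-Type", "text/html")]
file_headers = [("Content-Type", "text/html; charset=utf-8"),
                ("Cross-Origin-Embedder-Policy", "require-corp")]

response_headers = ResponseHeaders()
for name, value in default_headers + file_headers:
    response_headers.set(name, value)

print(response_headers.items())
# [('Content-Type', 'text/html; charset=utf-8'),
#  ('Cross-Origin-Embedder-Policy', 'require-corp')]
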
@@ -661,7 +661,7 @@ class Chrome(Browser):
     def find_webdriver(self, venv_path=None, channel=None, browser_binary=None):
         return find_executable("chromedriver")
 
-    def webdriver_supports_browser(self, webdriver_binary, browser_binary):
+    def webdriver_supports_browser(self, webdriver_binary, browser_binary, browser_channel):
         chromedriver_version = self.webdriver_version(webdriver_binary)
         if not chromedriver_version:
             self.logger.warning(
@@ -676,9 +676,17 @@ class Chrome(Browser):
             return True
 
         # Check that the ChromeDriver version matches the Chrome version.
-        chromedriver_major = chromedriver_version.split('.')[0]
-        browser_major = browser_version.split('.')[0]
+        chromedriver_major = int(chromedriver_version.split('.')[0])
+        browser_major = int(browser_version.split('.')[0])
         if chromedriver_major != browser_major:
+            # There is no official ChromeDriver release for the dev channel -
+            # it switches between beta and tip-of-tree, so we accept version+1
+            # too for dev.
+            if browser_channel == "dev" and chromedriver_major == (browser_major + 1):
+                self.logger.debug(
+                    "Accepting ChromeDriver %s for Chrome/Chromium Dev %s" %
+                    (chromedriver_version, browser_version))
+                return True
             self.logger.warning(
                 "ChromeDriver %s does not match Chrome/Chromium %s" %
                 (chromedriver_version, browser_version))
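
The ChromeDriver check above now compares integer major versions and, for the dev channel only, tolerates a driver that is one major version ahead (dev has no dedicated ChromeDriver release). The same rule as a small standalone function with a few worked examples (versions_compatible is a hypothetical name; the version strings are made up):

# Illustrative sketch, not part of the commit: the version-matching rule the
# change implements, pulled out as a pure function so it is easy to test.
def versions_compatible(chromedriver_version, browser_version, browser_channel):
    chromedriver_major = int(chromedriver_version.split('.')[0])
    browser_major = int(browser_version.split('.')[0])
    if chromedriver_major == browser_major:
        return True
    # Dev channel only: accept a driver one major version ahead.
    return browser_channel == "dev" and chromedriver_major == browser_major + 1


assert versions_compatible("86.0.4240.22", "86.0.4240.75", "stable")
assert versions_compatible("87.0.4280.20", "86.0.4240.75", "dev")
assert not versions_compatible("87.0.4280.20", "86.0.4240.75", "beta")
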
@@ -345,7 +345,7 @@ class Chrome(BrowserSetup):
         if not kwargs["install_webdriver"]:
             webdriver_binary = self.browser.find_webdriver()
             if webdriver_binary and not self.browser.webdriver_supports_browser(
-                    webdriver_binary, kwargs["binary"]):
+                    webdriver_binary, kwargs["binary"], browser_channel):
                 webdriver_binary = None
 
             if webdriver_binary is None:

@@ -32,7 +32,6 @@ def guess_content_type(path):
     return "application/octet-stream"
 
 
 def filesystem_path(base_path, request, url_base="/"):
     if base_path is None:
         base_path = request.doc_root
@@ -53,6 +52,7 @@ def filesystem_path(base_path, request, url_base="/"):
     return new_path
 
 
 class DirectoryHandler(object):
     def __init__(self, base_path=None, url_base="/"):
         self.base_path = base_path
@@ -121,6 +121,7 @@ class DirectoryHandler(object):
                  {"link": link, "name": escape(item), "class": class_,
                   "headers": dot_headers_markup})
 
 
 def parse_qs(qs):
     """Parse a query string given as a string argument (data of type
     application/x-www-form-urlencoded). Data are returned as a dictionary. The
@@ -140,7 +141,12 @@ def parse_qs(qs):
             rv[name].append(value)
     return dict(rv)
 
 
 def wrap_pipeline(path, request, response):
+    """Applies pipelines to a response.
+
+    Pipelines are specified in the filename (.sub.) or the query param (?pipe).
+    """
     query = parse_qs(request.url_parts.query)
     pipe_string = ""
@@ -161,6 +167,36 @@ def wrap_pipeline(path, request, response):
     return response
 
 
+def load_headers(request, path):
+    """Loads headers from files for a given path.
+
+    Attempts to load both the neighbouring __dir__{.sub}.headers and
+    PATH{.sub}.headers (applying template substitution if needed); results are
+    concatenated in that order.
+    """
+    def _load(request, path):
+        headers_path = path + ".sub.headers"
+        if os.path.exists(headers_path):
+            use_sub = True
+        else:
+            headers_path = path + ".headers"
+            use_sub = False
+
+        try:
+            with open(headers_path, "rb") as headers_file:
+                data = headers_file.read()
+        except IOError:
+            return []
+        else:
+            if use_sub:
+                data = template(request, data, escape_type="none")
+            return [tuple(item.strip() for item in line.split(b":", 1))
+                    for line in data.splitlines() if line]
+
+    return (_load(request, os.path.join(os.path.dirname(path), "__dir__")) +
+            _load(request, path))
+
+
 class FileHandler(object):
     def __init__(self, base_path=None, url_base="/"):
         self.base_path = base_path
@@ -197,33 +233,13 @@ class FileHandler(object):
             raise HTTPException(404)
 
     def get_headers(self, request, path):
-        rv = (self.load_headers(request, os.path.join(os.path.dirname(path), "__dir__")) +
-              self.load_headers(request, path))
+        rv = load_headers(request, path)
 
         if not any(key.lower() == b"content-type" for (key, _) in rv):
             rv.insert(0, (b"Content-Type", guess_content_type(path).encode("ascii")))
 
         return rv
 
-    def load_headers(self, request, path):
-        headers_path = path + ".sub.headers"
-        if os.path.exists(headers_path):
-            use_sub = True
-        else:
-            headers_path = path + ".headers"
-            use_sub = False
-
-        try:
-            with open(headers_path, "rb") as headers_file:
-                data = headers_file.read()
-        except IOError:
-            return []
-        else:
-            if use_sub:
-                data = template(request, data, escape_type="none")
-            return [tuple(item.strip() for item in line.split(b":", 1))
-                    for line in data.splitlines() if line]
-
     def get_data(self, response, path, byte_ranges):
         """Return either the handle to a file, or a string containing
         the content of a chunk of the file, if we have a range request."""
@@ -312,7 +328,6 @@ class PythonScriptHandler(object):
         self._set_path_and_load_file(request, response, func)
 
     def frame_handler(self, request):
         """
         This creates a FunctionHandler with one or more of the handling functions.
@@ -340,8 +355,10 @@ class PythonScriptHandler(object):
             return handler
         return self._set_path_and_load_file(request, None, func)
 
+
 python_script_handler = PythonScriptHandler()
 
+
 class FunctionHandler(object):
     def __init__(self, func):
         self.func = func
@@ -370,10 +387,11 @@ class FunctionHandler(object):
         wrap_pipeline('', request, response)
 
 
-#The generic name here is so that this can be used as a decorator
+# The generic name here is so that this can be used as a decorator
 def handler(func):
     return FunctionHandler(func)
 
+
 class JsonHandler(object):
     def __init__(self, func):
         self.func = func
@@ -395,9 +413,11 @@ class JsonHandler(object):
         response.headers.set("Content-Length", length)
         return value
 
+
 def json_handler(func):
     return JsonHandler(func)
 
+
 class AsIsHandler(object):
     def __init__(self, base_path=None, url_base="/"):
         self.base_path = base_path
@@ -414,8 +434,10 @@ class AsIsHandler(object):
         except IOError:
             raise HTTPException(404)
 
+
 as_is_handler = AsIsHandler()
 
+
 class BasicAuthHandler(object):
     def __init__(self, handler, user, password):
         """
@@ -442,8 +464,10 @@ class BasicAuthHandler(object):
                 return response
         return self.handler(request, response)
 
+
 basic_auth_handler = BasicAuthHandler(file_handler, None, None)
 
+
 class ErrorHandler(object):
     def __init__(self, status):
         self.status = status
@@ -454,7 +478,7 @@ class ErrorHandler(object):
 
 class StringHandler(object):
     def __init__(self, data, content_type, **headers):
-        """Hander that reads a file from a path and substitutes some fixed data
+        """Handler that returns a fixed data string and headers
 
         :param data: String to use
         :param content_type: Content type header to server the response with
@@ -478,7 +502,9 @@ class StringHandler(object):
 
 class StaticHandler(StringHandler):
     def __init__(self, path, format_args, content_type, **headers):
-        """Hander that reads a file from a path and substitutes some fixed data
+        """Handler that reads a file from a path and substitutes some fixed data
+
+        Note that *.headers files have no effect in this handler.
 
         :param path: Path to the template file to use
         :param format_args: Dictionary of values to substitute into the template file
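
For context on the load_headers refactor above: per-resource headers in wptserve come from plain-text files placed next to the resource (PATH.headers, PATH.sub.headers, or a directory-wide __dir__.headers), one "Name: value" pair per line. An illustrative example of such a file and of the split-on-first-colon parsing the function performs (the file contents here are invented):

# Illustrative example, not from the commit: what a "resource.html.headers"
# file might contain and how splitting each line on the first ":" yields the
# (name, value) byte tuples that load_headers returns.
data = b"""Content-Type: text/html; charset=utf-8
Cache-Control: no-store
Access-Control-Allow-Origin: *
"""

headers = [tuple(item.strip() for item in line.split(b":", 1))
           for line in data.splitlines() if line]

print(headers)
# [(b'Content-Type', b'text/html; charset=utf-8'),
#  (b'Cache-Control', b'no-store'),
#  (b'Access-Control-Allow-Origin', b'*')]
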
@@ -413,9 +413,9 @@ class H2Response(Response):
         item = None
         item_iter = self.iter_content()
         try:
-            item = item_iter.next()
+            item = next(item_iter)
             while True:
-                check_last = item_iter.next()
+                check_last = next(item_iter)
                 self.writer.write_data(item, last=False)
                 item = check_last
         except StopIteration:
@@ -451,6 +451,13 @@ class H2ResponseWriter(object):
         secondary_headers = []  # Non ':' prefixed headers are to be added afterwards
 
         for header, value in headers:
+            # h2_headers are native strings
+            # header field names are strings of ASCII
+            if isinstance(header, binary_type):
+                header = header.decode('ascii')
+            # value in headers can be either string or integer
+            if isinstance(value, binary_type):
+                value = self.decode(value)
             if header in h2_headers:
                 header = ':' + header
                 formatted_headers.append((header, str(value)))
@@ -635,6 +642,15 @@ class H2ResponseWriter(object):
         self.content_written = True
         self.socket.sendall(raw_data)
 
+    def decode(self, data):
+        """Convert bytes to unicode according to response.encoding."""
+        if isinstance(data, binary_type):
+            return data.decode(self._response.encoding)
+        elif isinstance(data, text_type):
+            return data
+        else:
+            raise ValueError(type(data))
+
     def encode(self, data):
         """Convert unicode to bytes according to response.encoding."""
         if isinstance(data, binary_type):
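
The H2ResponseWriter additions above normalise header names and values before they reach the HTTP/2 layer: names are decoded as ASCII, byte values are decoded via the response encoding, and other values (such as an integer Content-Length) are stringified. A hedged sketch of that normalisation as a standalone helper (normalise_header is a hypothetical name; a fixed utf-8 encoding stands in for response.encoding):

# Illustrative sketch, not the wptserve implementation: normalise header
# fields to native strings before handing them to the HTTP/2 machinery.
def normalise_header(header, value, encoding="utf-8"):
    # Header field names are ASCII by definition.
    if isinstance(header, bytes):
        header = header.decode("ascii")
    # Values may arrive as bytes, text, or integers (e.g. Content-Length).
    if isinstance(value, bytes):
        value = value.decode(encoding)
    return header, str(value)


print(normalise_header(b"content-length", 1234))    # ('content-length', '1234')
print(normalise_header(b"x-note", b"caf\xc3\xa9"))  # ('x-note', 'café')
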
@@ -26,7 +26,7 @@ from .logger import get_logger
 from .request import Server, Request, H2Request
 from .response import Response, H2Response
 from .router import Router
-from .utils import HTTPException
+from .utils import HTTPException, isomorphic_decode
 from .constants import h2_headers
 
 # We need to stress test that browsers can send/receive many headers (there is
@@ -506,6 +506,8 @@ class H2Headers(dict):
     def __init__(self, headers):
         self.raw_headers = OrderedDict()
         for key, val in headers:
+            key = isomorphic_decode(key)
+            val = isomorphic_decode(val)
             self.raw_headers[key] = val
             dict.__setitem__(self, self._convert_h2_header_to_h1(key), val)
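
isomorphic_decode, now applied to incoming H2 header keys and values above, converts bytes to text without losing information. The usual way to get that property is a Latin-1 (ISO-8859-1) round trip, where each byte maps to the code point of the same value; the sketch below illustrates the idea and is not necessarily the exact wptserve implementation:

# Illustrative sketch: an "isomorphic" decode maps every byte 0x00-0xFF to the
# code point of the same value, which is what Latin-1 does, so the conversion
# is lossless and reversible.
def isomorphic_decode(data):
    if isinstance(data, bytes):
        return data.decode("iso-8859-1")
    return data


def isomorphic_encode(text):
    if isinstance(text, str):
        return text.encode("iso-8859-1")
    return text


raw = bytes(range(256))
assert isomorphic_encode(isomorphic_decode(raw)) == raw  # round-trips every byte
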
@@ -152,8 +152,9 @@ def get_port(host=''):
     return port
 
 
 def http2_compatible():
-    # Currently, the HTTP/2.0 server is only working in python 2.7.10+ and OpenSSL 1.0.2+
+    # Currently, the HTTP/2.0 server is only working in python 2.7.10+ or 3.6+ and OpenSSL 1.0.2+
     import ssl
     ssl_v = ssl.OPENSSL_VERSION_INFO
-    return ((sys.version_info[0] == 2 and sys.version_info[1] == 7 and sys.version_info[2] >= 10) and
+    py_v = sys.version_info
+    return (((py_v[0] == 2 and py_v[1] == 7 and py_v[2] >= 10) or (py_v[0] == 3 and py_v[1] >= 6)) and
             (ssl_v[0] == 1 and (ssl_v[1] == 1 or (ssl_v[1] == 0 and ssl_v[2] >= 2))))
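
A worked example of the updated http2_compatible() predicate, evaluated against a few representative (and invented) interpreter/OpenSSL version tuples to show which combinations pass:

# Illustrative check of the same tuple comparison used above.
def compatible(py_v, ssl_v):
    return (((py_v[0] == 2 and py_v[1] == 7 and py_v[2] >= 10) or
             (py_v[0] == 3 and py_v[1] >= 6)) and
            (ssl_v[0] == 1 and (ssl_v[1] == 1 or (ssl_v[1] == 0 and ssl_v[2] >= 2))))


assert compatible((2, 7, 17), (1, 1, 1))     # Python 2.7.17, OpenSSL 1.1.1
assert compatible((3, 8, 5), (1, 0, 2))      # Python 3.8.5, OpenSSL 1.0.2
assert not compatible((3, 5, 9), (1, 1, 1))  # Python 3.5 is too old
assert not compatible((3, 8, 5), (1, 0, 1))  # OpenSSL 1.0.1 is too old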