Commit ce444e13 authored by Luke Zielinski, committed by Commit Bot

Remove WPTOutputUpdater, it's dead code.

This used to post-process the output of wptrunner to support things like
flakes, but we now support that in WPTMetadataBuilder instead.

Change-Id: I967aff8ae3babbb620478cd32f486c5ac760c70f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2187193
Commit-Queue: Robert Ma <robertma@chromium.org>
Auto-Submit: Luke Z <lpz@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#766439}
parent 91c26fa8
@@ -11,7 +11,6 @@ group("wpt_tests_isolate") {
     # Include blinkpy tools for setting up expectations.
     "//third_party/blink/tools/build_wpt_metadata.py",
-    "//third_party/blink/tools/update_wpt_output.py",
     "//third_party/blink/tools/blinkpy/",
     "//third_party/catapult/third_party/typ/",
...
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the output of a WPT run.
The purpose of this script is to do any post-processing on the WPT output that
is necessary to support features unavailable in WPT. For example, TestExpectations
for flaky tests where multiple possible statuses are listed for a test. However,
WPT only supports a single status so it can't understand that a test is flaky.
This script takes two inputs:
1. The output JSON file of a WPT run
2. The set of test expectations files
It then creates a new output JSON file with any required adjustments (eg: the
expected statuses of flaky tests filled in from the expectations file).
"""
import argparse
import json
import logging
import os
from blinkpy.common.system.log_utils import configure_logging
_log = logging.getLogger(__name__)
class WPTOutputUpdater(object):
def __init__(self, expectations):
"""
Args:
expectations: a blinkpy.web_tests.models.test_expectations.TestExpectations object
"""
self.expectations = expectations
self.old_json_output_file_path = None
self.new_json_output_dir = None
self.new_json_output_filename = None
def run(self, args=None):
"""Main entry point to parse flags and execute the script."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--old-json-output-file-path",
help="The JSON output file to be updated, generated by WPT.")
parser.add_argument(
"--new-json-output-dir",
help="The directory to put the new JSON output file.")
parser.add_argument(
"--new-json-output-filename",
help="The name of the new JSON output file.")
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='More verbose logging.')
args = parser.parse_args(args)
log_level = logging.DEBUG if args.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
self.old_json_output_file_path = args.old_json_output_file_path
self.new_json_output_dir = args.new_json_output_dir
self.new_json_output_filename = args.new_json_output_filename
self._update_output_and_write()
return 0
def _update_output_and_write(self):
"""Updates the output JSON and writes it to disk."""
with open(self.old_json_output_file_path, "r") as old_output_file:
    new_output_json = self.update_output_json(json.load(old_output_file))
# All tests present in this file have been updated; write the result out.
if not os.path.exists(self.new_json_output_dir):
os.makedirs(self.new_json_output_dir)
new_file_path = os.path.join(self.new_json_output_dir,
self.new_json_output_filename)
if os.path.exists(new_file_path):
_log.warning("Output file already exists, will be overwritten: %s",
new_file_path)
with open(new_file_path, "w") as new_output_file:
json.dump(new_output_json, new_output_file)
def update_output_json(self, output_json):
"""Updates the output JSON object in place.
Args:
output_json: a nested dict containing the JSON output from WPT.
Returns:
The updated JSON dictionary
"""
delim = output_json['path_delimiter']
# Go through each WPT expectation, try to find it in the output, and
# then update the expected statuses in the output file.
for path in self.expectations.expectations_dict:
for line in self.expectations.get_updated_lines(path):
if not line.test or line.is_glob:
continue
test_leaf = self._find_test_for_expectation(
line, delim, output_json)
if test_leaf is not None:
self._update_output_for_test(line, test_leaf)
return output_json
def _find_test_for_expectation(self, exp, delim, output_json):
"""Finds the test output for the specified expectation.
We only handle expectations for WPT tests, so we skip non-WPT entries as
well as directory-wide expectations for WPT. We also want to ensure that
the test from the expectation was run by the specific shard that created
the output.
Args:
exp: an expectation object representing a line from the expectation
file.
delim: the delimiter character in the test names.
output_json: a nested dict containing the JSON output from WPT.
Returns:
The leaf node from the JSON output file for the test that needs to
be updated, which is a dictionary. Or None if this expectation is
not supposed to be handled.
"""
# Skip expectations for non-WPT tests
if not exp.test or not exp.test.startswith('external/wpt'):
return None
# Split the test name by the test delimiter. We omit the first 2 entries
# because they are 'external' and 'wpt' and these don't exist in the WPT
# run.
test_name_parts = exp.test.split(delim)[2:]
# Drill down through the JSON output file using the parts of the test
# name to find the leaf node containing the results for this test.
test_leaf = output_json['tests']
for name_part in test_name_parts:
# Since this script runs on each shard, it's possible that the test
# from the expectations file was not run in this shard. If we don't
# find the test in the WPT output, then skip the expectation.
if name_part not in test_leaf:
_log.debug("Test was not run: %s", exp.test)
return None
test_leaf = test_leaf[name_part]
# Ensure that the expectation is for an actual test, not a directory. To
# do this we check that we're at a leaf, which should have 'actual' and
# 'expected' fields.
if 'actual' not in test_leaf or 'expected' not in test_leaf:
_log.debug("Expectation was not for a test, skipping: %s",
exp.test)
return None
# If we get this far then we have an expectation for a single test that
# was actually run by this shard, so return the test leaf that needs to
# be updated in the JSON output.
return test_leaf
def _update_output_for_test(self, exp, test_leaf):
"""Updates the output of a specific test based on the expectations file.
Args:
exp: an expectation object representing a line from the expectation
file.
test_leaf: a dictionary containing the JSON output for a test.
"""
expectation_string = ' '.join(sorted(exp.results))
_log.info("Updating expectation for test %s from %s to %s", exp.test,
test_leaf['expected'], expectation_string)
test_leaf['expected'] = expectation_string
# Also update the "is_regression" and "is_unexpected" fields.
is_unexpected = test_leaf['actual'] not in expectation_string
test_leaf['is_unexpected'] = is_unexpected
test_leaf['is_regression'] = (is_unexpected
and test_leaf['actual'] != 'PASS')
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import OrderedDict
import json
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.models.test_expectations import TestExpectations
from blinkpy.web_tests.port.factory_mock import MockPortFactory
from blinkpy.w3c.wpt_output_updater import WPTOutputUpdater
_EXPECTATIONS_TEST_LIST = [
"some/test.html", "external/wpt/expected_crash.html",
"external/wpt/flake.html", "external/wpt/subdir/unexpected_failure.html",
"passed/test.html"
]
_EXPECTATIONS_FILE_STRING = """
# results: [ Failure Crash Timeout ]
some/test.html [ Failure ]
external/wpt/expected_crash.html [ Crash ]
external/wpt/flake.html [ Timeout Failure ]
external/wpt/subdir/unexpected_failure.html [ Timeout ]
"""
class WPTOutputUpdaterTest(unittest.TestCase):
def setUp(self):
self.host = MockHost()
self.host.port_factory = MockPortFactory(self.host)
self.port = self.host.port_factory.get()
expectations_dict = OrderedDict()
expectations_dict['expectations'] = _EXPECTATIONS_FILE_STRING
self.exp = TestExpectations(
self.port, expectations_dict=expectations_dict)
def test_update_output_json(self):
"""Tests that output JSON is properly updated with expectations."""
# Create a WPTOutputUpdater and reset it to use our test expectations.
output_updater = WPTOutputUpdater(self.exp)
# Note: this is the WPT output which omits the "external/wpt" root that
# is present in the expectations file. Also, the expected status is
# always PASS by default.
output_string = """
{
"path_delimiter": "/",
"tests": {
"some": {
"test.html": {
"expected": "PASS",
"actual": "PASS"
}
},
"expected_crash.html": {
"expected": "PASS",
"actual": "CRASH"
},
"flake.html": {
"expected": "PASS",
"actual": "TIMEOUT"
},
"subdir": {
"unexpected_failure.html": {
"expected": "PASS",
"actual": "FAIL"
}
}
}
}
"""
output_json = json.loads(output_string)
# A few simple assertions that the original JSON is formatted properly.
self.assertEqual(
"PASS", output_json["tests"]["expected_crash.html"]["expected"])
self.assertEqual(
"FAIL", output_json["tests"]["subdir"]["unexpected_failure.html"]
["actual"])
# Run the output updater, and confirm the expected statuses are updated.
new_output_json = output_updater.update_output_json(output_json)
# some/test.html should not be updated since the expectation is not for
# external/wpt
cur_test = new_output_json["tests"]["some"]["test.html"]
self.assertEqual("PASS", cur_test["expected"])
# The expected_crash.html test crashed as expected. Its expected status
# should be updated, but is_regression and is_unexpected are both False
# since this test ran as we expected.
cur_test = new_output_json["tests"]["expected_crash.html"]
self.assertEqual("CRASH", cur_test["expected"])
self.assertFalse(cur_test["is_regression"])
self.assertFalse(cur_test["is_unexpected"])
# The flake.html test ran as expected because its actual status was one
# of the statuses listed in the expectations file.
cur_test = new_output_json["tests"]["flake.html"]
self.assertEqual("FAIL TIMEOUT", cur_test["expected"])
self.assertFalse(cur_test["is_regression"])
self.assertFalse(cur_test["is_unexpected"])
# The unexpected_failure.html test had a different status than expected,
# so is_unexpected is true. Since the actual status wasn't a Pass, it's
# also a regression.
cur_test = new_output_json["tests"]["subdir"][
"unexpected_failure.html"]
self.assertEqual("TIMEOUT", cur_test["expected"])
self.assertTrue(cur_test["is_regression"])
self.assertTrue(cur_test["is_unexpected"])
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import optparse
import sys
from blinkpy.common.host import Host
from blinkpy.w3c.wpt_output_updater import WPTOutputUpdater
from blinkpy.web_tests.models.test_expectations import TestExpectations
def main(args):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--additional-expectations",
action="append",
help="Paths to additional expectations files for WPT.")
known_args, rest_args = parser.parse_known_args(args)
host = Host()
port = host.port_factory.get(options=optparse.Values(vars(known_args)))
expectations = TestExpectations(port)
output_updater = WPTOutputUpdater(expectations)
sys.exit(output_updater.run(rest_args))
if __name__ == '__main__':
main(sys.argv[1:])