Commit 5f825c46 authored by Luke Zielinski, committed by Commit Bot

Refactor WptMetadataBuilder to loop over tests to look up statuses.

Previously we'd ask typ for all "Skip" expectations and then loop over
all tests to look up their baselines. In a future CL we'll need to look
at other (non-Skip) expectations for all tests as well, and since we're
already looping over tests to find baselines we might as well query the
expectations at the same time.

This also changes the return type of "get_tests_needing_metadata" to a
dict (rather than a list of pairs), which makes it easier to attach
more statuses to a test as we process more input.
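
As a rough sketch (illustrative only, not code from this CL; the test
names below are made up), the dict maps each test name to a bitmap
built from the status constants in wpt_metadata_builder.py, so later
passes can simply OR in additional bits:

    from collections import defaultdict

    # Constants mirror wpt_metadata_builder.py; values here are for illustration.
    HARNESS_ERROR = 1
    SUBTEST_FAIL = 1 << 1
    SKIP_TEST = 1 << 2

    statuses = defaultdict(int)  # test name -> status bitmap
    statuses["external/wpt/a.html"] |= SKIP_TEST      # from an expectation
    statuses["external/wpt/b.html"] |= SUBTEST_FAIL   # from a baseline
    statuses["external/wpt/b.html"] |= HARNESS_ERROR  # more input, same test
    assert statuses["external/wpt/b.html"] == SUBTEST_FAIL | HARNESS_ERROR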

This change should functionally be a no-op.

Tested: built metadata with and without this change and recursively
diffed. Validated that changes make sense.

Bug: 937369
Change-Id: I80c68c6326f20c864c4b1c6334116c5d2c0498ee
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2024457
Commit-Queue: Luke Z <lpz@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#742749}
parent 715a298a
@@ -17,6 +17,7 @@ import re
 from blinkpy.common.system.log_utils import configure_logging
 from blinkpy.web_tests.models import test_expectations
 from blinkpy.web_tests.models.typ_types import ResultType
+from collections import defaultdict

 _log = logging.getLogger(__name__)
@@ -25,7 +26,10 @@ _log = logging.getLogger(__name__)
 HARNESS_ERROR = 1
 # The test has at least one failing subtest in its baseline file.
 SUBTEST_FAIL = 1 << 1
-# Next status: 1 << 2
+# The test should be skipped
+SKIP_TEST = 1 << 2
+# Next status: 1 << 3


 class WPTMetadataBuilder(object):
@@ -64,20 +68,10 @@ class WPTMetadataBuilder(object):
             import shutil
             shutil.rmtree(self.metadata_output_dir)

-        failing_baseline_tests = self.get_tests_with_baselines()
-        _log.info("Found %d tests with failing baselines", len(failing_baseline_tests))
-        for test_name, test_status_bitmap in failing_baseline_tests:
-            filename, file_contents = self.get_metadata_filename_and_contents(test_name, 'FAIL', test_status_bitmap)
-            if not filename or not file_contents:
-                continue
-            self._write_to_file(filename, file_contents)
-
-        skipped_tests = self.get_test_names_to_skip()
-        _log.info("Found %d tests with skip expectations", len(skipped_tests))
-        for test_name in skipped_tests:
-            if test_name in failing_baseline_tests:
-                _log.error("Test %s has a baseline but is also skipped" % test_name)
-            filename, file_contents = self.get_metadata_filename_and_contents(test_name, 'SKIP')
+        tests_for_metadata = self.get_tests_needing_metadata()
+        _log.info("Found %d tests requiring metadata", len(tests_for_metadata))
+        for test_name, test_status_bitmap in tests_for_metadata.items():
+            filename, file_contents = self.get_metadata_filename_and_contents(test_name, test_status_bitmap)
             if not filename or not file_contents:
                 continue
             self._write_to_file(filename, file_contents)
@@ -97,46 +91,62 @@ class WPTMetadataBuilder(object):
         with open(filename, "a") as metadata_file:
             metadata_file.write(file_contents)

-    def get_test_names_to_skip(self):
-        """Determines which tests in the expectation file need metadata.
-
-        Returns:
-            A list of test names that need metadata.
-        """
-        return self.expectations.get_tests_with_expected_result(ResultType.Skip)
-
-    def get_tests_with_baselines(self):
-        """Determines which tests have baselines that need metadata.
-
-        This is currently tests with baseline files containing failing subtests,
-        or tests with harness errors.
-        Failing subtests are those with statuses in (FAIL, NOTRUN, TIMEOUT).
+    def get_tests_needing_metadata(self):
+        """Determines which tests need metadata files.
+
+        This function loops over the tests to be run and checks whether each test
+        has an expectation (eg: in TestExpectations) and/or a baseline (ie:
+        test-name-expected.txt). The existence of those things will determine
+        the information that will be emitted into the tests's metadata file.

         Returns:
-            A list of pairs, the first being the test name, and the second being
-            a an integer indicating the status. The status is a bitmap of
-            constants |HARNESS_ERROR| and/or |SUBTEST_FAILURE|.
+            A dict. The key is the string test name and the value is an integer
+            bitmap of statuses for the test.
         """
-        failing_baseline_tests = []
-        for test in self.port.tests(paths=['external/wpt']):
-            test_baseline = self.port.expected_text(test)
+        tests_needing_metadata = defaultdict(int)
+        for test_name in self.port.tests(paths=["external/wpt"]):
+            # First check for expectations. If a test is skipped then we do not
+            # look for more statuses
+            expectation_line = self.expectations.get_expectations(test_name)
+            test_statuses = expectation_line.results
+            self._handle_test_with_expectation(test_name, test_statuses, tests_needing_metadata)
+            if self._test_was_skipped(test_name, tests_needing_metadata):
+                # Do not consider other statuses if a test is skipped
+                continue
+
+            # Check if the test has a baseline
+            test_baseline = self.port.expected_text(test_name)
             if not test_baseline:
                 continue
-            status_bitmap = 0
-            if re.search("^(FAIL|NOTRUN|TIMEOUT)", test_baseline, re.MULTILINE):
-                status_bitmap |= SUBTEST_FAIL
-            if re.search("^Harness Error\.", test_baseline, re.MULTILINE):
-                status_bitmap |= HARNESS_ERROR
-            if status_bitmap > 0:
-                failing_baseline_tests.append([test, status_bitmap])
-            else:
-                # Treat this as an error because we don't want it to happen.
-                # Either the non-FAIL statuses need to be handled here, or the
-                # baseline is all PASS which should just be deleted.
-                _log.error("Test %s has a non-FAIL baseline" % test)
-        return failing_baseline_tests
-
-    def get_metadata_filename_and_contents(self, chromium_test_name, test_status, test_status_bitmap=0):
+            self._handle_test_with_baseline(test_name, test_baseline, tests_needing_metadata)
+        return tests_needing_metadata
+
+    def _handle_test_with_expectation(self, test_name, test_statuses, status_dict):
+        """Handles a single test expectation and updates |status_dict|."""
+        # TODO(lpz): This will handle more statuses in the future, such as flakes.
+        if ResultType.Skip in test_statuses:
+            status_dict[test_name] |= SKIP_TEST
+
+    def _test_was_skipped(self, test_name, status_dict):
+        """Returns whether |test_name| is marked as skipped in |status_dict|."""
+        return test_name in status_dict and (status_dict[test_name] & SKIP_TEST)
+
+    def _handle_test_with_baseline(self, test_name, test_baseline, status_dict):
+        """Handles a single test baseline and updates |status_dict|."""
+        status_bitmap = 0
+        if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", test_baseline, re.MULTILINE):
+            status_bitmap |= SUBTEST_FAIL
+        if re.search(r"^Harness Error\.", test_baseline, re.MULTILINE):
+            status_bitmap |= HARNESS_ERROR
+        if status_bitmap > 0:
+            status_dict[test_name] |= status_bitmap
+        else:
+            # Treat this as an error because we don't want it to happen.
+            # Either the non-FAIL statuses need to be handled here, or the
+            # baseline is all PASS which should just be deleted.
+            _log.error("Test %s has a non-FAIL baseline" % test_name)
+
+    def get_metadata_filename_and_contents(self, chromium_test_name, test_status_bitmap=0):
         """Determines the metadata filename and contents for the specified test.

         The metadata filename is derived from the test name but will differ if
@@ -146,9 +156,6 @@ class WPTMetadataBuilder(object):
         Args:
             chromium_test_name: A Chromium test name from the expectation file,
                 which starts with `external/wpt`.
-            test_status: The expected status of this test. Possible values:
-                'SKIP' - skip this test (or directory).
-                'FAIL' - the test is expected to fail, not applicable to dirs.
             test_status_bitmap: An integer containing additional data about the
                 status, such as enumerating flaky statuses, or whether a test has
                 a combination of harness error and subtest failure.
@@ -158,8 +165,6 @@ class WPTMetadataBuilder(object):
             the second is the contents to write to that file. Or None if the
             test does not need a metadata file.
         """
-        assert test_status in ('SKIP', 'FAIL')
-
         # Ignore expectations for non-WPT tests
         if not chromium_test_name or not chromium_test_name.startswith('external/wpt'):
             return None, None
@@ -200,18 +205,14 @@ class WPTMetadataBuilder(object):
         test_file_parts[-1] += ".ini"
         metadata_filename = os.path.join(self.metadata_output_dir,
                                          *test_file_parts)
-        _log.debug("Creating a test ini file %s with status %s",
-                   metadata_filename, test_status)
+        _log.debug("Creating a test ini file %s with status_bitmap %s", metadata_filename, test_status_bitmap)

         # The contents of the metadata file is two lines:
         # 1. the last part of the WPT test path (ie the filename) inside
         # square brackets - this could differ from the metadata filename.
         # 2. an indented line with the test status and reason
         wpt_test_file_name_part = wpt_test_name_parts[-1]
-        if test_status == 'SKIP':
-            metadata_file_contents = self._get_test_disabled_string(wpt_test_file_name_part)
-        elif test_status == 'FAIL':
-            metadata_file_contents = self._get_test_failed_string(wpt_test_file_name_part, test_status_bitmap)
+        metadata_file_contents = self._get_test_failed_string(wpt_test_file_name_part, test_status_bitmap)

         return metadata_filename, metadata_file_contents
@@ -223,6 +224,15 @@ class WPTMetadataBuilder(object):
     def _get_test_failed_string(self, test_name, test_status_bitmap):
         result = "[%s]\n" % test_name
+
+        # A skipped test is a little special in that it doesn't happen along with
+        # any other status. So we compare directly against SKIP_TEST and also
+        # return right away.
+        if test_status_bitmap == SKIP_TEST:
+            result += " disabled: wpt_metadata_builder.py\n"
+            return result
+
+        # Other test statuses can exist together.
         if test_status_bitmap & HARNESS_ERROR:
             result += " expected: ERROR\n"
         if test_status_bitmap & SUBTEST_FAIL:
...
@@ -11,7 +11,7 @@ from blinkpy.common.host_mock import MockHost
 from blinkpy.web_tests.models.test_expectations import TestExpectations
 from blinkpy.web_tests.port.factory_mock import MockPortFactory
 from blinkpy.w3c.wpt_manifest import BASE_MANIFEST_NAME
-from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder, HARNESS_ERROR, SUBTEST_FAIL
+from blinkpy.w3c.wpt_metadata_builder import WPTMetadataBuilder, HARNESS_ERROR, SUBTEST_FAIL, SKIP_TEST


 def _make_expectation(port, test_path, test_statuses, test_names=[]):
@@ -79,8 +79,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "some/other/test.html"
         expectations = _make_expectation(self.port, test_name, "SKIP")
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'SKIP')
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SKIP_TEST)
         self.assertIsNone(filename)
         self.assertIsNone(contents)
@@ -89,8 +88,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/test-not-in-manifest.html"
         expectations = _make_expectation(self.port, test_name, "SKIP")
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'SKIP')
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SKIP_TEST)
         self.assertIsNone(filename)
         self.assertIsNone(contents)
@@ -99,7 +97,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/test.html"
         expectations = _make_expectation(self.port, test_name, "TIMEOUT")
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_names = metadata_builder.get_test_names_to_skip()
+        test_names = metadata_builder.get_tests_needing_metadata()
         self.assertFalse(test_names)

     def test_parse_baseline_all_pass(self):
@@ -113,8 +111,8 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nPASS some subtest\nPASS another subtest\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertFalse(test_and_status_list)
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertFalse(test_and_status_dict)

     def test_parse_baseline_subtest_fail(self):
         """Test parsing a baseline with a failing subtest."""
@@ -127,11 +125,10 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nPASS some subtest\nFAIL another subtest\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertEqual(1, len(test_and_status_list))
-        test_entry = test_and_status_list[0]
-        self.assertEqual(test_name, test_entry[0])
-        self.assertEqual(SUBTEST_FAIL, test_entry[1])
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertEqual(1, len(test_and_status_dict))
+        self.assertTrue(test_name in test_and_status_dict)
+        self.assertEqual(SUBTEST_FAIL, test_and_status_dict[test_name])

     def test_parse_baseline_subtest_notrun(self):
         """Test parsing a baseline with a notrun subtest."""
@@ -144,11 +141,10 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nPASS some subtest\nNOTRUN another subtest\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertEqual(1, len(test_and_status_list))
-        test_entry = test_and_status_list[0]
-        self.assertEqual(test_name, test_entry[0])
-        self.assertEqual(SUBTEST_FAIL, test_entry[1])
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertEqual(1, len(test_and_status_dict))
+        self.assertTrue(test_name in test_and_status_dict)
+        self.assertEqual(SUBTEST_FAIL, test_and_status_dict[test_name])

     def test_parse_baseline_subtest_timeout(self):
         """Test parsing a baseline with a timeout subtest."""
@@ -161,11 +157,10 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nTIMEOUT some subtest\nPASS another subtest\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertEqual(1, len(test_and_status_list))
-        test_entry = test_and_status_list[0]
-        self.assertEqual(test_name, test_entry[0])
-        self.assertEqual(SUBTEST_FAIL, test_entry[1])
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertEqual(1, len(test_and_status_dict))
+        self.assertTrue(test_name in test_and_status_dict)
+        self.assertEqual(SUBTEST_FAIL, test_and_status_dict[test_name])

     def test_parse_baseline_harness_error(self):
         """Test parsing a baseline with a harness error."""
@@ -178,11 +173,10 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nHarness Error. some stuff\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertEqual(1, len(test_and_status_list))
-        test_entry = test_and_status_list[0]
-        self.assertEqual(test_name, test_entry[0])
-        self.assertEqual(HARNESS_ERROR, test_entry[1])
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertEqual(1, len(test_and_status_dict))
+        self.assertTrue(test_name in test_and_status_dict)
+        self.assertEqual(HARNESS_ERROR, test_and_status_dict[test_name])

     def test_parse_baseline_subtest_fail_and_harness_error(self):
         """Test parsing a baseline with a harness error AND a subtest fail."""
@@ -195,18 +189,17 @@ class WPTMetadataBuilderTest(unittest.TestCase):
             "This is a test\nHarness Error. some stuff\nPASS some subtest\nFAIL another subtest\n")
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        test_and_status_list = metadata_builder.get_tests_with_baselines()
-        self.assertEqual(1, len(test_and_status_list))
-        test_entry = test_and_status_list[0]
-        self.assertEqual(test_name, test_entry[0])
-        self.assertEqual(SUBTEST_FAIL | HARNESS_ERROR, test_entry[1])
+        test_and_status_dict = metadata_builder.get_tests_needing_metadata()
+        self.assertEqual(1, len(test_and_status_dict))
+        self.assertTrue(test_name in test_and_status_dict)
+        self.assertEqual(SUBTEST_FAIL | HARNESS_ERROR, test_and_status_dict[test_name])

     def test_metadata_for_skipped_test(self):
         """A skipped WPT test should get a test-specific metadata file."""
         test_name = "external/wpt/test.html"
         expectations = _make_expectation(self.port, test_name, "SKIP")
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, 'SKIP')
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SKIP_TEST)
         self.assertEqual("test.html.ini", filename)
         self.assertEqual("[test.html]\n disabled: wpt_metadata_builder.py\n", contents)
@@ -215,7 +208,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/variant.html?foo=bar"
         expectations = _make_expectation(self.port, test_name, "SKIP")
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, 'SKIP')
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SKIP_TEST)
         # The metadata file name should not include variants
         self.assertEqual("variant.html.ini", filename)
         # ..but the contents of the file should include variants in the test name
@@ -227,7 +220,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/test_dir/test.html"
         expectations = _make_expectation(self.port, test_dir, "SKIP", test_names=[test_name])
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_dir, 'SKIP')
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_dir, SKIP_TEST)
         self.assertEqual(os.path.join("test_dir", "__dir__.ini"), filename)
         self.assertEqual("disabled: wpt_metadata_builder.py\n", contents)
@@ -236,8 +229,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/dir/zzzz.html"
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'FAIL', SUBTEST_FAIL)
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SUBTEST_FAIL)
         self.assertEqual(os.path.join("dir", "zzzz.html.ini"), filename)
         self.assertEqual(
             "[zzzz.html]\n blink_expect_any_subtest_status: True # wpt_metadata_builder.py\n",
@@ -248,8 +240,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/dir/zzzz.html"
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'FAIL', HARNESS_ERROR)
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, HARNESS_ERROR)
         self.assertEqual(os.path.join("dir", "zzzz.html.ini"), filename)
         self.assertEqual("[zzzz.html]\n expected: ERROR\n", contents)
@@ -258,8 +249,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/dir/zzzz.html"
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'FAIL', SUBTEST_FAIL | HARNESS_ERROR)
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SUBTEST_FAIL | HARNESS_ERROR)
         self.assertEqual(os.path.join("dir", "zzzz.html.ini"), filename)
         self.assertEqual(
             "[zzzz.html]\n expected: ERROR\n blink_expect_any_subtest_status: True # wpt_metadata_builder.py\n",
@@ -270,8 +260,7 @@ class WPTMetadataBuilderTest(unittest.TestCase):
         test_name = "external/wpt/dir/multiglob.https.any.window.html"
         expectations = TestExpectations(self.port)
         metadata_builder = WPTMetadataBuilder(expectations, self.port)
-        filename, contents = metadata_builder.get_metadata_filename_and_contents(
-            test_name, 'FAIL', SUBTEST_FAIL)
+        filename, contents = metadata_builder.get_metadata_filename_and_contents(test_name, SUBTEST_FAIL)
         # The metadata filename matches the test *filename*, not the test name,
         # which in this case is the js file from the manifest.
         self.assertEqual(os.path.join("dir", "multiglob.https.any.js.ini"), filename)
...