Commit 738683a5 authored by Rakib M. Hasan, committed by Commit Bot

[blinkpy] Create new methods to encapsulate updates to expectations

This CL adds new methods to blinkpy's TestExpectations class which
extract and encapsulate the logic for adding and removing expectation
lines, and for updating expectations files after the lines are updated.
This removes duplicate logic from several of blinkpy's sub modules.

Bug: 986447
Change-Id: Ie8a49dfb98c6553fb224cbfd691238cee6869f66
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2055892
Commit-Queue: Rakib Hasan <rmhasan@google.com>
Reviewed-by: Robert Ma <robertma@chromium.org>
Reviewed-by: Luke Z <lpz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#747935}
parent 0d8db383
......@@ -35,7 +35,8 @@ import logging
from blinkpy.w3c.common import is_basename_skipped
from blinkpy.common import path_finder
from blinkpy.web_tests.models.typ_types import ResultType, TestExpectations
from blinkpy.web_tests.models.test_expectations import TestExpectations
from blinkpy.web_tests.models.typ_types import ResultType
_log = logging.getLogger(__name__)
......@@ -139,17 +140,17 @@ class TestCopier(object):
port = self.host.port_factory.get()
w3c_import_expectations_path = self.path_finder.path_from_web_tests('W3CImportExpectations')
w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
expectations = TestExpectations()
ret, errors = expectations.parse_tagged_list(w3c_import_expectations)
assert not ret, errors
expectations = TestExpectations(port, {w3c_import_expectations_path: w3c_import_expectations})
# get test names that should be skipped
for path in expectations.individual_exps.keys():
exp = expectations.expectations_for(path)
if ResultType.Skip in exp.results:
if exp.tags:
_log.warning('W3CImportExpectations:%d should not have any specifiers' % exp.lineno)
paths_to_skip.add(path)
for line in expectations.get_updated_lines(w3c_import_expectations_path):
if line.is_glob:
_log.warning('W3CImportExpectations:%d Globs are not allowed in this file.' % line.lineno)
continue
if ResultType.Skip in line.results:
if line.tags:
_log.warning('W3CImportExpectations:%d should not have any specifiers' % line.lineno)
paths_to_skip.add(line.test)
return paths_to_skip
......
......@@ -32,7 +32,7 @@ from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
from blinkpy.w3c.wpt_github import WPTGitHub
from blinkpy.w3c.wpt_manifest import WPTManifest, BASE_MANIFEST_NAME
from blinkpy.web_tests.port.base import Port
from blinkpy.web_tests.models.typ_types import TestExpectations, Expectation
from blinkpy.web_tests.models.test_expectations import TestExpectations
# Settings for how often to check try job results and how long to wait.
......@@ -592,30 +592,16 @@ class TestImporter(object):
def _update_single_test_expectations_file(self, port, path, file_contents, deleted_tests, renamed_tests):
"""Updates a single test expectations file."""
test_expectations = TestExpectations()
ret, errors = test_expectations.parse_tagged_list(file_contents)
assert not ret, errors
test_expectations = TestExpectations(port, expectations_dict={path: file_contents})
if not test_expectations.individual_exps:
return
exps = reduce(lambda x, y: x + y, test_expectations.individual_exps.values())
if test_expectations.glob_exps:
exps.extend(reduce(lambda x, y: x + y, test_expectations.glob_exps.values()))
lineno_to_exps = {e.lineno: e for e in exps}
new_lines = []
for lineno, line in enumerate(file_contents.splitlines(), 1):
if lineno not in lineno_to_exps:
new_lines.append(line)
continue
exp = lineno_to_exps[lineno]
test_name = exp.test
# if a test is a glob type expectation then add it to the updated
for line in test_expectations.get_updated_lines(path):
# if a test is a glob type expectation or empty line or comment then add it to the updated
# expectations file without modifications
if exp.is_glob:
new_lines.append(line)
if not line.test or line.is_glob:
new_lines.append(line.to_string())
continue
test_name = line.test
if self.finder.is_webdriver_test_path(test_name):
root_test_file, subtest_suffix = port.split_webdriver_test_name(test_name)
else:
......@@ -628,8 +614,8 @@ class TestImporter(object):
test_name = port.add_webdriver_subtest_suffix(renamed_test, subtest_suffix)
else:
test_name = renamed_tests[root_test_file]
exp.test = test_name
new_lines.append(exp.to_string())
line.test = test_name
new_lines.append(line.to_string())
self.host.filesystem.write_text_file(path, '\n'.join(new_lines) + '\n')
def _list_deleted_tests(self):
......
......@@ -281,6 +281,7 @@ class TestImporterTest(LoggingTestCase):
'# results: [ Failure ]\n'
'some/test/a.html [ Failure ]\n'
'some/test/b.html [ Failure ]\n'
'ignore/globs/* [ Failure ]\n'
'some/test/c\*.html [ Failure ]\n'
# default test case, line below should exist in new file
'some/test/d.html [ Failure ]\n')
......@@ -308,6 +309,7 @@ class TestImporterTest(LoggingTestCase):
host.filesystem.read_text_file(MOCK_WEB_TESTS + 'TestExpectations'),
('# results: [ Failure ]\n'
'new/a.html [ Failure ]\n'
'ignore/globs/* [ Failure ]\n'
'new/c\*.html [ Failure ]\n'
'some/test/d.html [ Failure ]\n'))
self.assertMultiLineEqual(
......
......@@ -90,12 +90,13 @@ class WPTOutputUpdater(object):
delim = output_json['path_delimiter']
# Go through each WPT expectation, try to find it in the output, and
# then update the expected statuses in the output file.
for typ_expectation in self.expectations.expectations:
for exp_list in typ_expectation.individual_exps.values():
for e in exp_list:
test_leaf = self._find_test_for_expectation(e, delim, output_json)
if test_leaf is not None:
self._update_output_for_test(e, test_leaf)
for path in self.expectations.expectations_dict:
for line in self.expectations.get_updated_lines(path):
if not line.test or line.is_glob:
continue
test_leaf = self._find_test_for_expectation(line, delim, output_json)
if test_leaf is not None:
self._update_output_for_test(line, test_leaf)
return output_json
def _find_test_for_expectation(self, exp, delim, output_json):
......
......@@ -34,7 +34,7 @@ from blinkpy.common.host_mock import MockHost
from blinkpy.common.system.output_capture import OutputCapture
from blinkpy.web_tests.models.test_configuration import TestConfiguration, TestConfigurationConverter
from blinkpy.web_tests.models.test_expectations import TestExpectations, SystemConfigurationRemover, ParseError
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.models.typ_types import ResultType, Expectation
class Base(unittest.TestCase):
......@@ -137,7 +137,8 @@ class SystemConfigurationRemoverTests(Base):
}
def set_up_using_raw_expectations(self, content):
self._general_exp_filename = 'TestExpectations'
self._general_exp_filename = self._port.host.filesystem.join(
self._port.web_tests_dir(), 'TestExpectations')
self._port.host.filesystem.write_text_file(self._general_exp_filename, content)
expectations_dict = {self._general_exp_filename: content}
test_expectations = TestExpectations(self._port, expectations_dict)
......@@ -369,6 +370,187 @@ class MiscTests(Base):
self.assertEqual(expectations.get_expectations(test_name2).results, set([ResultType.Crash]))
class RemoveExpectationsTest(Base):
    """Tests for TestExpectations.remove_expectations and commit_changes."""

    def test_remove_expectation(self):
        # Deleting the only expectation in a block also drops the comment
        # and blank line that introduced the block.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n'
            '\n'
            '# This comment will be deleted\n'
            '[ mac ] test1 [ Failure ]\n')
        exp_files = OrderedDict([
            ('/tmp/TestExpectations', ''),
            ('/tmp/TestExpectations2', raw_content),
        ])
        expectations = TestExpectations(win_port, exp_files)
        per_test_exps = expectations.expectations[1].individual_exps
        expectations.remove_expectations(
            '/tmp/TestExpectations2', [per_test_exps['test1'][0]])
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations2')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n'))

    def test_remove_added_expectations(self):
        # An expectation added in memory can be removed again (matched by
        # line number) before the changes are committed to disk.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n'
            '\n'
            '# This comment will be deleted\n'
            '[ mac ] test1 [ Failure ]\n')
        exp_files = OrderedDict([
            ('/tmp/TestExpectations', ''),
            ('/tmp/TestExpectations2', raw_content),
        ])
        expectations = TestExpectations(win_port, exp_files)
        expectations.add_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test2', results=set([ResultType.Failure])),
             Expectation(test='test3', results=set([ResultType.Crash]),
                         tags=set(['win']))],
            5)
        expectations.remove_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test2', results=set([ResultType.Failure]),
                         lineno=5)])
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations2')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n'
            '\n'
            '# This comment will be deleted\n'
            '[ mac ] test1 [ Failure ]\n'
            '[ Win ] test3 [ Crash ]\n'))

    def test_remove_after_add(self):
        # Mixing adds and removes: test1 is removed but its comment block
        # survives because the new expectations land after it.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# This comment will not be deleted\n'
            '[ mac ] test1 [ Failure ]\n')
        exp_files = OrderedDict([
            ('/tmp/TestExpectations', ''),
            ('/tmp/TestExpectations2', raw_content),
        ])
        expectations = TestExpectations(win_port, exp_files)
        per_test_exps = expectations.expectations[1].individual_exps
        expectations.add_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test2', results=set([ResultType.Failure])),
             Expectation(test='test3', results=set([ResultType.Crash]),
                         tags=set(['mac']))],
            5)
        expectations.remove_expectations(
            '/tmp/TestExpectations2', [per_test_exps['test1'][0]])
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations2')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# This comment will not be deleted\n'
            '[ Mac ] test3 [ Crash ]\n'
            'test2 [ Failure ]\n'))
class AddExpectationsTest(Base):
    """Tests for TestExpectations.add_expectations."""

    def test_add_expectation(self):
        # With no insertion line given, the new expectation is appended at
        # the end of the file, separated by a blank line.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n')
        exp_files = OrderedDict([
            ('/tmp/TestExpectations', ''),
            ('/tmp/TestExpectations2', raw_content),
        ])
        expectations = TestExpectations(win_port, exp_files)
        expectations.add_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test1', results=set([ResultType.Failure]))])
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations2')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure ]\n'
            '\n'
            'test1 [ Failure ]\n'))

    def test_add_after_remove(self):
        # Removing the line-3 expectation and then adding at line 3 places
        # the new expectation where the old one was.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            'test1 [ Failure ]\n')
        exp_files = OrderedDict([
            ('/tmp/TestExpectations', ''),
            ('/tmp/TestExpectations2', raw_content),
        ])
        expectations = TestExpectations(win_port, exp_files)
        expectations.remove_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test1', results=set([ResultType.Failure]),
                         lineno=3)])
        expectations.add_expectations(
            '/tmp/TestExpectations2',
            [Expectation(test='test2', results=set([ResultType.Crash]))],
            3)
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations2')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            'test2 [ Crash ]\n'))

    def test_add_expectation_at_line(self):
        # An explicit line number inserts the new expectation right after
        # that line; the removed test1 line disappears around it.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# add expectations after this line\n'
            'test1 [ Failure ]\n'
            '\n')
        exp_files = OrderedDict([('/tmp/TestExpectations', raw_content)])
        expectations = TestExpectations(win_port, exp_files)
        expectations.add_expectations(
            '/tmp/TestExpectations',
            [Expectation(test='test2', results=set([ResultType.Crash]),
                         tags=set(['win']))],
            4)
        expectations.remove_expectations(
            '/tmp/TestExpectations',
            [Expectation(test='test1', results=set([ResultType.Failure]),
                         lineno=5)])
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# add expectations after this line\n'
            '[ Win ] test2 [ Crash ]\n'
            '\n'))
class CommitChangesTests(Base):
    """Tests for TestExpectations.commit_changes."""

    def test_commit_changes_without_modifications(self):
        # With no pending adds or removes, committing writes the file back
        # unchanged, including comments and blank lines.
        win_port = MockHost().port_factory.get('test-win-win7')
        raw_content = (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# add expectations after this line\n'
            'test1 [ Failure ]\n'
            '\n')
        exp_files = OrderedDict([('/tmp/TestExpectations', raw_content)])
        expectations = TestExpectations(win_port, exp_files)
        expectations.commit_changes()
        written = win_port.host.filesystem.read_text_file(
            '/tmp/TestExpectations')
        self.assertEqual(written, (
            '# tags: [ Mac Win ]\n'
            '# results: [ Failure Crash ]\n'
            '\n'
            '# add expectations after this line\n'
            'test1 [ Failure ]\n'
            '\n'))
class SkippedTests(Base):
def check(self, expectations, overrides, ignore_tests, lint=False, expected_results=None):
......
......@@ -236,56 +236,29 @@ class ExpectationsRemover(object):
"""
generic_exp_path = self._port.path_to_generic_test_expectations_file()
raw_test_expectations = self._host.filesystem.read_text_file(generic_exp_path)
expectations_dict = {self._host.filesystem.basename(generic_exp_path): raw_test_expectations}
expectations_dict = {generic_exp_path: raw_test_expectations}
test_expectations = TestExpectations(port=self._port, expectations_dict=expectations_dict)
removed_exps = []
lines = []
# only get expectations objects for non glob patterns
if test_expectations.general_expectations.individual_exps.values():
lineno_to_exps = {
e.lineno: e for e in reduce(
lambda x,y: x+y, test_expectations.general_expectations.individual_exps.values())}
else:
lineno_to_exps = {}
new_raw_exp_lines = []
for exp in test_expectations.get_updated_lines(generic_exp_path):
# only get expectations objects for non glob patterns
if not exp.test or exp.is_glob:
continue
for lineno, line in enumerate(raw_test_expectations.splitlines(), 1):
if lineno in lineno_to_exps and self._can_delete_line(lineno_to_exps[lineno]):
exp = lineno_to_exps[lineno]
if self._can_delete_line(exp):
reason = exp.reason or ''
self._bug_numbers.update(
[reason[len(CHROMIUM_BUG_PREFIX):] for reason in reason.split()
if reason.startswith(CHROMIUM_BUG_PREFIX)])
self._removed_test_names.add(exp.test)
removed_exps.append(exp)
_log.info('Deleting line "%s"' % exp.to_string().strip())
if lineno + 1 not in lineno_to_exps:
self._remove_associated_comments_and_whitespace(new_raw_exp_lines)
_log.info('Deleting line "%s"' % line.strip())
else:
new_raw_exp_lines.append(line)
return '\n'.join(new_raw_exp_lines)
@staticmethod
def _remove_associated_comments_and_whitespace(new_raw_exp_lines):
"""Removes comments and whitespace from an empty expectation block.
If the removed expectation was the last in a block of expectations, this method
will remove any associated comments and whitespace.
Args:
new_raw_exp_lines: A list of strings for the updated expectations file.
"""
# remove comments associated with deleted expectation
while (new_raw_exp_lines and new_raw_exp_lines[-1].strip().startswith('#') and
not any(new_raw_exp_lines[-1].strip().startswith(prefix) for prefix in SPECIAL_PREFIXES)):
new_raw_exp_lines.pop(-1)
# remove spaces above expectation
while new_raw_exp_lines and new_raw_exp_lines[-1].strip() == '':
new_raw_exp_lines.pop(-1)
if removed_exps:
test_expectations.remove_expectations(generic_exp_path, removed_exps)
return '\n'.join([e.to_string() for e in test_expectations.get_updated_lines(generic_exp_path)])
def show_removed_results(self):
"""Opens a browser showing the removed lines in the results dashboard.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment