Commit d7406299 authored by Ned Nguyen, committed by Commit Bot

Remove support for the REBASELINE expectation from web tests

This also removes the "rebaseline-expectations" subcommand from blink_tool.py.

Also clean up the remaining references to NEEDS_MANUAL_REBASELINE left
from https://crrev.com/c/1140927.
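
For reference, a TestExpectations line using the removed specifier looked like the
entry below (copied from the unit test deleted in this change); after this CL the
specifier is no longer recognized by the parser:

    Bug(x) failures/expected/text.html [ Rebaseline ]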

Bug: 621126, 738152
Change-Id: I33c109e4343e7bf41aac6e7527237702e4cce94a
Reviewed-on: https://chromium-review.googlesource.com/1164405
Commit-Queue: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Reviewed-by: Aleks Totic <atotic@chromium.org>
Cr-Commit-Position: refs/heads/master@{#581366}
parent 6856c650
@@ -391,7 +391,6 @@ const TestResultInformation = {
   "SKIP": { index: 9, text: "Skip", isFailure: false, isSuccess: false },
   "MISSING": { index: 10, text: "Missing", isFailure: false, isSuccess: false },
   "WONTFIX": { index: 11, text: "WontFix", isFailure: false, isSuccess: false },
-  "NEEDSMANUALREBASELINE": { index: 12, text: "NeedsManualRebaseline", isFailure: false, isSuccess: false },
   "PASS": { index: 13, text: "Pass", isFailure: false, isSuccess: true },
   "NOTRUN": { index: 14, text: "NOTRUN", isFailure: false, isSuccess: true }
 };
@@ -962,7 +961,6 @@ const Filters = {
   },
   containsNoPass: function(map) {
     return map.has("FAIL")
-      || map.has("NEEDSMANUALREBASELINE")
       || map.has("WONTFIX")
       || map.has("SKIP")
       || map.has("CRASH");
@@ -971,8 +969,7 @@ const Filters = {
     return !Filters.containsPass(test.expectedMap) && Filters.containsPass(test.actualMap);
   },
   regressionFromExpectedMap: (finalResult, expectedMap) => {
-    if (expectedMap.has("NEEDSMANUALREBASELINE")
-        || expectedMap.has("NEEDSREBASELINE")
+    if (expectedMap.has("NEEDSREBASELINE")
         || expectedMap.has("WONTFIX"))
       return false;
     switch (finalResult) {
@@ -1016,8 +1013,7 @@ const Filters = {
   flagFailure: test => { // Tests that are failing, but expected to pass in base.
     if (Filters.containsPass(test.actualMap))
       return false;
-    if (test.expectedMap.has("NEEDSMANUALREBASELINE")
-        || test.expectedMap.has("NEEDSREBASELINE")
+    if (test.expectedMap.has("NEEDSREBASELINE")
         || test.expectedMap.has("WONTFIX"))
       return false;
     let baseMap = test.flagMap ? test.baseMap : test.expectedMap;
......
@@ -49,7 +49,6 @@ from blinkpy.tool.commands.queries import CrashLog
 from blinkpy.tool.commands.queries import PrintBaselines
 from blinkpy.tool.commands.queries import PrintExpectations
 from blinkpy.tool.commands.rebaseline import Rebaseline
-from blinkpy.tool.commands.rebaseline import RebaselineExpectations
 from blinkpy.tool.commands.rebaseline_cl import RebaselineCL
 from blinkpy.tool.commands.rebaseline_test import RebaselineTest
@@ -84,7 +83,6 @@ class BlinkTool(Host):
             PrintExpectations(),
             Rebaseline(),
             RebaselineCL(),
-            RebaselineExpectations(),
             RebaselineTest(),
         ]
         self.help_command = HelpCommand(tool=self)
......
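
With RebaselineExpectations gone from BlinkTool's command list, the matching
subcommand disappears as well; an invocation along these lines (the tool path
shown is an assumption, the subcommand name comes from the deleted class) is no
longer available:

    # No longer available after this change; the path is illustrative.
    third_party/blink/tools/blink_tool.py rebaseline-expectations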
@@ -509,53 +509,6 @@ class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
         return test_result
 
-
-class RebaselineExpectations(AbstractParallelRebaselineCommand):
-    name = 'rebaseline-expectations'
-    help_text = 'Rebaselines the tests indicated in TestExpectations.'
-    show_in_main_help = True
-
-    def __init__(self):
-        super(RebaselineExpectations, self).__init__(options=[
-            self.no_optimize_option,
-        ] + self.platform_options)
-        self._test_baseline_set = None
-
-    @staticmethod
-    def _tests_to_rebaseline(port):
-        tests_to_rebaseline = []
-        for path, value in port.expectations_dict().items():
-            expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
-            for test in expectations.get_rebaselining_failures():
-                tests_to_rebaseline.append(test)
-        return tests_to_rebaseline
-
-    def _add_tests_to_rebaseline(self, port_name):
-        builder_name = self._tool.builders.builder_name_for_port_name(port_name)
-        if not builder_name:
-            return
-        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name))
-        if tests:
-            _log.info('Retrieving results for %s from %s.', port_name, builder_name)
-        for test_name in tests:
-            _log.info('  %s', test_name)
-            self._test_baseline_set.add(test_name, Build(builder_name))
-
-    def execute(self, options, args, tool):
-        self._tool = tool
-        self._test_baseline_set = TestBaselineSet(tool)
-        options.results_directory = None
-        port_names = tool.port_factory.all_port_names(options.platform)
-        for port_name in port_names:
-            self._add_tests_to_rebaseline(port_name)
-        if not self._test_baseline_set:
-            _log.warning('Did not find any tests marked Rebaseline.')
-            return
-        self.rebaseline(options, self._test_baseline_set)
-
-
 class Rebaseline(AbstractParallelRebaselineCommand):
     name = 'rebaseline'
     help_text = 'Rebaseline tests with results from the continuous builders.'
......
@@ -218,7 +218,7 @@ class BotTestExpectations(object):
         # Distinct resulting expectations.
         result_exp = map(string_to_exp, result_strings)
-        expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)
+        expected = lambda e: TestExpectations.result_was_expected(e, expectations)
         additional_expectations = set(e for e in result_exp if not expected(e))
@@ -328,7 +328,7 @@ class BotTestExpectations(object):
             # individual runs' full_results.json, which would be slow and more complicated.
             # The only thing we lose by not fixing this is that a test that was flaky
             # and got fixed will still get printed out until 100 runs have passed.
-            if not TestExpectations.result_was_expected(result_enum, latest_expectations, test_needs_rebaselining=False):
+            if not TestExpectations.result_was_expected(result_enum, latest_expectations):
                 has_unexpected_results = True
                 break
......
@@ -44,7 +44,7 @@ _log = logging.getLogger(__name__)
 # FIXME: range() starts with 0 which makes if expectation checks harder
 # as PASS is 0.
 (PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
- SLOW, REBASELINE, NEEDS_REBASELINE_UNUSED, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)
+ SLOW, MISSING, FLAKY, NOW, NONE) = range(16)
 
 WEBKIT_BUG_PREFIX = 'webkit.org/b/'
 CHROMIUM_BUG_PREFIX = 'crbug.com/'
@@ -73,7 +73,6 @@ class TestExpectationParser(object):
     # FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make
     # the case studdly-caps to match the actual file contents.
-    REBASELINE_MODIFIER = 'rebaseline'
     PASS_EXPECTATION = 'pass'
     SKIP_MODIFIER = 'skip'
     SLOW_MODIFIER = 'slow'
@@ -160,12 +159,7 @@ class TestExpectationParser(object):
         expectations = [expectation.lower() for expectation in expectation_line.expectations]
         if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
             expectation_line.warnings.append(self.MISSING_BUG_WARNING)
-        if self.REBASELINE_MODIFIER in expectations:
-            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
 
         specifiers = [specifier.lower() for specifier in expectation_line.specifiers]
-        if self.REBASELINE_MODIFIER in expectations and ('debug' in specifiers or 'release' in specifiers):
-            expectation_line.warnings.append('A test cannot be rebaselined for Debug/Release.')
 
     def _parse_expectations(self, expectation_line):
         result = set()
@@ -296,7 +290,6 @@ class TestExpectationLine(object):
         'Failure': 'FAIL',
         MISSING_KEYWORD: 'MISSING',
         'Pass': 'PASS',
-        'Rebaseline': 'REBASELINE',
         'Skip': 'SKIP',
         'Slow': 'SLOW',
         'Timeout': 'TIMEOUT',
@@ -904,7 +897,6 @@ class TestExpectations(object):
         TestExpectationParser.SKIP_MODIFIER: SKIP,
         TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
         TestExpectationParser.SLOW_MODIFIER: SLOW,
-        TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
     }
 
     EXPECTATIONS_TO_STRING = {k: v.upper() for (v, k) in EXPECTATIONS.iteritems()}
@@ -953,12 +945,11 @@ class TestExpectations(object):
         raise ValueError(expectation)
 
     @staticmethod
-    def result_was_expected(result, expected_results, test_needs_rebaselining):
+    def result_was_expected(result, expected_results):
         """Returns whether we got a result we were expecting.
 
         Args:
             result: actual result of a test execution
            expected_results: set of results listed in test_expectations
-            test_needs_rebaselining: whether test was marked as REBASELINE
         """
         local_expected = set(expected_results)
         if WONTFIX in local_expected:
@@ -967,19 +958,14 @@ class TestExpectations(object):
         # Make sure we have at least one result type that may actually happen.
         local_expected.discard(WONTFIX)
-        local_expected.discard(REBASELINE)
         local_expected.discard(SLOW)
         if not local_expected:
             local_expected = {PASS}
 
         if result in local_expected:
             return True
-        if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and NEEDS_MANUAL_REBASELINE in local_expected:
-            return True
         if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO) and FAIL in local_expected:
             return True
-        if result == MISSING and test_needs_rebaselining:
-            return True
         return False
 
     @staticmethod
@@ -1090,9 +1076,6 @@ class TestExpectations(object):
     def expectations(self):
         return self._expectations
 
-    def get_rebaselining_failures(self):
-        return self._model.get_test_set(REBASELINE)
-
     # FIXME: Change the callsites to use TestExpectationsModel and remove.
     def get_expectations(self, test):
         return self._model.get_expectations(test)
@@ -1118,10 +1101,7 @@ class TestExpectations(object):
             expected_results = self.remove_non_sanitizer_failures(expected_results)
         elif not pixel_tests_are_enabled:
             expected_results = self.remove_pixel_failures(expected_results)
-        return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
-
-    def is_rebaselining(self, test):
-        return REBASELINE in self._model.get_expectations(test)
+        return self.result_was_expected(result, expected_results)
 
     def _shorten_filename(self, filename):
         finder = PathFinder(self._port.host.filesystem)
......
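
As a minimal sketch of the slimmed-down API, callers of result_was_expected now
pass only the actual result and the parsed expectation set (the imports mirror
the unit test below; the expectation sets here are illustrative):

    from blinkpy.web_tests.models.test_expectations import TestExpectations, PASS, FAIL, MISSING

    # An explicit FAIL expectation still covers a FAIL result.
    TestExpectations.result_was_expected(FAIL, {PASS, FAIL})   # True
    # A MISSING result is no longer forgiven by any rebaselining flag.
    TestExpectations.result_was_expected(MISSING, {PASS})      # False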
@@ -36,7 +36,7 @@ from blinkpy.web_tests.models.test_configuration import TestConfiguration, TestC
 from blinkpy.web_tests.models.test_expectations import (
     TestExpectationLine, TestExpectations, ParseError, TestExpectationParser,
     PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO,
-    TIMEOUT, CRASH, LEAK, SKIP, WONTFIX, NEEDS_MANUAL_REBASELINE, MISSING
+    TIMEOUT, CRASH, LEAK, SKIP, WONTFIX, MISSING
 )
@@ -121,35 +121,15 @@ class MiscTests(Base):
     def test_result_was_expected(self):
         # test basics
-        self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
-        self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
+        self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS])), True)
+        self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS])), False)
 
         # test handling of SKIPped tests and results
-        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), False)
-        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), False)
+        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH])), False)
+        self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK])), False)
 
-        # test handling of MISSING results and the REBASELINE specifier
-        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
-        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
-        self.assertTrue(TestExpectations.result_was_expected(
-            PASS, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertTrue(TestExpectations.result_was_expected(
-            MISSING, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertTrue(TestExpectations.result_was_expected(
-            TEXT, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertTrue(TestExpectations.result_was_expected(
-            IMAGE, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertTrue(TestExpectations.result_was_expected(
-            IMAGE_PLUS_TEXT, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertTrue(TestExpectations.result_was_expected(
-            AUDIO, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertFalse(TestExpectations.result_was_expected(
-            TIMEOUT, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertFalse(TestExpectations.result_was_expected(
-            CRASH, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
-        self.assertFalse(TestExpectations.result_was_expected(
-            LEAK, set([NEEDS_MANUAL_REBASELINE]), test_needs_rebaselining=False))
+        # test handling of MISSING results
+        self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS])), False)
 
     def test_remove_pixel_failures(self):
         self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
@@ -776,17 +756,6 @@ Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
 """, actual_expectations)
 
 
-class RebaseliningTest(Base):
-
-    def test_get_rebaselining_failures(self):
-        # Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
-        self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
-        self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
-
-        self.parse_exp(self.get_basic_expectations())
-        self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
-
-
 class TestExpectationsParserTests(unittest.TestCase):
 
     def __init__(self, testFunc):
......
@@ -205,7 +205,6 @@ class SummarizedResultsTest(unittest.TestCase):
                 'TEXT': 1,
                 'IMAGE': 1,
                 'PASS': 0,
-                'REBASELINE': 0,
                 'SKIP': 0,
                 'SLOW': 0,
                 'TIMEOUT': 3,
@@ -225,7 +224,6 @@ class SummarizedResultsTest(unittest.TestCase):
                 'TEXT': 0,
                 'IMAGE': 0,
                 'PASS': 1,
-                'REBASELINE': 0,
                 'SKIP': 0,
                 'SLOW': 0,
                 'TIMEOUT': 1,
@@ -245,7 +243,6 @@ class SummarizedResultsTest(unittest.TestCase):
                 'TEXT': 0,
                 'IMAGE': 0,
                 'PASS': 5,
-                'REBASELINE': 0,
                 'SKIP': 1,
                 'SLOW': 0,
                 'TIMEOUT': 0,
......
@@ -230,8 +230,6 @@ class ExpectationsRemover(object):
             otherwise.
         """
         unstrippable_expectations = (
-            'NEEDSMANUALREBASELINE',
-            'REBASELINE',
            'SKIP',
            'SLOW',
         )
......
@@ -172,8 +172,7 @@ class UpdateTestExpectationsTest(LoggingTestCase):
        # expectations are flaky so we shouldn't remove any.
        Bug(test) test/a.html [ Pass ]
        Bug(test) test/b.html [ Timeout ]
-        Bug(test) test/c.html [ Failure Timeout ]
-        Bug(test) test/d.html [ Rebaseline ]"""
+        Bug(test) test/c.html [ Failure Timeout ]"""
         self._expectations_remover = (
             self._create_expectations_remover(self.FLAKE_TYPE))
@@ -213,8 +212,7 @@ class UpdateTestExpectationsTest(LoggingTestCase):
        # expectations are failing so we shouldn't remove any.
        Bug(test) test/a.html [ Pass ]
        Bug(test) test/b.html [ Failure Pass ]
-        Bug(test) test/c.html [ Failure Pass Timeout ]
-        Bug(test) test/d.html [ Rebaseline ]"""
+        Bug(test) test/c.html [ Failure Pass Timeout ]"""
         self._expectations_remover = (
             self._create_expectations_remover(self.FAIL_TYPE))
@@ -344,36 +342,6 @@ class UpdateTestExpectationsTest(LoggingTestCase):
         self._assert_expectations_match(
             updated_expectations, test_expectations_before)
 
-    def test_dont_remove_rebaselines(self):
-        """Tests that lines with rebaseline expectations are untouched."""
-        test_expectations_before = """
-        # Even though the results show all passing, none of the
-        # expectations are flaky or failing so we shouldn't remove any.
-        Bug(test) test/a.html [ Failure Pass Rebaseline ]
-        Bug(test) test/b.html [ Failure Rebaseline ]"""
-        self._expectations_remover = self._create_expectations_remover()
-        self._define_builders({
-            'WebKit Linux Trusty': {
-                'port_name': 'linux-trusty',
-                'specifiers': ['Trusty', 'Release']
-            },
-        })
-        self._port.all_build_types = ('release',)
-        self._port.all_systems = (('trusty', 'x86_64'),)
-        self._parse_expectations(test_expectations_before)
-        self._expectation_factory.all_results_by_builder = {
-            'WebKit Linux Trusty': {
-                'test/a.html': ['PASS', 'PASS'],
-                'test/b.html': ['PASS', 'PASS'],
-            }
-        }
-        updated_expectations = (
-            self._expectations_remover.get_updated_test_expectations())
-        self._assert_expectations_match(
-            updated_expectations, test_expectations_before)
-
     def test_all_failure_result_types(self):
         """Tests that all failure types are treated as failure."""
         test_expectations_before = (
......