Commit 2caf5aa7 authored by eseidel@chromium.org's avatar eseidel@chromium.org

Make update-flaky-tests work again and support all bots

Long ago this was added in hopes of generating a FlakyTests file.
This patch finally finishes that work and adds the FlakyTests
file as well as renames the command to update-flaky-tests
and writes to the file instead of stdout.

TestExpectationLine changed syntax a little since
flaky-tests was written.  I've also added support
for specifying path and specifiers values so that
the resulting test output looks right.

I explored having it update TestExpectations directly
(which we're definitely very close to being able to do)
but the consensus was FlakyTests as a separate file would
work better, so let's start with that.

BUG=242366

Review URL: https://codereview.chromium.org/301853003

git-svn-id: svn://svn.chromium.org/blink/trunk@175121 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 186666df
This diff is collapsed.
...@@ -135,16 +135,21 @@ class BotTestExpectations(object): ...@@ -135,16 +135,21 @@ class BotTestExpectations(object):
# FIXME: Get this from the json instead of hard-coding it. # FIXME: Get this from the json instead of hard-coding it.
RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y'] RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y']
def __init__(self, results_json): # specifiers arg is used in unittests to avoid the static dependency on builders.
def __init__(self, results_json, specifiers=None):
self.results_json = results_json self.results_json = results_json
self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))
def _line_from_test_and_flaky_types_and_bug_urls(self, test_path, flaky_types, bug_urls): def _line_from_test_and_flaky_types_and_bug_urls(self, test_path, flaky_types, bug_urls):
line = TestExpectationLine() line = TestExpectationLine()
line.original_string = test_path line.original_string = test_path
line.name = test_path line.name = test_path
line.filename = test_path line.filename = test_path
line.specifiers = bug_urls if bug_urls else "" line.path = test_path # FIXME: Should this be normpath?
line.matching_tests = [test_path]
line.bugs = bug_urls if bug_urls else ["Bug(gardener)"]
line.expectations = sorted(map(self.results_json.expectation_for_type, flaky_types)) line.expectations = sorted(map(self.results_json.expectation_for_type, flaky_types))
line.specifiers = self.specifiers
return line return line
def flakes_by_path(self, only_ignore_very_flaky): def flakes_by_path(self, only_ignore_very_flaky):
...@@ -203,11 +208,11 @@ class BotTestExpectations(object): ...@@ -203,11 +208,11 @@ class BotTestExpectations(object):
unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations)) unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
return unexpected_results_by_path return unexpected_results_by_path
def expectation_lines(self): def expectation_lines(self, only_ignore_very_flaky=False):
lines = [] lines = []
for test_path, entry in self.results_json.walk_results(): for test_path, entry in self.results_json.walk_results():
results_array = entry[self.results_json.RESULTS_KEY] results_array = entry[self.results_json.RESULTS_KEY]
flaky_types = self._flaky_types_in_results(results_array, False) flaky_types = self._flaky_types_in_results(results_array, only_ignore_very_flaky)
if len(flaky_types) > 1: if len(flaky_types) > 1:
bug_urls = entry.get(self.results_json.BUGS_KEY) bug_urls = entry.get(self.results_json.BUGS_KEY)
line = self._line_from_test_and_flaky_types_and_bug_urls(test_path, flaky_types, bug_urls) line = self._line_from_test_and_flaky_types_and_bug_urls(test_path, flaky_types, bug_urls)
......
...@@ -42,7 +42,7 @@ class BotTestExpectationsTest(unittest.TestCase): ...@@ -42,7 +42,7 @@ class BotTestExpectationsTest(unittest.TestCase):
def _assert_is_flaky(self, results_string, should_be_flaky): def _assert_is_flaky(self, results_string, should_be_flaky):
results_json = self._results_json_from_test_data({}) results_json = self._results_json_from_test_data({})
expectations = bot_test_expectations.BotTestExpectations(results_json) expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
length_encoded = self._results_from_string(results_string)['results'] length_encoded = self._results_from_string(results_string)['results']
num_actual_results = len(expectations._flaky_types_in_results(length_encoded, only_ignore_very_flaky=True)) num_actual_results = len(expectations._flaky_types_in_results(length_encoded, only_ignore_very_flaky=True))
if should_be_flaky: if should_be_flaky:
...@@ -80,12 +80,12 @@ class BotTestExpectationsTest(unittest.TestCase): ...@@ -80,12 +80,12 @@ class BotTestExpectationsTest(unittest.TestCase):
def _assert_expectations(self, test_data, expectations_string, only_ignore_very_flaky): def _assert_expectations(self, test_data, expectations_string, only_ignore_very_flaky):
results_json = self._results_json_from_test_data(test_data) results_json = self._results_json_from_test_data(test_data)
expectations = bot_test_expectations.BotTestExpectations(results_json) expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
self.assertEqual(expectations.flakes_by_path(only_ignore_very_flaky), expectations_string) self.assertEqual(expectations.flakes_by_path(only_ignore_very_flaky), expectations_string)
def _assert_unexpected_results(self, test_data, expectations_string): def _assert_unexpected_results(self, test_data, expectations_string):
results_json = self._results_json_from_test_data(test_data) results_json = self._results_json_from_test_data(test_data)
expectations = bot_test_expectations.BotTestExpectations(results_json) expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
self.assertEqual(expectations.unexpected_results_by_path(), expectations_string) self.assertEqual(expectations.unexpected_results_by_path(), expectations_string)
def test_basic(self): def test_basic(self):
......
...@@ -402,7 +402,7 @@ class TestExpectationLine(object): ...@@ -402,7 +402,7 @@ class TestExpectationLine(object):
and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file) and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
def is_invalid(self): def is_invalid(self):
return self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING] return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])
def is_flaky(self): def is_flaky(self):
return len(self.parsed_expectations) > 1 return len(self.parsed_expectations) > 1
......
...@@ -1224,6 +1224,7 @@ class Port(object): ...@@ -1224,6 +1224,7 @@ class Port(object):
paths.append(self._filesystem.join(self.layout_tests_dir(), 'NeverFixTests')) paths.append(self._filesystem.join(self.layout_tests_dir(), 'NeverFixTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'StaleTestExpectations')) paths.append(self._filesystem.join(self.layout_tests_dir(), 'StaleTestExpectations'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'SlowTests')) paths.append(self._filesystem.join(self.layout_tests_dir(), 'SlowTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'FlakyTests'))
builder_name = self.get_option('builder_name', 'DUMMY_BUILDER_NAME') builder_name = self.get_option('builder_name', 'DUMMY_BUILDER_NAME')
if builder_name == 'DUMMY_BUILDER_NAME' or '(deps)' in builder_name or builder_name in self.try_builder_names: if builder_name == 'DUMMY_BUILDER_NAME' or '(deps)' in builder_name or builder_name in self.try_builder_names:
......
...@@ -37,23 +37,24 @@ from webkitpy.common.memoized import memoized ...@@ -37,23 +37,24 @@ from webkitpy.common.memoized import memoized
# This is useful when we don't have bots that cover particular configurations; so, e.g., you might # This is useful when we don't have bots that cover particular configurations; so, e.g., you might
# support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion # support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
# results into platform/mac temporarily. # results into platform/mac temporarily.
# * specifiers -- TestExpectation specifiers for that config. Valid values are found in
# TestExpectationsParser._configuration_tokens_list
_exact_matches = { _exact_matches = {
"WebKit XP": {"port_name": "win-xp"}, "WebKit XP": {"port_name": "win-xp", "specifiers": ['XP', 'Release']},
"WebKit Win7": {"port_name": "win-win7"}, "WebKit Win7": {"port_name": "win-win7", "specifiers": ['Win7', 'Release']},
"WebKit Win7 (dbg)": {"port_name": "win-win7"}, "WebKit Win7 (dbg)": {"port_name": "win-win7", "specifiers": ['Win7', 'Debug']},
"WebKit Linux": {"port_name": "linux-x86_64"}, "WebKit Linux": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Release']},
"WebKit Linux 32": {"port_name": "linux-x86"}, "WebKit Linux 32": {"port_name": "linux-x86", "specifiers": ['Linux', 'Release']},
"WebKit Linux (dbg)": {"port_name": "linux-x86_64"}, "WebKit Linux (dbg)": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Debug']},
"WebKit Linux ASAN": {"port_name": "linux-x86_64"}, "WebKit Mac10.6": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Release']},
"WebKit Mac10.6": {"port_name": "mac-snowleopard"}, "WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Debug']},
"WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard"}, "WebKit Mac10.7": {"port_name": "mac-lion", "specifiers": ['Lion', 'Release']},
"WebKit Mac10.7": {"port_name": "mac-lion"}, "WebKit Mac10.7 (dbg)": {"port_name": "mac-lion", "specifiers": ['Lion', 'Debug']},
"WebKit Mac10.7 (dbg)": {"port_name": "mac-lion"}, "WebKit Mac10.8": {"port_name": "mac-mountainlion", "specifiers": ['MountainLion', 'Release']},
"WebKit Mac10.8": {"port_name": "mac-mountainlion"}, "WebKit Mac10.8 (retina)": {"port_name": "mac-retina", "specifiers": ['Retina', 'Release']},
"WebKit Mac10.8 (retina)": {"port_name": "mac-retina"}, "WebKit Mac10.9": {"port_name": "mac-mavericks", "specifiers": ['Mavericks', 'Release']},
"WebKit Mac10.9": {"port_name": "mac-mavericks"}, "WebKit Android (Nexus4)": {"port_name": "android", "specifiers": ['Android', 'Release']},
"WebKit Android (Nexus4)": {"port_name": "android"},
} }
...@@ -95,6 +96,10 @@ def port_name_for_builder_name(builder_name): ...@@ -95,6 +96,10 @@ def port_name_for_builder_name(builder_name):
return _exact_matches[builder_name]["port_name"] return _exact_matches[builder_name]["port_name"]
def specifiers_for_builder(builder_name):
return _exact_matches[builder_name]["specifiers"]
def builder_name_for_port_name(target_port_name): def builder_name_for_port_name(target_port_name):
debug_builder_name = None debug_builder_name = None
for builder_name, builder_info in _exact_matches.items(): for builder_name, builder_info in _exact_matches.items():
......
...@@ -330,6 +330,7 @@ class PortTestCase(unittest.TestCase): ...@@ -330,6 +330,7 @@ class PortTestCase(unittest.TestCase):
never_fix_tests_path = port._filesystem.join(port.layout_tests_dir(), 'NeverFixTests') never_fix_tests_path = port._filesystem.join(port.layout_tests_dir(), 'NeverFixTests')
stale_tests_path = port._filesystem.join(port.layout_tests_dir(), 'StaleTestExpectations') stale_tests_path = port._filesystem.join(port.layout_tests_dir(), 'StaleTestExpectations')
slow_tests_path = port._filesystem.join(port.layout_tests_dir(), 'SlowTests') slow_tests_path = port._filesystem.join(port.layout_tests_dir(), 'SlowTests')
flaky_tests_path = port._filesystem.join(port.layout_tests_dir(), 'FlakyTests')
skia_overrides_path = port.path_from_chromium_base( skia_overrides_path = port.path_from_chromium_base(
'skia', 'skia_test_expectations.txt') 'skia', 'skia_test_expectations.txt')
...@@ -343,20 +344,21 @@ class PortTestCase(unittest.TestCase): ...@@ -343,20 +344,21 @@ class PortTestCase(unittest.TestCase):
self.assertEqual(port.expectations_files(), self.assertEqual(port.expectations_files(),
[generic_path, skia_overrides_path, w3c_overrides_path, [generic_path, skia_overrides_path, w3c_overrides_path,
never_fix_tests_path, stale_tests_path, slow_tests_path, never_fix_tests_path, stale_tests_path, slow_tests_path,
chromium_overrides_path]) flaky_tests_path, chromium_overrides_path])
port._options.builder_name = 'builder (deps)' port._options.builder_name = 'builder (deps)'
self.assertEqual(port.expectations_files(), self.assertEqual(port.expectations_files(),
[generic_path, skia_overrides_path, w3c_overrides_path, [generic_path, skia_overrides_path, w3c_overrides_path,
never_fix_tests_path, stale_tests_path, slow_tests_path, never_fix_tests_path, stale_tests_path, slow_tests_path,
chromium_overrides_path]) flaky_tests_path, chromium_overrides_path])
# A builder which does NOT observe the Chromium test_expectations, # A builder which does NOT observe the Chromium test_expectations,
# but still observes the Skia test_expectations... # but still observes the Skia test_expectations...
port._options.builder_name = 'builder' port._options.builder_name = 'builder'
self.assertEqual(port.expectations_files(), self.assertEqual(port.expectations_files(),
[generic_path, skia_overrides_path, w3c_overrides_path, [generic_path, skia_overrides_path, w3c_overrides_path,
never_fix_tests_path, stale_tests_path, slow_tests_path]) never_fix_tests_path, stale_tests_path, slow_tests_path,
flaky_tests_path])
def test_check_sys_deps(self): def test_check_sys_deps(self):
port = self.make_port() port = self.make_port()
......
...@@ -26,18 +26,57 @@ ...@@ -26,18 +26,57 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from webkitpy.layout_tests.models.test_expectations import TestExpectationParser, TestExpectationsModel, TestExpectations from webkitpy.layout_tests.models.test_expectations import TestExpectationParser, TestExpectationsModel, TestExpectations
class FlakyTests(AbstractDeclarativeCommand): class FlakyTests(AbstractDeclarativeCommand):
name = "flaky-tests" name = "update-flaky-tests"
help_text = "Generate FlakyTests file from the flakiness dashboard" help_text = "Update FlakyTests file from the flakiness dashboard"
show_in_main_help = True show_in_main_help = True
def __init__(self):
options = [
optparse.make_option('--upload', action='store_true',
help='upload the changed FlakyTest file for review'),
]
AbstractDeclarativeCommand.__init__(self, options=options)
def execute(self, options, args, tool): def execute(self, options, args, tool):
port = tool.port_factory.get() port = tool.port_factory.get()
full_port_name = port.determine_full_port_name(tool, options, port.port_name) model = TestExpectationsModel()
expectations = BotTestExpectationsFactory().expectations_for_port(full_port_name) for port_name in tool.port_factory.all_port_names():
print TestExpectations.list_to_string(expectations.expectation_lines()) expectations = BotTestExpectationsFactory().expectations_for_port(port_name)
for line in expectations.expectation_lines(only_ignore_very_flaky=True):
model.add_expectation_line(line)
# FIXME: We need an official API to get all the test names or all test lines.
lines = model._test_to_expectation_line.values()
lines.sort(key=lambda line: line.path)
# Skip any tests which are mentioned in the dashboard but not in our checkout:
fs = tool.filesystem
lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)
flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests')
# Note: This includes all flaky tests from the dashboard, even ones mentioned
# in existing TestExpectations. We could certainly load existing TestExpectations
# and filter accordingly, or update existing TestExpectations instead of FlakyTests.
with open(flaky_tests_path, 'w') as flake_file:
flake_file.write(TestExpectations.list_to_string(lines))
if not options.upload:
return 0
files = tool.scm().changed_files()
flaky_tests_path = 'LayoutTests/FlakyTests'
if flaky_tests_path not in files:
print "%s is not changed, not uploading." % flaky_tests_path
return 0
commit_message = "Update FlakyTests"
git_cmd = ['git', 'commit', '-m', commit_message, flaky_tests_path]
tool.executive.run_command(git_cmd)
git_cmd = ['git', 'cl', 'upload', '--use-commit-queue', '--send-mail']
tool.executive.run_command(git_cmd)
# If there are changes to git, upload.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment