Commit 238f6ccc authored by tansell, committed by Commit bot

webkitpy: Reworking test chunking for sharding.

Tests which are not being run as part of this shard should not appear in
the output. This means the chunking needs to occur *before* the test
expectations are loaded as any test with an expectation will appear in
the output.

After doing this:

 * It is possible to merge the test shard output without having to
   understand the different test states.

 * It significantly reduces the size of the output on each shard, as
   each shard's output only contains details about the tests that are
   actually assigned to it.
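
Illustrating the first point above: because each shard's output now covers only
the tests assigned to that shard, combining per-shard results is a plain union
over disjoint keys. The sketch below is hypothetical; the result dicts and the
merge_shard_results() helper are invented for this example and are not part of
webkitpy.

    # Hypothetical illustration only -- not webkitpy code.
    shard_1 = {'fast/css/a.html': 'PASS', 'fast/css/b.html': 'FAIL'}
    shard_2 = {'fast/dom/c.html': 'PASS'}

    def merge_shard_results(*shards):
        merged = {}
        for shard in shards:
            # Shards cover disjoint tests, so a plain dict union suffices;
            # there is no need to recognise and drop "skipped because this
            # test belongs to another shard" entries.
            merged.update(shard)
        return merged

    assert merge_shard_results(shard_1, shard_2) == {
        'fast/css/a.html': 'PASS',
        'fast/css/b.html': 'FAIL',
        'fast/dom/c.html': 'PASS',
    }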

BUG=524758

Review-Url: https://codereview.chromium.org/2693533003
Cr-Commit-Position: refs/heads/master@{#450623}
parent e47e5f7e
@@ -96,16 +96,31 @@ class Manager(object):
         self._printer.write_update("Collecting tests ...")
         running_all_tests = False
         try:
-            paths, test_names, running_all_tests = self._collect_tests(args)
+            paths, all_test_names, running_all_tests = self._collect_tests(args)
         except IOError:
             # This is raised if --test-list doesn't exist
             return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
 
+        # Create a sorted list of test files so the subset chunk,
+        # if used, contains alphabetically consecutive tests.
+        if self._options.order == 'natural':
+            all_test_names.sort(key=self._port.test_key)
+        elif self._options.order == 'random':
+            all_test_names.sort()
+            random.Random(self._options.seed).shuffle(all_test_names)
+
+        test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names)
+
         self._printer.write_update("Parsing expectations ...")
         self._expectations = test_expectations.TestExpectations(self._port, test_names)
 
         tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
-        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
+
+        self._expectations.remove_tests(tests_in_other_chunks)
+
+        self._printer.print_found(
+            len(all_test_names), len(test_names), len(tests_to_run),
+            self._options.repeat_each, self._options.iterations)
 
         # Check to make sure we're not skipping every test.
         if not tests_to_run:
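
For context, split_into_chunks() belongs to the layout test finder; only its
call site appears in this diff. The sketch below is a simplified, hypothetical
version of what such a splitter does: the chunk_index/num_chunks parameters are
assumptions for the example, while the contiguous-slice behaviour matches the
comment above about keeping alphabetically consecutive tests in one chunk.

    # Hypothetical sketch -- the real implementation lives in the finder and
    # is driven by the run-webkit-tests chunking options.
    def split_into_chunks(all_test_names, chunk_index, num_chunks):
        """Return (tests assigned to this chunk, tests assigned to other chunks)."""
        # Ceiling division so only the last chunk can come up short.
        chunk_size = (len(all_test_names) + num_chunks - 1) // num_chunks
        start = chunk_index * chunk_size
        end = start + chunk_size
        return all_test_names[start:end], all_test_names[:start] + all_test_names[end:]

    # With a sorted input, each chunk is an alphabetically consecutive slice:
    names = ['fast/css/a.html', 'fast/css/b.html', 'fast/dom/c.html', 'fast/dom/d.html']
    assert split_into_chunks(names, 0, 2) == (
        ['fast/css/a.html', 'fast/css/b.html'],
        ['fast/dom/c.html', 'fast/dom/d.html'])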
@@ -242,21 +257,6 @@ class Manager(object):
         tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
         tests_to_run = [test for test in test_names if test not in tests_to_skip]
 
-        if not tests_to_run:
-            return tests_to_run, tests_to_skip
-
-        # Create a sorted list of test files so the subset chunk,
-        # if used, contains alphabetically consecutive tests.
-        if self._options.order == 'natural':
-            tests_to_run.sort(key=self._port.test_key)
-        elif self._options.order == 'random':
-            tests_to_run.sort()
-            random.Random(self._options.seed).shuffle(tests_to_run)
-
-        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
-        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
-        tests_to_skip.update(tests_in_other_chunks)
-
         return tests_to_run, tests_to_skip
 
     def _test_input_for_file(self, test_file):
...
@@ -1174,6 +1174,11 @@ class TestExpectations(object):
             model.add_expectation_line(expectation_line)
         self._model.merge_model(model)
 
+    def remove_tests(self, tests_to_remove):
+        for test in self._expectations:
+            if test.name and test.name in tests_to_remove:
+                self.remove_expectation_line(test)
+
     def add_expectations_from_bot(self):
         # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
         # dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
...
@@ -101,11 +101,14 @@ class Printer(object):
         self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
         self._print_default('')
 
-    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
-        found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
+    def print_found(self, num_all_test_files, num_shard_test_files, num_to_run, repeat_each, iterations):
+        found_str = 'Found %s' % grammar.pluralize('test', num_shard_test_files)
+        if num_all_test_files != num_shard_test_files:
+            found_str += ' (total %d)' % num_all_test_files
+        found_str += '; running %d' % num_to_run
         if repeat_each * iterations > 1:
             found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
-        found_str += ', skipping %d' % (num_all_test_files - num_to_run)
+        found_str += ', skipping %d' % (num_shard_test_files - num_to_run)
         self._print_default(found_str + '.')
 
     def print_expected(self, run_results, tests_with_result_type_callback):
...
@@ -232,11 +232,16 @@ class Testprinter(unittest.TestCase):
     def test_print_found(self):
         printer, err = self.get_printer()
 
-        printer.print_found(100, 10, 1, 1)
+        self.reset(err)
+        printer.print_found(100, 100, 10, 1, 1)
         self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
 
         self.reset(err)
-        printer.print_found(100, 10, 2, 3)
+        printer.print_found(100, 20, 10, 1, 1)
+        self.assertWritten(err, ["Found 20 tests (total 100); running 10, skipping 10.\n"])
+
+        self.reset(err)
+        printer.print_found(100, 100, 10, 2, 3)
         self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
 
     def test_debug_rwt_logging_is_throttled(self):
...