Commit 93e2682c authored by danakj, committed by Commit Bot

Stop removing TestExpectations for tests that won't run on the shard

This removal was added in 238f6ccc, which says it was done to keep
not-run tests out of the shard bot's output. The list of
TestExpectations no longer appears to affect that output; only the
list of tests to be run on the shard appears in the output.json file.

After building the model of ~80,000 TestExpectations, we walk all of
them and remove the tests that are not going to run on the bot. That
pass is O(TestExpectations * TestsNotRunningOnBot), roughly
O(100k * 100k), so it is very slow. Locally, skipping it saves
31 of the 46 seconds of startup time spent between these two log lines:

16:14:54.439 117357 Collecting tests ...
...
16:15:40.689 117357 Found 3626 tests (total 86597); running 3388, skipping 238.

On Windows bots this time goes from 65 seconds to 28 seconds, saving 37
seconds, or about 60% of the test-collection startup time.
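
For illustration only (a hypothetical sketch, not the actual blinkpy code):
the removed pass amounts to a per-expectation membership check against the
full collection of not-run test names, which is quadratic when that
collection is a plain list:

    # Hypothetical sketch of the removed work; names are illustrative,
    # not the real blinkpy API.
    def remove_not_run_expectations(expectation_lines, tests_not_running_on_bot):
        # O(len(expectation_lines) * len(tests_not_running_on_bot)) when the
        # second argument is a list, because 'in' scans it linearly each time.
        kept = []
        for line in expectation_lines:  # ~80,000 expectation lines
            if line.test_name in tests_not_running_on_bot:  # O(N) list scan
                continue  # drop the expectation for a test this shard won't run
            kept.append(line)
        return kept

Skipping this pass entirely (this CL) avoids the ~100k * ~100k work during
startup; building a set() for the membership test would also avoid it, but
the pass no longer buys anything.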

R=robertma@chromium.org

Bug: 982208
Change-Id: I0570cc5610f296b3f5850973f02fb85946ebaf38
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1913165
Commit-Queue: danakj <danakj@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#714941}
parent 788a3004
@@ -103,7 +103,7 @@ class Manager(object):
             # This is raised if --test-list doesn't exist
             return test_run_results.RunDetails(exit_code=exit_codes.NO_TESTS_EXIT_STATUS)
-        test_names, tests_in_other_chunks = self._finder.split_into_chunks(all_test_names)
+        test_names = self._finder.split_into_chunks(all_test_names)
         if self._options.order == 'natural':
             test_names.sort(key=self._port.test_key)
@@ -116,8 +116,6 @@ class Manager(object):
         tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
-        self._expectations.remove_tests_from_expectations(tests_in_other_chunks)
-
         self._printer.print_found(
             len(all_test_names), len(test_names), len(tests_to_run),
             self._options.repeat_each, self._options.iterations)
@@ -185,7 +185,7 @@ class WebTestFinder(object):
     def split_into_chunks(self, test_names):
         """split into a list to run and a set to skip, based on --shard_index and --total_shards."""
         if self._options.shard_index is None and self._options.total_shards is None:
-            return test_names, set()
+            return test_names
         if self._options.shard_index is None:
             raise ValueError('Must provide --shard-index or GTEST_SHARD_INDEX when sharding.')
@@ -207,15 +207,11 @@ class WebTestFinder(object):
             test_name
             for test_name, test_index in tests_and_indices
             if test_index == index]
-        other_tests = [
-            test_name
-            for test_name, test_index in tests_and_indices
-            if test_index != index]
         _log.debug('chunk %d of %d contains %d tests of %d',
                    index, count, len(tests_to_run), len(test_names))
-        return tests_to_run, other_tests
+        return tests_to_run

 def filter_tests(tests, filters):
@@ -100,34 +100,34 @@ class WebTestFinderTests(unittest.TestCase):
         with mock.patch('__builtin__.hash', int):
             tests = [1, 2, 3, 4]
-            self.assertEqual(([1, 2, 3, 4], []), split(tests, 0, 1))
+            self.assertEqual([1, 2, 3, 4], split(tests, 0, 1))
-            self.assertEqual(([2, 4], [1, 3]), split(tests, 0, 2))
-            self.assertEqual(([1, 3], [2, 4]), split(tests, 1, 2))
+            self.assertEqual([2, 4], split(tests, 0, 2))
+            self.assertEqual([1, 3], split(tests, 1, 2))
-            self.assertEqual(([3], [1, 2, 4]), split(tests, 0, 3))
-            self.assertEqual(([1, 4], [2, 3]), split(tests, 1, 3))
-            self.assertEqual(([2], [1, 3, 4]), split(tests, 2, 3))
+            self.assertEqual([3], split(tests, 0, 3))
+            self.assertEqual([1, 4], split(tests, 1, 3))
+            self.assertEqual([2], split(tests, 2, 3))

             tests = [1, 2, 3, 4, 5]
-            self.assertEqual(([1, 2, 3, 4, 5], []), split(tests, 0, 1))
+            self.assertEqual([1, 2, 3, 4, 5], split(tests, 0, 1))
-            self.assertEqual(([2, 4], [1, 3, 5]), split(tests, 0, 2))
-            self.assertEqual(([1, 3, 5], [2, 4]), split(tests, 1, 2))
+            self.assertEqual([2, 4], split(tests, 0, 2))
+            self.assertEqual([1, 3, 5], split(tests, 1, 2))
-            self.assertEqual(([3], [1, 2, 4, 5]), split(tests, 0, 3))
-            self.assertEqual(([1, 4], [2, 3, 5]), split(tests, 1, 3))
-            self.assertEqual(([2, 5], [1, 3, 4]), split(tests, 2, 3))
+            self.assertEqual([3], split(tests, 0, 3))
+            self.assertEqual([1, 4], split(tests, 1, 3))
+            self.assertEqual([2, 5], split(tests, 2, 3))

             tests = [1, 2, 3, 4, 5, 6]
-            self.assertEqual(([1, 2, 3, 4, 5, 6], []), split(tests, 0, 1))
+            self.assertEqual([1, 2, 3, 4, 5, 6], split(tests, 0, 1))
-            self.assertEqual(([2, 4, 6], [1, 3, 5]), split(tests, 0, 2))
-            self.assertEqual(([1, 3, 5], [2, 4, 6]), split(tests, 1, 2))
+            self.assertEqual([2, 4, 6], split(tests, 0, 2))
+            self.assertEqual([1, 3, 5], split(tests, 1, 2))
-            self.assertEqual(([3, 6], [1, 2, 4, 5]), split(tests, 0, 3))
-            self.assertEqual(([1, 4], [2, 3, 5, 6]), split(tests, 1, 3))
-            self.assertEqual(([2, 5], [1, 3, 4, 6]), split(tests, 2, 3))
+            self.assertEqual([3, 6], split(tests, 0, 3))
+            self.assertEqual([1, 4], split(tests, 1, 3))
+            self.assertEqual([2, 5], split(tests, 2, 3))

 class FilterTestsTests(unittest.TestCase):
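For reference, a simplified sketch of the sharding behavior the updated tests
above expect (an assumption inferred from those assertions, not the real
WebTestFinder implementation, which also validates the shard options and logs
the chunk size): tests are assigned to shards by hashing the test name modulo
the shard count, and after this change only the current shard's chunk is
returned:

    # Simplified, hypothetical sketch consistent with the unit tests above.
    def split_into_chunks(test_names, shard_index, total_shards):
        # Keep only the tests whose hash falls on this shard.
        return [name for name in test_names
                if hash(name) % total_shards == shard_index]

    # With hash(n) == n for small ints (as in the mocked test above):
    #   split_into_chunks([1, 2, 3, 4], 0, 2) -> [2, 4]
    #   split_into_chunks([1, 2, 3, 4], 1, 2) -> [1, 3]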
@@ -768,12 +768,6 @@ class TestExpectationsModel(object):
         return ' '.join(retval)

-    def remove_expectation_line(self, test):
-        if not self.has_test(test):
-            return
-        self._clear_expectations_for_test(test)
-        del self._test_to_expectation_line[test]
-
     def add_expectation_line(self, expectation_line,
                              model_all_expectations=False):
         """Returns a list of warnings encountered while matching specifiers."""
@@ -1156,18 +1150,6 @@ class TestExpectations(object):
             model.add_expectation_line(expectation_line)
         self._model.merge_model(model)

-    def remove_tests_from_expectations(self, tests_to_remove):
-        for test in self._expectations:
-            if not test.name:
-                continue
-            if test.name not in tests_to_remove:
-                continue
-            self._expectations.remove(test)
-            if not self._model.has_test(test.name):
-                continue
-            line = self._model.get_expectation_line(test.name)
-            self._model.remove_expectation_line(line)
-
     def add_expectations_from_bot(self):
         # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
         # dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?