Commit 66122271 authored by Brian Sheedy's avatar Brian Sheedy Committed by Chromium LUCI CQ

Add stale removal to unexpected pass finder

Moves the splitting of expectations based on staleness out of the
output code and uses it to automatically remove stale expectations from
the expectation file if specified by the user.

Bug: 998329
Change-Id: I317ded44447203b8fddc2c6277ed3f5f5b66a0f9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2585846
Reviewed-by: Yuly Novikov <ynovikov@chromium.org>
Commit-Queue: Brian Sheedy <bsheedy@chromium.org>
Auto-Submit: Brian Sheedy <bsheedy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#836358}
parent 30e5d69e
......@@ -97,6 +97,11 @@ def ParseArgs():
],
default='html',
help='How to output script results.')
parser.add_argument('--remove-stale-expectations',
action='store_true',
default=False,
help='Automatically remove any expectations that are '
'determined to be stale from the expectation file.')
parser.add_argument('-v',
'--verbose',
action='count',
......@@ -120,6 +125,10 @@ def ParseArgs():
'%s_expectations.txt' %
SUITE_TO_EXPECTATIONS_MAP.get(args.suite, args.suite))
if args.remove_stale_expectations and not args.expectation_file:
raise argparse.ArgumentError('--remove-stale-expectations',
'Can only be used with expectation files')
return args
......@@ -167,9 +176,21 @@ def main():
args.project, args.num_samples))
unused_expectations = expectations.FilterOutUnusedExpectations(
test_expectation_map)
result_output.OutputResults(test_expectation_map, unmatched,
stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
test_expectation_map)
result_output.OutputResults(stale, semi_stale, active, unmatched,
unused_expectations, args.output_format)
if args.remove_stale_expectations:
stale_expectations = []
for _, expectation_map in stale.iteritems():
stale_expectations.extend(expectation_map.keys())
stale_expectations.extend(unused_expectations)
expectations.RemoveExpectationsFromFile(stale_expectations,
args.expectation_file)
print('Stale expectations removed from %s. Stale comments, etc. may still '
'need to be removed.' % args.expectation_file)
if __name__ == '__main__':
main()
......@@ -5,6 +5,8 @@
import logging
import validate_tag_consistency
from typ import expectations_parser
from unexpected_passes import data_types
......@@ -100,3 +102,125 @@ def FilterOutUnusedExpectations(test_expectation_map):
logging.debug('Found %d empty tests: %s', len(empty_tests), empty_tests)
return unused_expectations
def SplitExpectationsByStaleness(test_expectation_map):
  """Separates |test_expectation_map| based on expectation staleness.

  Args:
    test_expectation_map: A dict in the format returned by
        CreateTestExpectationMap() with any unused expectations already
        filtered out.

  Returns:
    Three dicts (stale_dict, semi_stale_dict, active_dict). All three combined
    contain the information of |test_expectation_map| in the same format.
    |stale_dict| contains entries for expectations that are no longer being
    helpful, |semi_stale_dict| contains entries for expectations that might be
    removable or modifiable, but have at least one failed test run.
    |active_dict| contains entries for expectations that are preventing
    failures on all builders they're active on, and thus shouldn't be removed.
  """
  # Bucket keys for the temporary per-expectation classification map.
  FULL_PASS = 1
  NEVER_PASS = 2
  PARTIAL_PASS = 3

  stale_dict = {}
  semi_stale_dict = {}
  active_dict = {}

  # Hoisted out of the loops so the function object is created once instead of
  # on every expectation iteration. |tmp_map| is passed explicitly since a new
  # one is built for each expectation.
  def _CopyPassesIntoBuilderMap(tmp_map, builder_map, pass_types):
    for pt in pass_types:
      for builder, steps in tmp_map[pt].items():
        builder_map.setdefault(builder, {}).update(steps)

  # .items() instead of .iteritems() so this works under both Python 2 and
  # Python 3; behavior is identical.
  for test_name, expectation_map in test_expectation_map.items():
    for expectation, builder_map in expectation_map.items():
      # A temporary map to hold data so we can later determine whether an
      # expectation is stale, semi-stale, or active.
      tmp_map = {
          FULL_PASS: {},
          NEVER_PASS: {},
          PARTIAL_PASS: {},
      }
      for builder_name, step_map in builder_map.items():
        fully_passed = {}
        partially_passed = {}
        never_passed = {}
        for step_name, stats in step_map.items():
          if stats.passed_builds == stats.total_builds:
            assert step_name not in fully_passed
            fully_passed[step_name] = stats
          elif stats.failed_builds == stats.total_builds:
            assert step_name not in never_passed
            never_passed[step_name] = stats
          else:
            assert step_name not in partially_passed
            partially_passed[step_name] = stats

        if fully_passed:
          tmp_map[FULL_PASS][builder_name] = fully_passed
        if never_passed:
          tmp_map[NEVER_PASS][builder_name] = never_passed
        if partially_passed:
          tmp_map[PARTIAL_PASS][builder_name] = partially_passed

      # Handle the case of a stale expectation (every step everywhere passed).
      if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
        output_builder_map = stale_dict.setdefault(test_name,
                                                   {}).setdefault(
                                                       expectation, {})
        _CopyPassesIntoBuilderMap(tmp_map, output_builder_map, [FULL_PASS])
      # Handle the case of an active expectation (no step fully passed).
      elif not tmp_map[FULL_PASS]:
        output_builder_map = active_dict.setdefault(test_name,
                                                    {}).setdefault(
                                                        expectation, {})
        _CopyPassesIntoBuilderMap(tmp_map, output_builder_map,
                                  [NEVER_PASS, PARTIAL_PASS])
      # Handle the case of a semi-stale expectation (a mix of the above).
      else:
        # TODO(crbug.com/998329): Sort by pass percentage so it's easier to
        # find problematic builders without highlighting.
        output_builder_map = semi_stale_dict.setdefault(
            test_name, {}).setdefault(expectation, {})
        _CopyPassesIntoBuilderMap(tmp_map, output_builder_map,
                                  [FULL_PASS, PARTIAL_PASS, NEVER_PASS])
  return stale_dict, semi_stale_dict, active_dict
def RemoveExpectationsFromFile(expectations, expectation_file):
  """Removes lines corresponding to |expectations| from |expectation_file|.

  Args:
    expectations: A list of data_types.Expectations to remove.
    expectation_file: A filepath pointing to an expectation file to remove
        lines from.
  """
  header = validate_tag_consistency.TAG_HEADER
  with open(expectation_file) as f:
    input_contents = f.read()

  # Collect kept lines in a list and join once at the end instead of
  # repeatedly concatenating strings, which is quadratic in the worst case.
  output_lines = []
  for line in input_contents.splitlines(True):
    # Auto-add any comments or empty lines
    stripped_line = line.strip()
    if not stripped_line or stripped_line.startswith('#'):
      output_lines.append(line)
      continue
    # Parse the line in isolation; the tag header is prepended so the parser
    # accepts the tags used on the line.
    single_line_content = header + line
    list_parser = expectations_parser.TaggedTestListParser(single_line_content)
    assert len(list_parser.expectations) == 1
    typ_expectation = list_parser.expectations[0]
    current_expectation = data_types.Expectation(typ_expectation.test,
                                                 typ_expectation.tags,
                                                 typ_expectation.raw_results)
    # Add any lines containing expectations that don't match any of the given
    # expectations to remove. Membership testing with "in" uses
    # Expectation.__eq__, equivalent to the original any()-based check.
    if current_expectation not in expectations:
      output_lines.append(line)

  with open(expectation_file, 'w') as f:
    f.write(''.join(output_lines))
......@@ -8,8 +8,11 @@ import unittest
from pyfakefs import fake_filesystem_unittest
import validate_tag_consistency
from unexpected_passes import data_types
from unexpected_passes import expectations
from unexpected_passes import unittest_utils as uu
FAKE_EXPECTATION_FILE_CONTENTS = """\
# tags: [ win linux ]
......@@ -120,5 +123,252 @@ class FilterOutUnusedExpectationsUnittest(unittest.TestCase):
self.assertEqual(expectation_map, {})
class SplitExpectationsByStalenessUnittest(unittest.TestCase):
  """Tests for expectations.SplitExpectationsByStaleness."""

  def testEmptyInput(self):
    """Tests that nothing blows up with empty input."""
    stale_dict, semi_stale_dict, active_dict =\
        expectations.SplitExpectationsByStaleness({})
    self.assertEqual(stale_dict, {})
    self.assertEqual(semi_stale_dict, {})
    self.assertEqual(active_dict, {})

  def testStaleExpectations(self):
    """Tests output when only stale expectations are provided."""
    # Every step on every builder fully passed, so all expectations should be
    # classified as stale.
    expectation_map = {
        'foo': {
            data_types.Expectation('foo', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 0),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(4, 0)
                },
            },
            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(5, 0),
                    'step2': uu.CreateStatsWithPassFails(6, 0),
                },
            },
        },
        'bar': {
            data_types.Expectation('bar', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(7, 0),
                },
            },
        },
    }
    # NOTE(review): relies on 'import copy' at the top of the file, which is
    # not visible in this diff - confirm it is present.
    expected_stale_dict = copy.deepcopy(expectation_map)
    stale_dict, semi_stale_dict, active_dict =\
        expectations.SplitExpectationsByStaleness(expectation_map)
    self.assertEqual(stale_dict, expected_stale_dict)
    self.assertEqual(semi_stale_dict, {})
    self.assertEqual(active_dict, {})

  def testActiveExpectations(self):
    """Tests output when only active expectations are provided."""
    # No step on any builder ever passed, so all expectations should be
    # classified as active.
    expectation_map = {
        'foo': {
            data_types.Expectation('foo', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(0, 1),
                    'step2': uu.CreateStatsWithPassFails(0, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(0, 3),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(0, 5),
                    'step2': uu.CreateStatsWithPassFails(0, 6),
                },
            },
        },
        'bar': {
            data_types.Expectation('bar', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(0, 7),
                },
            },
        },
    }
    expected_active_dict = copy.deepcopy(expectation_map)
    stale_dict, semi_stale_dict, active_dict =\
        expectations.SplitExpectationsByStaleness(expectation_map)
    self.assertEqual(stale_dict, {})
    self.assertEqual(semi_stale_dict, {})
    self.assertEqual(active_dict, expected_active_dict)

  def testSemiStaleExpectations(self):
    """Tests output when only semi-stale expectations are provided."""
    # Each expectation has a mix of fully passed, partially passed, and/or
    # never passed steps, so all should be classified as semi-stale.
    expectation_map = {
        'foo': {
            data_types.Expectation('foo', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
            data_types.Expectation('foo', ['linux'], ['RetryOnFailure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(5, 0),
                    'step2': uu.CreateStatsWithPassFails(6, 6),
                },
            },
        },
        'bar': {
            data_types.Expectation('bar', ['win'], ['Failure']): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(7, 0),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(0, 8),
                },
            },
        },
    }
    expected_semi_stale_dict = copy.deepcopy(expectation_map)
    stale_dict, semi_stale_dict, active_dict =\
        expectations.SplitExpectationsByStaleness(expectation_map)
    self.assertEqual(stale_dict, {})
    self.assertEqual(semi_stale_dict, expected_semi_stale_dict)
    self.assertEqual(active_dict, {})

  def testAllExpectations(self):
    """Tests output when all three types of expectations are provided."""
    # One expectation of each staleness type, distinguished by its tag.
    expectation_map = {
        'foo': {
            data_types.Expectation('foo', ['stale'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 0),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(4, 0)
                },
            },
            data_types.Expectation('foo', ['semistale'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
            data_types.Expectation('foo', ['active'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 1),
                    'step2': uu.CreateStatsWithPassFails(2, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 3),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
        },
    }
    expected_stale = {
        'foo': {
            data_types.Expectation('foo', ['stale'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 0),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(4, 0)
                },
            },
        },
    }
    expected_semi_stale = {
        'foo': {
            data_types.Expectation('foo', ['semistale'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 0),
                    'step2': uu.CreateStatsWithPassFails(2, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 0),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
        },
    }
    expected_active = {
        'foo': {
            data_types.Expectation('foo', ['active'], 'Failure'): {
                'foo_builder': {
                    'step1': uu.CreateStatsWithPassFails(1, 1),
                    'step2': uu.CreateStatsWithPassFails(2, 2),
                },
                'bar_builder': {
                    'step1': uu.CreateStatsWithPassFails(3, 3),
                    'step2': uu.CreateStatsWithPassFails(0, 4)
                },
            },
        },
    }
    stale_dict, semi_stale_dict, active_dict =\
        expectations.SplitExpectationsByStaleness(expectation_map)
    self.assertEqual(stale_dict, expected_stale)
    self.assertEqual(semi_stale_dict, expected_semi_stale)
    self.assertEqual(active_dict, expected_active)
class RemoveExpectationsFromFileUnittest(fake_filesystem_unittest.TestCase):
  """Tests for expectations.RemoveExpectationsFromFile."""

  def setUp(self):
    # Use pyfakefs so the file writes below never touch the real filesystem.
    self.setUpPyfakefs()
    # NOTE(review): relies on 'import tempfile' at the top of the file, which
    # is not visible in this diff - confirm it is present.
    with tempfile.NamedTemporaryFile(delete=False) as f:
      self.filename = f.name

  def testExpectationRemoval(self):
    """Tests that expectations are properly removed from a file."""
    contents = validate_tag_consistency.TAG_HEADER + """
# This is a test comment
crbug.com/1234 [ win ] foo/test [ Failure ]
crbug.com/2345 [ win ] foo/test [ RetryOnFailure ]
# Another comment
[ linux ] bar/test [ RetryOnFailure ]
[ win ] bar/test [ RetryOnFailure ]
"""
    stale_expectations = [
        data_types.Expectation('foo/test', ['win'], ['Failure']),
        data_types.Expectation('bar/test', ['linux'], ['RetryOnFailure'])
    ]
    # Only the two stale expectations should be removed; comments and the
    # remaining expectation lines must be preserved verbatim.
    expected_contents = validate_tag_consistency.TAG_HEADER + """
# This is a test comment
crbug.com/2345 [ win ] foo/test [ RetryOnFailure ]
# Another comment
[ win ] bar/test [ RetryOnFailure ]
"""
    with open(self.filename, 'w') as f:
      f.write(contents)
    expectations.RemoveExpectationsFromFile(stale_expectations, self.filename)
    with open(self.filename) as f:
      self.assertEqual(f.read(), expected_contents)
if __name__ == '__main__':
  # Allow running this test file directly; verbosity=2 prints each test name.
  unittest.main(verbosity=2)
......@@ -137,7 +137,9 @@ SECTION_UNUSED = ('Unused Expectations (Indicative Of The Configuration No '
'Longer Being Tested Or Tags Changing)')
def OutputResults(test_expectation_map,
def OutputResults(stale_dict,
semi_stale_dict,
active_dict,
unmatched_results,
unused_expectations,
output_format,
......@@ -145,8 +147,15 @@ def OutputResults(test_expectation_map,
"""Outputs script results to |file_handle|.
Args:
test_expectation_map: A map in the format returned by
expectations.CreateTestExpectationMap()
stale_dict: A map in the format returned by
expectations.CreateTestExpectationMap() containing all the stale
expectations.
semi_stale_dict: A map in the format returned by
expectations.CreateTestExpectationMap() containing all the semi-stale
expectations.
active_dict: A map in the format returned by
expectations.CreateTestExpectationMap() containing all the active
expectations.
ummatched_results: Any unmatched results found while filling
|test_expectation_map|, as returned by
queries.FillExpectationMapFor[Ci|Try]Builders().
......@@ -158,8 +167,9 @@ def OutputResults(test_expectation_map,
specified, a suitable default will be used.
"""
logging.info('Outputting results in format %s', output_format)
stale_dict, semi_stale_dict, active_dict =\
_ConvertTestExpectationMapToStringDicts(test_expectation_map)
stale_str_dict = _ConvertTestExpectationMapToStringDict(stale_dict)
semi_stale_str_dict = _ConvertTestExpectationMapToStringDict(semi_stale_dict)
active_str_dict = _ConvertTestExpectationMapToStringDict(active_dict)
unmatched_results_str_dict = _ConvertUnmatchedResultsToStringDict(
unmatched_results)
unused_expectations_str_list = _ConvertUnusedExpectationsToStringList(
......@@ -169,13 +179,13 @@ def OutputResults(test_expectation_map,
file_handle = file_handle or sys.stdout
if stale_dict:
file_handle.write(SECTION_STALE + '\n')
_RecursivePrintToFile(stale_dict, 0, file_handle)
_RecursivePrintToFile(stale_str_dict, 0, file_handle)
if semi_stale_dict:
file_handle.write(SECTION_SEMI_STALE + '\n')
_RecursivePrintToFile(semi_stale_dict, 0, file_handle)
_RecursivePrintToFile(semi_stale_str_dict, 0, file_handle)
if active_dict:
file_handle.write(SECTION_ACTIVE + '\n')
_RecursivePrintToFile(active_dict, 0, file_handle)
_RecursivePrintToFile(active_str_dict, 0, file_handle)
if unused_expectations_str_list:
file_handle.write('\n' + SECTION_UNUSED + '\n')
......@@ -193,13 +203,13 @@ def OutputResults(test_expectation_map,
file_handle.write(HTML_HEADER)
if stale_dict:
file_handle.write('<h1>' + SECTION_STALE + '</h1>\n')
_RecursiveHtmlToFile(stale_dict, file_handle)
_RecursiveHtmlToFile(stale_str_dict, file_handle)
if semi_stale_dict:
file_handle.write('<h1>' + SECTION_SEMI_STALE + '</h1>\n')
_RecursiveHtmlToFile(semi_stale_dict, file_handle)
_RecursiveHtmlToFile(semi_stale_str_dict, file_handle)
if active_dict:
file_handle.write('<h1>' + SECTION_ACTIVE + '</h1>\n')
_RecursiveHtmlToFile(active_dict, file_handle)
_RecursiveHtmlToFile(active_str_dict, file_handle)
if unused_expectations_str_list:
file_handle.write('\n<h1>' + SECTION_UNUSED + "</h1>\n")
......@@ -290,18 +300,16 @@ def _LinkifyString(s):
return s
def _ConvertTestExpectationMapToStringDicts(test_expectation_map):
"""Converts |test_expectation_map| to dicts of strings for reporting.
def _ConvertTestExpectationMapToStringDict(test_expectation_map):
"""Converts |test_expectation_map| to a dict of strings for reporting.
Args:
test_expectation_map: A dict in the format output by
expectations.CreateTestExpectationMap()
Returns:
Three dictionaries stale_dict, semi_stale_dict, and active_dict. All three
combined contain the information of |test_expectation_map| in the following
format:
A string dictionary representation of |test_expectation_map| in the
following format:
{
test_name: {
expectation_summary: {
......@@ -321,28 +329,17 @@ def _ConvertTestExpectationMapToStringDicts(test_expectation_map):
}
}
}
|stale_dict| contains entries for expectations that are no longer being
helpful, |semi_stale_dict| contains entries for expectations that might be
removable or modifiable, but have at least one failed test run.
|active_dict| contains entries for expectations that are preventing failures
on all builders they're active on, and thus shouldn't be removed.
"""
stale_dict = {}
semi_stale_dict = {}
active_dict = {}
output_dict = {}
for test_name, expectation_map in test_expectation_map.iteritems():
output_dict[test_name] = {}
for expectation, builder_map in expectation_map.iteritems():
expectation_str = _FormatExpectation(expectation)
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: {},
NEVER_PASS: {},
PARTIAL_PASS: {},
}
output_dict[test_name][expectation_str] = {}
for builder_name, step_map in builder_map.iteritems():
output_dict[test_name][expectation_str][builder_name] = {}
fully_passed = []
partially_passed = {}
never_passed = []
......@@ -356,39 +353,18 @@ def _ConvertTestExpectationMapToStringDicts(test_expectation_map):
assert step_name not in partially_passed
partially_passed[step_name] = stats
output_builder_map =\
output_dict[test_name][expectation_str][builder_name]
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
output_builder_map[FULL_PASS] = fully_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = {}
output_builder_map[PARTIAL_PASS] = {}
for step_name, stats in partially_passed.iteritems():
s = _AddStatsToStr(step_name, stats)
tmp_map[PARTIAL_PASS][builder_name][s] = list(stats.failure_links)
def _CopyPassesIntoDict(d, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].iteritems():
d.setdefault(builder, {})[pt] = steps
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(test_name,
{}).setdefault(expectation_str, {})
_CopyPassesIntoDict(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(test_name, {}).setdefault(
expectation_str, {})
_CopyPassesIntoDict(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to find
# problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(test_name, {}).setdefault(
expectation_str, {})
_CopyPassesIntoDict(builder_map, [FULL_PASS, NEVER_PASS, PARTIAL_PASS])
return stale_dict, semi_stale_dict, active_dict
output_builder_map[PARTIAL_PASS][s] = list(stats.failure_links)
if never_passed:
output_builder_map[NEVER_PASS] = never_passed
return output_dict
def _ConvertUnmatchedResultsToStringDict(unmatched_results):
......
......@@ -9,7 +9,9 @@ import unittest
from pyfakefs import fake_filesystem_unittest
from unexpected_passes import data_types
from unexpected_passes import expectations
from unexpected_passes import result_output
from unexpected_passes import unittest_utils as uu
def CreateTextOutputPermutations(text, inputs):
......@@ -82,11 +84,11 @@ class ConvertUnmatchedResultsToStringDictUnittest(unittest.TestCase):
self.assertEqual(output, expected_output)
class ConvertTestExpectationMapToStringDictsUnittest(unittest.TestCase):
class ConvertTestExpectationMapToStringDictUnittest(unittest.TestCase):
def testEmptyMap(self):
"""Tests that providing an empty map is a no-op."""
self.assertEqual(result_output._ConvertTestExpectationMapToStringDicts({}),
({}, {}, {}))
self.assertEqual(result_output._ConvertTestExpectationMapToStringDict({}),
{})
def testSemiStaleMap(self):
"""Tests that everything functions when regular data is provided."""
......@@ -95,27 +97,27 @@ class ConvertTestExpectationMapToStringDictsUnittest(unittest.TestCase):
data_types.Expectation('foo', ['win', 'intel'], ['RetryOnFailure']):
{
'builder': {
'all_pass': _CreateStatsWithPassFails(2, 0),
'all_fail': _CreateStatsWithPassFails(0, 2),
'some_pass': _CreateStatsWithPassFails(1, 1),
'all_pass': uu.CreateStatsWithPassFails(2, 0),
'all_fail': uu.CreateStatsWithPassFails(0, 2),
'some_pass': uu.CreateStatsWithPassFails(1, 1),
},
},
data_types.Expectation('foo', ['linux', 'intel'], [
'RetryOnFailure'
]): {
'builder': {
'all_pass': _CreateStatsWithPassFails(2, 0),
'all_pass': uu.CreateStatsWithPassFails(2, 0),
}
},
data_types.Expectation('foo', ['mac', 'intel'], ['RetryOnFailure']):
{
'builder': {
'all_fail': _CreateStatsWithPassFails(0, 2),
'all_fail': uu.CreateStatsWithPassFails(0, 2),
}
},
},
}
expected_semi_stale = {
expected_ouput = {
'foo': {
'"RetryOnFailure" expectation on "win intel"': {
'builder': {
......@@ -132,10 +134,6 @@ class ConvertTestExpectationMapToStringDictsUnittest(unittest.TestCase):
},
},
},
},
}
expected_stale = {
'foo': {
'"RetryOnFailure" expectation on "intel linux"': {
'builder': {
'Fully passed in the following': [
......@@ -143,10 +141,6 @@ class ConvertTestExpectationMapToStringDictsUnittest(unittest.TestCase):
],
},
},
},
}
expected_active = {
'foo': {
'"RetryOnFailure" expectation on "mac intel"': {
'builder': {
'Never passed in the following': [
......@@ -157,12 +151,9 @@ class ConvertTestExpectationMapToStringDictsUnittest(unittest.TestCase):
},
}
stale_dict, semi_stale_dict, active_dict =\
result_output._ConvertTestExpectationMapToStringDicts(
str_dict = result_output._ConvertTestExpectationMapToStringDict(
expectation_map)
self.assertEqual(semi_stale_dict, expected_semi_stale)
self.assertEqual(stale_dict, expected_stale)
self.assertEqual(active_dict, expected_active)
self.assertEqual(str_dict, expected_ouput)
class HtmlToFileUnittest(fake_filesystem_unittest.TestCase):
......@@ -383,15 +374,27 @@ class OutputResultsUnittest(fake_filesystem_unittest.TestCase):
def testOutputResultsUnsupportedFormat(self):
"""Tests that passing in an unsupported format is an error."""
with self.assertRaises(RuntimeError):
result_output.OutputResults({}, {}, [], 'asdf')
result_output.OutputResults({}, {}, {}, {}, [], 'asdf')
def testOutputResultsSmoketest(self):
"""Test that nothing blows up when outputting."""
expectation_map = {
'foo': {
data_types.Expectation('foo', ['win', 'intel'], 'RetryOnFailure'): {
'builder': {
'all_pass': _CreateStatsWithPassFails(2, 0),
'stale': {
'all_pass': uu.CreateStatsWithPassFails(2, 0),
},
},
data_types.Expectation('foo', ['linux'], 'Failure'): {
'semi_stale': {
'all_pass': uu.CreateStatsWithPassFails(2, 0),
'some_pass': uu.CreateStatsWithPassFails(1, 1),
'none_pass': uu.CreateStatsWithPassFails(0, 2),
},
},
data_types.Expectation('foo', ['mac'], 'Failure'): {
'active': {
'none_pass': uu.CreateStatsWithPassFails(0, 2),
},
},
},
......@@ -406,36 +409,32 @@ class OutputResultsUnittest(fake_filesystem_unittest.TestCase):
data_types.Expectation('foo', ['linux'], 'RetryOnFailure')
]
result_output.OutputResults(expectation_map, {}, [], 'print',
stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
expectation_map)
result_output.OutputResults(stale, semi_stale, active, {}, [], 'print',
self._file_handle)
result_output.OutputResults(expectation_map, unmatched_results, [], 'print',
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
[], 'print', self._file_handle)
result_output.OutputResults(stale, semi_stale, active, {},
unmatched_expectations, 'print',
self._file_handle)
result_output.OutputResults(expectation_map, {}, unmatched_expectations,
'print', self._file_handle)
result_output.OutputResults(expectation_map, unmatched_results,
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
unmatched_expectations, 'print',
self._file_handle)
result_output.OutputResults(expectation_map, {}, [], 'html',
result_output.OutputResults(stale, semi_stale, active, {}, [], 'html',
self._file_handle)
result_output.OutputResults(expectation_map, unmatched_results, [], 'html',
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
[], 'html', self._file_handle)
result_output.OutputResults(stale, semi_stale, active, {},
unmatched_expectations, 'html',
self._file_handle)
result_output.OutputResults(expectation_map, {}, unmatched_expectations,
'html', self._file_handle)
result_output.OutputResults(expectation_map, unmatched_results,
result_output.OutputResults(stale, semi_stale, active, unmatched_results,
unmatched_expectations, 'html',
self._file_handle)
def _CreateStatsWithPassFails(passes, fails):
stats = data_types.BuildStats()
for _ in xrange(passes):
stats.AddPassedBuild()
for i in xrange(fails):
stats.AddFailedBuild('build_id%d' % i)
return stats
def _Dedent(s):
output = ''
for line in s.splitlines(True):
......
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper methods for unittests."""
from unexpected_passes import data_types
def CreateStatsWithPassFails(passes, fails):
  """Creates a BuildStats object with the given pass/fail counts.

  Args:
    passes: The number of passed builds the returned stats should contain.
    fails: The number of failed builds the returned stats should contain.

  Returns:
    A data_types.BuildStats with |passes| passed builds and |fails| failed
    builds, each failure given a unique build ID for its failure link.
  """
  stats = data_types.BuildStats()
  # range() instead of xrange() keeps this helper working under both Python 2
  # and Python 3; counts here are tiny, so materializing a list is harmless.
  for _ in range(passes):
    stats.AddPassedBuild()
  for i in range(fails):
    stats.AddFailedBuild('build_id%d' % i)
  return stats
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment