Commit df85f9f7 authored by Rakib M. Hasan, committed by Commit Bot

[web tests] Re-add flag and base expectations to JSON results

The layout test results viewer shows tests whose results differ from the
expected results specified in the non-flag-specific test expectation
files. The results viewer uses the per-test flag and base expectations
fields to find these tests. We need to re-add these per-test fields.
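
For illustration only, a rough sketch (written as a Python literal; the
values come from the unit tests in this change and are not exhaustive) of
how the re-added fields appear in full_results.json:

    results = {
        'flag_name': '/composite-after-paint',  # added only when a flag-specific expectations file is in use
        'tests': {
            'failures': {
                'expected': {
                    'text.html': {
                        # ...existing per-test fields such as 'expected' and 'actual'...
                        'flag_expectations': ['FAIL', 'CRASH'],    # from FlagExpectations/composite-after-paint
                        'base_expectations': ['FAIL', 'TIMEOUT'],  # from the non-flag-specific files
                    },
                },
            },
        },
    }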

Bug: 1047602
Bug: 986447
Change-Id: I0bf603b13672e6b97a3840275c8670a13679e830
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2036359
Commit-Queue: Rakib Hasan <rmhasan@google.com>
Reviewed-by: Robert Ma <robertma@chromium.org>
Cr-Commit-Position: refs/heads/master@{#738385}
parent 2b401f3e
@@ -46,6 +46,7 @@ class FakePort(object):
self.path = path
ALL_BUILD_TYPES = ('debug', 'release')
FLAG_EXPECTATIONS_PREFIX = 'FlagExpectations'
def test_configuration(self):
return None
...
@@ -93,6 +93,9 @@ class TestExpectations(object):
self._system_condition_tags = self._port.get_platform_tags()
self._expectations = []
self._expectations_dict = expectations_dict or port.expectations_dict()
self._flags = []
self._flag_expectations = []
self._base_expectations = []
filesystem = self._port.host.filesystem
expectation_errors = []
@@ -103,6 +106,15 @@ class TestExpectations(object):
if ret:
expectation_errors.append('Parsing file %s produced following errors\n%s' % (path, errors))
self._expectations.append(test_expectations)
flag_match = re.match('.*' + port.FLAG_EXPECTATIONS_PREFIX + '(.*)', path)
# If the file is a flag-specific file, store the typ.TestExpectation
# instance in the _flag_expectations list; otherwise store it in _base_expectations.
if flag_match:
self._flags.append(flag_match.group(1))
self._flag_expectations.append(test_expectations)
else:
self._base_expectations.append(test_expectations)
if port.get_option('ignore_tests', []):
content = '# results: [ Skip ]\n'
@@ -124,6 +136,10 @@ class TestExpectations(object):
def expectations(self):
return self._expectations[:]
@property
def flag_name(self):
return ' '.join(self._flags)
@property
def general_expectations(self):
# Get typ.TestExpectations instance for general
@@ -162,11 +178,12 @@ class TestExpectations(object):
test_expectations.parse_tagged_list(content)
self._expectations.append(test_expectations)
@staticmethod
def _get_expectations(expectations, test):
results = set()
reasons = set()
is_slow_test = False
for test_exp in expectations:
expected_results = test_exp.expectations_for(test)
# The returned Expectation instance from expectations_for has the default
# PASS expected result. If there are no expected results in the first
@@ -184,6 +201,18 @@ class TestExpectations(object):
test=test, results=results, is_slow_test=is_slow_test,
reason=' '.join(reasons))
def get_expectations(self, test):
return self._get_expectations(self._expectations, test)
def get_flag_expectations(self, test):
exp = self._get_expectations(self._flag_expectations, test)
if exp.is_slow_test or exp.results != set([ResultType.Pass]):
return exp
return None
def get_base_expectations(self, test):
return self._get_expectations(self._base_expectations, test)
def get_tests_with_expected_result(self, result):
"""This method will return a list of tests and directories which
have the result argument value in its expected results
...
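
As a rough usage sketch (not part of this change; it assumes an
already-constructed Port instance named port), the new accessors are meant
to be used the way summarize_results does further below:

    expectations = TestExpectations(port)
    flag_exp = expectations.get_flag_expectations('failures/expected/text.html')
    # get_flag_expectations returns None when the flag-specific files leave the
    # test at the default PASS and do not mark it Slow.
    if flag_exp:
        base_exp = expectations.get_base_expectations('failures/expected/text.html')
        print(expectations.flag_name, sorted(flag_exp.results), sorted(base_exp.results))
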
@@ -96,6 +96,32 @@ class BasicTests(Base):
self.assert_exp('failures/expected/image.html', ResultType.Crash)
class FlagExpectationsTests(Base):
def setup_using_raw_expectations(self, base_exps='', flag_exps='', flag_name=''):
self._general_exp_filename = 'TestExpectations'
self._port.host.filesystem.write_text_file(self._general_exp_filename, base_exps)
expectations_dict = {self._general_exp_filename: base_exps}
# set up flag specific expectations
if flag_name:
self._flag_exp_filename = self._port.host.filesystem.join('FlagExpectations', flag_name)
self._port.host.filesystem.write_text_file(self._flag_exp_filename, flag_exps)
expectations_dict[self._flag_exp_filename] = flag_exps
self._test_expectations = TestExpectations(self._port, expectations_dict)
def test_add_flag_test_expectations(self):
raw_flag_exps = """
# tags: [ Win ]
# results: [ Failure ]
[ Win ] failures/expected/text.html [ Failure ]
"""
self.setup_using_raw_expectations(flag_exps=raw_flag_exps, flag_name='composite-after-paint')
flag_exp = self._test_expectations.get_flag_expectations('failures/expected/text.html')
self.assertEqual(flag_exp.results, set([ResultType.Failure]))
self.assertEqual(self._test_expectations.flag_name, '/composite-after-paint')
class SystemConfigurationRemoverTests(Base):
def __init__(self, testFunc):
...
@@ -250,6 +250,13 @@ def summarize_results(port_obj, expectations, initial_results,
test_dict['expected'] = expected
test_dict['actual'] = ' '.join(actual)
# If a flag was added, add the flag-specific test expectations to the per-test fields
flag_exp = expectations.get_flag_expectations(test_name)
if flag_exp:
base_exp = expectations.get_base_expectations(test_name)
test_dict['flag_expectations'] = list(flag_exp.results)
test_dict['base_expectations'] = list(base_exp.results)
# Fields below are optional. To avoid bloating the output results json
# too much, only add them when they are True or non-empty.
@@ -357,6 +364,10 @@ def summarize_results(port_obj, expectations, initial_results,
results['random_order_seed'] = port_obj.get_option('seed')
results['path_delimiter'] = '/'
# If there is a flag name then add the flag name field
if expectations.flag_name:
results['flag_name'] = expectations.flag_name
# Don't do this by default since it takes >100ms.
# It's only used for rebaselining and uploading data to the flakiness dashboard.
results['chromium_revision'] = ''
...
@@ -475,6 +475,12 @@ class TestPort(Port):
sample_files[cp[0]] = sample_file
return sample_files
def _flag_specific_expectations_path(self):
flags = [f[2:] for f in self._specified_additional_driver_flags()]
if not flags:
return None
return self._filesystem.join(self.web_tests_dir(), 'FlagExpectations', flags[0])
def look_for_new_crash_logs(self, crashed_processes, start_time):
del start_time
crash_logs = {}
...
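
For intuition, a hedged note on what the helper above derives (the exact
web_tests_dir() location depends on the checkout):

    # Hypothetical example of the path _flag_specific_expectations_path builds:
    #   _specified_additional_driver_flags() -> ['--composite-after-paint']
    #   stripped flag name                   -> 'composite-after-paint'
    #   returned path                        -> <web_tests_dir>/FlagExpectations/composite-after-paint
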
@@ -483,6 +483,105 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
tests_run = get_tests_run([WEB_TESTS_LAST_COMPONENT + '/passes/text.html'])
self.assertEqual(['passes/text.html'], tests_run)
def test_no_flag_specific_files_json_results(self):
host = MockHost()
port = host.port_factory.get('test-win-win7')
host.filesystem.write_text_file(
'/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
self.assertTrue(logging_run(
['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
'--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
'/tmp/overrides.txt'],
tests_included=True, host=host))
results = json.loads(
host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
test_results = results['tests']['failures']['expected']['text.html']
self.assertNotIn('flag_name', results)
self.assertNotIn('flag_expectations', test_results)
self.assertNotIn('base_expectations', test_results)
def test_no_flag_expectations_found_json_results(self):
host = MockHost()
port = host.port_factory.get('test-win-win7')
flag_exp_path = host.filesystem.join(
port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
host.filesystem.write_text_file(
'/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
host.filesystem.write_text_file(flag_exp_path, '')
self.assertTrue(logging_run(
['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
'--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
'/tmp/overrides.txt'],
tests_included=True, host=host))
results = json.loads(
host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
test_results = results['tests']['failures']['expected']['text.html']
self.assertEqual(results['flag_name'], '/composite-after-paint')
self.assertNotIn('flag_expectations', test_results)
self.assertNotIn('base_expectations', test_results)
def test_slow_flag_expectations_in_json_results(self):
host = MockHost()
port = host.port_factory.get('test-win-win7')
flag_exp_path = host.filesystem.join(
port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
host.filesystem.write_text_file(
'/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
host.filesystem.write_text_file(
flag_exp_path,
'# results: [ Slow ]\nfailures/expected/text.html [ Slow ]')
self.assertTrue(logging_run(
['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
'--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
'/tmp/overrides.txt'],
tests_included=True, host=host))
results = json.loads(
host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
test_results = results['tests']['failures']['expected']['text.html']
self.assertEqual(results['flag_name'], '/composite-after-paint')
self.assertEqual(test_results['flag_expectations'], ['PASS'])
self.assertEqual(test_results['base_expectations'], ['FAIL', 'TIMEOUT'])
def test_flag_and_base_expectations_in_json_results(self):
host = MockHost()
port = host.port_factory.get('test-win-win7')
flag_exp_path = host.filesystem.join(
port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
host.filesystem.write_text_file(
'/tmp/overrides.txt', '# results: [ Timeout ]\nfailures/expected/text.html [ Timeout ]')
host.filesystem.write_text_file(
flag_exp_path,
'# results: [ Crash Failure ]\nfailures/expected/text.html [ Crash Failure ]')
self.assertTrue(logging_run(
['--order', 'natural', 'failures/expected/text.html', '--num-retries', '1',
'--additional-driver-flag', '--composite-after-paint', '--additional-expectations',
'/tmp/overrides.txt'],
tests_included=True, host=host))
results = json.loads(
host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
test_results = results['tests']['failures']['expected']['text.html']
self.assertEqual(results['flag_name'], '/composite-after-paint')
self.assertEqual(test_results['flag_expectations'], ['FAIL', 'CRASH'])
self.assertEqual(test_results['base_expectations'], ['FAIL', 'TIMEOUT'])
def test_flag_and_default_base_expectations_in_json_results(self):
host = MockHost()
port = host.port_factory.get('test-win-win7')
flag_exp_path = host.filesystem.join(
port.web_tests_dir(), 'FlagExpectations', 'composite-after-paint')
host.filesystem.write_text_file(
flag_exp_path, '# results: [ Failure ]\npasses/args.html [ Failure ]')
self.assertTrue(logging_run(
['--order', 'natural', 'passes/args.html', '--num-retries', '1',
'--additional-driver-flag', '--composite-after-paint'],
tests_included=True, host=host))
results = json.loads(
host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
test_results = results['tests']['passes']['args.html']
self.assertEqual(results['flag_name'], '/composite-after-paint')
self.assertEqual(test_results['flag_expectations'], ['FAIL'])
self.assertEqual(test_results['base_expectations'], ['PASS'])
def test_stderr_is_saved(self):
host = MockHost()
self.assertTrue(passing_run(host=host))
...