Commit 00550736 authored by nednguyen, committed by Commit bot

Reland of Roll typ to v0.9.7. (patchset #1 id:1 of https://codereview.chromium.org/2317473002/ )

Reason for revert:
This is probably not the root cause of crbug.com/643320

Original issue's description:
> Revert of Roll typ to v0.9.7. (patchset #1 id:1 of https://codereview.chromium.org/2289303002/ )
>
> Reason for revert:
> Let's see if reverting this helps w/ crbug.com/643320.
>
> Original issue's description:
> > Roll typ to v0.9.7.
> >
> > This picks up a change that will help address flaky telemetry failures.
> >
> > This includes the following changes (v0.9.5..v0.9.7):
> >
> >   79fe79db Change typ's interpretation of a test that first fails
> >            and is then skipped.
> >   101acd31 Bump version to 0.9.6, clean up Python 3 failures
> >   2f8787b8 rework run script to remove python3 log
> >   d5023636 rework sharding tests
> >
> > TBR=rnephew@chromium.org, nednguyen@google.com
> > BUG=618330
> >
> > Committed: https://crrev.com/71c9ade4e91e16846779311f1ae800ed550c8b69
> > Cr-Commit-Position: refs/heads/master@{#415392}
>
> TBR=nednguyen@google.com,rnephew@chromium.org
> # Not skipping CQ checks because original CL landed more than 1 day ago.
> BUG=618330, 643320
>
> Committed: https://crrev.com/1023620c13385de9fcea55ec35f66dc22b454ff8
> Cr-Commit-Position: refs/heads/master@{#416573}

TBR=rnephew@chromium.org,nednguyen@chromium.org,dpranke@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=618330, 643320

Review-Url: https://codereview.chromium.org/2328753002
Cr-Commit-Position: refs/heads/master@{#417415}
parent 40db8701
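The headline change in this roll (79fe79db) alters how typ reports a test that fails and is then skipped on a retry: the test no longer counts as failed, and its per-attempt results are joined into a string such as 'FAIL SKIP'. Below is a minimal standalone sketch of that bookkeeping, using simplified stand-ins for typ's result objects rather than typ's actual API:

    from collections import namedtuple

    # Hypothetical, simplified stand-ins for typ's internal result objects.
    Result = namedtuple('Result', ['name', 'actual'])
    FAIL, PASS, SKIP = 'FAIL', 'PASS', 'SKIP'

    def failed_test_names(results):
        # A test only stays "failed" if no later attempt passed or was skipped.
        names = set()
        for r in results:
            if r.actual == FAIL:
                names.add(r.name)
            elif r.actual in (PASS, SKIP) and r.name in names:
                names.remove(r.name)
        return names

    def actual_results_for_test(name, results):
        # Joins every attempt's outcome; a test with no recorded data is SKIP.
        actuals = [r.actual for r in results if r.name == name]
        return ' '.join(actuals) if actuals else SKIP

    attempts = [Result('FPTest.test_count', FAIL), Result('FPTest.test_count', SKIP)]
    assert failed_test_names(attempts) == set()
    assert actual_results_for_test('FPTest.test_count', attempts) == 'FAIL SKIP'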
Name: typ
URL: https://github.com/dpranke/typ.git
-Version: 0.9.5
+Version: 0.9.7
-Revision: a277897604718c50b8353b4bce15d6b78cacdfca
+Revision: 79fe79dbb4cdd56fd4fe0491f31ad61619652668
Security Critical: no
License: Apache 2.0
License File: NOT_SHIPPED
......
# This file is used by gcl to get repository specific information.
CODE_REVIEW_SERVER: codereview.chromium.org
PROJECT: typ
@@ -10,9 +10,8 @@ import sys
from tools import cov
-is_python3 = bool(sys.version_info.major == 3)
-has_python34 = False
verbose = False
repo_dir = os.path.abspath(os.path.dirname(__file__))
path_to_cov = os.path.join(repo_dir, 'tools', 'cov.py')
path_to_runner = os.path.join(repo_dir, 'typ', 'runner.py')
@@ -28,8 +27,6 @@ def call(*args, **kwargs):
def main(argv):
    parser = argparse.ArgumentParser(prog='run')
-    parser.add_argument('--no3', action='store_true',
-                        help='Do not run the tests under Python 3.')
    parser.add_argument('-v', '--verbose', action='store_true')
    subps = parser.add_subparsers()
@@ -60,13 +57,6 @@ def main(argv):
    global verbose
    if args.verbose:
        verbose = True
-    global has_python34
-    if not args.no3:
-        try:
-            ver = subprocess.check_output(['python3', '--version'])
-            has_python34 = ver.split()[1] >= '3.4'
-        except:
-            pass
    args.func(args)
@@ -81,11 +71,8 @@ def run_coverage(args):
    args.source = [os.path.join(repo_dir, 'typ')]
    argv = cov.argv_from_args(args)
    cov_args = [path_to_runner, '-j', '1']
-    print('Running coverage of unit tests for Python 2.7.')
-    call(['python', path_to_cov] + argv + cov_args)
-    if has_python34:
-        print('Running coverage of unit tests for Python 3.4.')
-        call(['python3', path_to_cov] + argv + cov_args)
+    python = sys.executable
+    call([python, path_to_cov] + argv + cov_args)
def run_help(args):
@@ -99,20 +86,17 @@ def run_lint(args):
def run_tests(args):
-    print('Testing running the typ module directly if it is in sys.path.')
-    call(['python', '-m', 'typ', 'typ.tests.main_test.TestMain.test_basic'])
+    python = sys.executable
+    # Test running the typ module directly if it is in sys.path.
+    call([python, '-m', 'typ', 'typ.tests.main_test.TestMain.test_basic'])
-    print('Testing running the runner directly if nothing is in sys.path.')
+    # Testing running the runner directly if nothing is in sys.path.'
    home_dir = os.environ['HOME']
-    call(['python', path_to_runner, 'typ.tests.main_test.TestMain.test_basic'],
+    call([python, path_to_runner, 'typ.tests.main_test.TestMain.test_basic'],
         cwd=home_dir)
-    # Now run all the tests under Python2 and Python3.
-    print('Running the unit tests under Python 2.')
-    call(['python', path_to_runner])
-    if has_python34:
-        print('Running the unit tests under Python 3.4.')
-        call(['python3', path_to_runner])
+    # Run the remaining tests.
+    call([python, path_to_runner])
if __name__ == '__main__':
......
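The run-script changes above replace the hard-coded 'python' and 'python3' invocations with sys.executable, so the helper scripts are re-run under whichever interpreter launched them. A minimal sketch of that pattern (the command shown is only illustrative):

    import subprocess
    import sys

    # Re-invoke Python using the interpreter that is running this script,
    # instead of whatever 'python' or 'python3' resolves to on PATH.
    subprocess.check_call([sys.executable, '-c', 'import sys; print(sys.version)'])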
@@ -91,12 +91,11 @@ def make_full_results(metadata, seconds_since_epoch, all_test_names, results):
    for test_name in all_test_names:
        value = OrderedDict()
+        value['actual'] = _actual_results_for_test(test_name, results)
        if test_name in skipped_tests:
            value['expected'] = 'SKIP'
-            value['actual'] = 'SKIP'
        else:
            value['expected'] = 'PASS'
-            value['actual'] = _actual_results_for_test(test_name, results)
        if value['actual'].endswith('FAIL'):
            value['is_unexpected'] = True
        _add_path_to_trie(full_results['tests'], test_name, value)
@@ -127,7 +126,13 @@ def failed_test_names(results):
    for r in results.results:
        if r.actual == ResultType.Failure:
            names.add(r.name)
-        elif r.actual == ResultType.Pass and r.name in names:
+        elif ((r.actual == ResultType.Pass or r.actual == ResultType.Skip)
+              and r.name in names):
+            # This check indicates that a test failed, and then either passed
+            # or was skipped on a retry. It is somewhat counterintuitive
+            # that a test that failed and then skipped wouldn't be considered
+            # failed, but that's at least consistent with a test that is
+            # skipped every time.
            names.remove(r.name)
    return names
@@ -144,8 +149,10 @@ def _actual_results_for_test(test_name, results):
            actuals.append('FAIL')
        elif r.actual == ResultType.Pass:
            actuals.append('PASS')
+        elif r.actual == ResultType.Skip:
+            actuals.append('SKIP')
-    assert actuals, 'We did not find any result data for %s.' % test_name
+    if not actuals:
+        actuals.append('SKIP')
    return ' '.join(actuals)
......
@@ -407,6 +407,11 @@ class Runner(object):
                add_tests(suite)
            else:
                add_tests(loader.loadTestsFromName(name))
+        if hasattr(loader, 'errors') and loader.errors:
+            # In Python3's version of unittest, loader failures get converted
+            # into failed test cases, rather than raising exceptions. However,
+            # the errors also get recorded so you can err out immediately.
+            raise ImportError(loader.errors)
    def _run_tests(self, result_set, test_set):
        h = self.host
......
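The runner change above depends on a Python 3 unittest behavior: since Python 3.5, TestLoader records import and lookup failures on loader.errors and returns a synthetic failing test instead of raising. A small sketch of that behavior, assuming Python 3.5 or newer and a module name that does not exist:

    import unittest

    loader = unittest.TestLoader()
    suite = loader.loadTestsFromName('package_that_does_not_exist')

    # The failure is deferred: the returned suite contains one synthetic
    # failing test, and the error text is accumulated on loader.errors.
    print(suite.countTestCases())            # 1
    print(len(loader.errors))                # 1
    print(loader.errors[0].splitlines()[0])  # Failed to import test module: ...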
@@ -141,16 +141,17 @@ SF_TEST_FILES = {'sf_test.py': SF_TEST_PY}
LOAD_TEST_PY = """
import unittest
-def load_tests(_, _2, _3):
-    class BaseTest(unittest.TestCase):
-        pass
-    def method_fail(self):
-        self.fail()
-    def method_pass(self):
-        pass
+class BaseTest(unittest.TestCase):
+    pass
+def method_fail(self):
+    self.fail()
+def method_pass(self):
+    pass
+def load_tests(_, _2, _3):
    setattr(BaseTest, "test_fail", method_fail)
    setattr(BaseTest, "test_pass", method_pass)
    suite = unittest.TestSuite()
@@ -162,82 +163,6 @@ def load_tests(_, _2, _3):
LOAD_TEST_FILES = {'load_test.py': LOAD_TEST_PY}
-MIXED_TEST_PY = """
-import unittest
-class SampleTest(unittest.TestCase):
-    def test_pass_0(self):
-        self.assertEqual(1, 1)
-    def test_pass_1(self):
-        self.assertEqual(1, 1)
-    def test_fail_0(self):
-        self.assertEqual(1, 2)
-    def test_fail_1(self):
-        raise Exception()
-    @unittest.skip('Skip for no reason')
-    def test_skip_0(self):
-        pass
-"""
-LOAD_MANY_TEST_PY = """
-import unittest
-def generate_test_case(test_method_name, test_type):
-    class GeneratedTest(unittest.TestCase):
-        pass
-    if test_type == 'pass':
-        def test_method(self):
-            self.assertEqual(1, 1)
-    elif test_type == 'fail':
-        def test_method(self):
-            self.assertEqual(1, 2)
-    elif test_type == 'skip':
-        def test_method(self):
-            self.skipTest('Skipped')
-    else:
-        raise Exception
-    setattr(GeneratedTest, test_method_name, test_method)
-    return GeneratedTest(test_method_name)
-def load_tests(loader, standard_tests, pattern):
-    del loader, standard_tests, pattern # unused
-    suite = unittest.TestSuite()
-    passed_test_names = [
-        str('test_pass_%s' % i) for i in range(2, 15)]
-    failed_test_names = [
-        str('test_fail_%s' % i) for i in range(2, 10)]
-    skipped_test_names = [
-        str('test_skip_%s' % i) for i in range(1, 10)]
-    for test_method_name in passed_test_names:
-        suite.addTest(generate_test_case(test_method_name, 'pass'))
-    for test_method_name in failed_test_names:
-        suite.addTest(generate_test_case(test_method_name, 'fail'))
-    for test_method_name in skipped_test_names:
-        suite.addTest(generate_test_case(test_method_name, 'skip'))
-    return suite
-"""
-MANY_TEST_FILES = {
-    'mixed_test.py': MIXED_TEST_PY, # 2 passes, 2 fails, 1 skip
-    'load_many_test.py': LOAD_MANY_TEST_PY} # 13 passes, 13 fails, 9 skips
path_to_main = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
@@ -248,34 +173,6 @@ class TestCli(test_case.MainTestCase):
    prog = [sys.executable, path_to_main]
    files_to_ignore = ['*.pyc']
-    def get_test_results_stat(self, test_output):
-        num_passes = test_output.count(' passed\n')
-        num_fails = test_output.count(' failed unexpectedly:\n')
-        num_skips = test_output.count(' was skipped\n')
-        return num_passes, num_fails, num_skips
-    def run_and_check_test_results(self, num_shards):
-        total_passes, total_fails, total_skips = 0, 0, 0
-        min_num_tests_run = float('inf')
-        max_num_tests_run = 0
-        for shard_index in range(num_shards):
-            _, out, _, _ = self.check(
-                ['--total-shards', str(num_shards), '--shard-index',
-                 str(shard_index)], files=MANY_TEST_FILES)
-            passes, fails, skips = self.get_test_results_stat(out)
-            total_passes += passes
-            total_fails += fails
-            total_skips += skips
-            num_tests_run = passes + fails
-            min_num_tests_run = min(min_num_tests_run, num_tests_run)
-            max_num_tests_run = max(max_num_tests_run, num_tests_run)
-        self.assertEqual(total_passes, 15)
-        self.assertEqual(total_fails, 10)
-        self.assertEqual(total_skips, 10)
-        # Make sure that we don't distribute the tests too unevenly.
-        self.assertLessEqual(max_num_tests_run - min_num_tests_run, 2)
    def test_bad_arg(self):
        self.check(['--bad-arg'], ret=2, out='',
                   rerr='.*: error: unrecognized arguments: --bad-arg\n')
@@ -305,12 +202,12 @@ class TestCli(test_case.MainTestCase):
                [1/1] pass_test.PassingTest.test_pass passed
                1 test run, 0 failures.
                Name         Stmts   Miss  Cover
-               -------------------------------
-               fail_test        4      4     0%
-               pass_test        4      0   100%
-               -------------------------------
+               ----------------------------------
+               fail_test.py     4      4     0%
+               pass_test.py     4      0   100%
+               ----------------------------------
                TOTAL            8      4    50%
                """))
        except ImportError:  # pragma: no cover
            # We can never cover this line, since running coverage means
@@ -374,6 +271,32 @@ class TestCli(test_case.MainTestCase):
                'fail_then_pass_test']['FPTest']['test_count']['actual'],
            'FAIL PASS')
+    def test_fail_then_skip(self):
+        files = {'fail_then_skip_test.py': d("""\
+            import unittest
+            count = 0
+            class FPTest(unittest.TestCase):
+                def test_count(self):
+                    global count
+                    count += 1
+                    if count == 1:
+                        self.fail()
+                    elif count == 2:
+                        self.skipTest('')
+            """)}
+        _, out, _, files = self.check(['--retry-limit', '3',
+                                       '--write-full-results-to',
+                                       'full_results.json'],
+                                      files=files, ret=0, err='')
+        self.assertIn('Retrying failed tests (attempt #1 of 3)', out)
+        self.assertNotIn('Retrying failed tests (attempt #2 of 3)', out)
+        self.assertIn('1 test run, 0 failures.\n', out)
+        results = json.loads(files['full_results.json'])
+        self.assertEqual(
+            results['tests'][
+                'fail_then_skip_test']['FPTest']['test_count']['actual'],
+            'FAIL SKIP')
    def test_failures_are_not_elided(self):
        _, out, _, _ = self.check(['--terminal-width=20'],
                                  files=FAIL_TEST_FILES, ret=1, err='')
@@ -445,8 +368,7 @@ class TestCli(test_case.MainTestCase):
                pass
            """)}
        self.check(['-l', 'foo.py'], files=files, ret=1, err='',
-                   rout=('Failed to load "foo.py": No module named '
-                         '\'?package_that_does_not_exist\'?\n'))
+                   rout=('Failed to load "foo.py":'))
    def test_import_failure_no_tests(self):
        files = {'foo.py': 'import unittest'}
@@ -463,9 +385,7 @@ class TestCli(test_case.MainTestCase):
            """)}
        _, out, _, _ = self.check([], files=files, ret=1, err='')
        self.assertIn('Failed to import test module: syn_test', out)
-        self.assertIn((' syntax error\n'
-                       ' ^\n'
-                       'SyntaxError: invalid syntax\n'), out)
+        self.assertIn('SyntaxError: invalid syntax', out)
    def test_interrupt(self):
        files = {'interrupt_test.py': d("""\
@@ -489,8 +409,8 @@ class TestCli(test_case.MainTestCase):
            def load_tests(_, _2, _3):
                raise ValueError('this should fail')
            """)}
-        self.check([], files=files, ret=1, err='',
-                   out=('foo_test.load_tests() failed: this should fail\n'))
+        _, out, _, _ = self.check([], files=files, ret=1, err='')
+        self.assertIn('this should fail', out)
    def test_load_tests_single_worker(self):
        files = LOAD_TEST_FILES
@@ -632,17 +552,46 @@ class TestCli(test_case.MainTestCase):
        self.assertIn('sf_test.SkipSetup.test_notrun was skipped', out)
    def test_sharding(self):
-        # Test no sharding.
-        self.run_and_check_test_results(1)
-        # A typical with 4 shards.
-        self.run_and_check_test_results(4)
-        # Case which number of shards is a prime.
-        self.run_and_check_test_results(7)
-        # Case which number of shards is more than number of tests.
-        self.run_and_check_test_results(50)
+        def run(shard_index, total_shards, tests):
+            files = {'shard_test.py': textwrap.dedent(
"""\
import unittest
class ShardTest(unittest.TestCase):
def test_01(self):
pass
def test_02(self):
pass
def test_03(self):
pass
def test_04(self):
pass
def test_05(self):
pass
""")}
_, out, _, _ = self.check(
['--shard-index', str(shard_index),
'--total-shards', str(total_shards),
'--jobs', '1'],
files=files)
exp_out = ''
total_tests = len(tests)
for i, test in enumerate(tests):
exp_out += ('[%d/%d] shard_test.ShardTest.test_%s passed\n' %
(i + 1, total_tests, test))
exp_out += '%d test%s run, 0 failures.\n' % (
total_tests, "" if total_tests == 1 else "s")
self.assertEqual(out, exp_out)
run(0, 1, ['01', '02', '03', '04', '05'])
run(0, 2, ['01', '03', '05'])
run(1, 2, ['02', '04'])
run(0, 6, ['01'])
    def test_subdir(self):
        files = {
@@ -682,7 +631,7 @@ class TestCli(test_case.MainTestCase):
        self.assertEqual(len(posts), 1)
        payload = posts[0][2].decode('utf8')
-        self.assertIn('"test_pass": {"expected": "PASS", "actual": "PASS"}',
+        self.assertIn('"test_pass": {"actual": "PASS", "expected": "PASS"}',
                      payload)
        self.assertTrue(payload.endswith('--\r\n'))
        self.assertNotEqual(server.log.getvalue(), '')
......
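The rewritten test_sharding above pins down a deterministic split: with two shards, tests 01, 03, 05 land on shard 0 and tests 02, 04 on shard 1, which is consistent with assigning the i-th test (in sorted order) to shard i % total_shards. A small sketch of that partition rule, written as an assumption inferred from the expected output rather than typ's actual implementation:

    def shard(test_names, shard_index, total_shards):
        # Keep every total_shards-th test, starting at position shard_index.
        return [t for i, t in enumerate(sorted(test_names))
                if i % total_shards == shard_index]

    tests = ['test_01', 'test_02', 'test_03', 'test_04', 'test_05']
    assert shard(tests, 0, 2) == ['test_01', 'test_03', 'test_05']
    assert shard(tests, 1, 2) == ['test_02', 'test_04']
    assert shard(tests, 0, 6) == ['test_01']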
@@ -157,11 +157,11 @@ class TestPool(test_case.TestCase):
        host = Host()
        jobs = 2
-        self.assertRaises(ValueError, make_pool,
+        self.assertRaises(Exception, make_pool,
                          host, jobs, _stub, unpicklable_fn, None, None)
-        self.assertRaises(ValueError, make_pool,
+        self.assertRaises(Exception, make_pool,
                          host, jobs, _stub, None, unpicklable_fn, None)
-        self.assertRaises(ValueError, make_pool,
+        self.assertRaises(Exception, make_pool,
                          host, jobs, _stub, None, None, unpicklable_fn)
    def test_no_close(self):
......
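The pool_test change above loosens the expected exception from ValueError to Exception, presumably because the error raised when an unpicklable callback is handed to the pool differs across Python versions and pickle implementations. A small illustration of that variability (not typ code):

    import pickle

    def make_unpicklable():
        def nested():  # a function defined inside another function cannot be pickled
            pass
        return nested

    try:
        pickle.dumps(make_unpicklable())
    except Exception as exc:
        # Depending on the Python version this is AttributeError or
        # pickle.PicklingError, so a test can only rely on Exception.
        print(type(exc).__name__)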
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-VERSION = '0.9.5'
+VERSION = '0.9.7'