Commit a9434345 authored by maruel@chromium.org's avatar maruel@chromium.org

run_test_cases.py: Enable use of RUN_TEST_CASES_RESULT_FILE as environment variable.

Much more strict on what is allowable for GTEST_* environment variables.
Enable support for GTEST_FILTER.
Work around non-UTF-8 output.

fix_test_cases.py:
Work better with run_test_cases.py inside isolate.py.
Runs all the test cases simultaneously, so it is much faster.

R=cmp@chromium.org
BUG=
TEST=


Review URL: https://chromiumcodereview.appspot.com/10825049

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151043 0039d316-1c4b-4281-b951-d872f2087c98
parent 921c277e
...@@ -5,9 +5,11 @@ ...@@ -5,9 +5,11 @@
"""Scripts to run a test, grab the failures and trace them.""" """Scripts to run a test, grab the failures and trace them."""
import json
import os import os
import subprocess import subprocess
import sys import sys
import tempfile
import run_test_cases import run_test_cases
...@@ -48,6 +50,29 @@ def trace_and_merge(result, test): ...@@ -48,6 +50,29 @@ def trace_and_merge(result, test):
[sys.executable, 'isolate.py', 'merge', '-r', result]) [sys.executable, 'isolate.py', 'merge', '-r', result])
def run_all(result):
  """Runs all the test cases at once through isolate.py.

  Assumes run_test_cases.py is implicitly invoked by 'isolate.py run', and
  asks it (via RUN_TEST_CASES_RESULT_FILE) to dump its per-test results as
  JSON into a temporary file.

  Returns the list of test case names that failed every run, or None if the
  result file was never produced.
  """
  handle, result_file = tempfile.mkstemp(prefix='run_test_cases')
  os.close(handle)
  env = os.environ.copy()
  env['RUN_TEST_CASES_RESULT_FILE'] = result_file
  try:
    subprocess.call(
        [sys.executable, 'isolate.py', 'run', '-r', result], env=env)
    if not os.path.isfile(result_file):
      print >> sys.stderr, 'Failed to find %s' % result_file
      return None
    with open(result_file) as f:
      data = json.load(f)
  finally:
    # Always clean up the temp file, even if json.load() throws.
    if os.path.isfile(result_file):
      os.remove(result_file)
  # A test case is broken when every one of its runs had a non-zero
  # returncode (returncode == 0 means success).
  return [
      test for test, runs in data.iteritems()
      if all(run['returncode'] for run in runs)
  ]
def run(result, test): def run(result, test):
"""Runs a single test case in an isolated environment. """Runs a single test case in an isolated environment.
...@@ -55,7 +80,7 @@ def run(result, test): ...@@ -55,7 +80,7 @@ def run(result, test):
""" """
return not subprocess.call([ return not subprocess.call([
sys.executable, 'isolate.py', 'run', '-r', result, sys.executable, 'isolate.py', 'run', '-r', result,
'--', '--gtest_filter=' + test '--', '--gtest_filter=' + test,
]) ])
...@@ -75,14 +100,28 @@ def trace_and_verify(result, test): ...@@ -75,14 +100,28 @@ def trace_and_verify(result, test):
return run(result, test) return run(result, test)
def run_all(result, executable): def fix_all(result):
"""Runs all the test cases in a gtest executable and trace the failing tests. """Runs all the test cases in a gtest executable and trace the failing tests.
Then make sure the test passes afterward. Returns True on success.
Makes sure the test passes afterward.
""" """
test_cases = run_test_cases.list_test_cases( # These could have adverse side-effects.
executable, 0, 0, False, False, False) # TODO(maruel): Be more intelligent about it, for now be safe.
print 'Found %d test cases.' % len(test_cases) for i in run_test_cases.KNOWN_GTEST_ENV_VARS:
if i in os.environ:
print >> 'Please unset %s' % i
return False
test_cases = run_all(result)
if test_cases is None:
return False
print '\nFound %d broken test cases.' % len(test_cases)
if not test_cases:
return True
failures = [] failures = []
fixed_tests = [] fixed_tests = []
try: try:
...@@ -135,7 +174,7 @@ def main(): ...@@ -135,7 +174,7 @@ def main():
'%s doesn\'t exist, please build %s_run' % (result, basename)) '%s doesn\'t exist, please build %s_run' % (result, basename))
return 1 return 1
return not run_all(result, executable) return not fix_all(result)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -21,6 +21,42 @@ import threading ...@@ -21,6 +21,42 @@ import threading
import time import time
# These are known to influence the way the output is generated.
KNOWN_GTEST_ENV_VARS = [
'GTEST_ALSO_RUN_DISABLED_TESTS',
'GTEST_BREAK_ON_FAILURE',
'GTEST_CATCH_EXCEPTIONS',
'GTEST_COLOR',
'GTEST_FILTER',
'GTEST_OUTPUT',
'GTEST_PRINT_TIME',
'GTEST_RANDOM_SEED',
'GTEST_REPEAT',
'GTEST_SHARD_INDEX',
'GTEST_SHARD_STATUS_FILE',
'GTEST_SHUFFLE',
'GTEST_THROW_ON_FAILURE',
'GTEST_TOTAL_SHARDS',
]
# These need to be popped out of the environment before running a test,
# since they would conflict with the explicit --gtest_filter / sharding
# arguments this script generates.
GTEST_ENV_VARS_TO_REMOVE = [
# TODO(maruel): Handle.
'GTEST_ALSO_RUN_DISABLED_TESTS',
'GTEST_FILTER',
# TODO(maruel): Handle.
'GTEST_OUTPUT',
# TODO(maruel): Handle.
'GTEST_RANDOM_SEED',
# TODO(maruel): Handle.
'GTEST_REPEAT',
'GTEST_SHARD_INDEX',
# TODO(maruel): Handle.
'GTEST_SHUFFLE',
'GTEST_TOTAL_SHARDS',
]
def num_processors(): def num_processors():
"""Returns the number of processors. """Returns the number of processors.
...@@ -428,8 +464,10 @@ class Runner(object): ...@@ -428,8 +464,10 @@ class Runner(object):
# It is important to remove the shard environment variables since it could # It is important to remove the shard environment variables since it could
# conflict with --gtest_filter. # conflict with --gtest_filter.
self.env = os.environ.copy() self.env = os.environ.copy()
self.env.pop('GTEST_SHARD_INDEX', None) for name in GTEST_ENV_VARS_TO_REMOVE:
self.env.pop('GTEST_TOTAL_SHARDS', None) self.env.pop(name, None)
# Forcibly enable color by default, if not already disabled.
self.env.setdefault('GTEST_COLOR', 'on')
def map(self, test_case): def map(self, test_case):
"""Traces a single test case and returns its output.""" """Traces a single test case and returns its output."""
...@@ -449,7 +487,8 @@ class Runner(object): ...@@ -449,7 +487,8 @@ class Runner(object):
'test_case': test_case, 'test_case': test_case,
'returncode': returncode, 'returncode': returncode,
'duration': duration, 'duration': duration,
'output': output, # It needs to be valid utf-8 otherwise it can't be store.
'output': output.decode('ascii', 'ignore').encode('utf-8'),
} }
if '[ RUN ]' not in output: if '[ RUN ]' not in output:
# Can't find gtest marker, mark it as invalid. # Can't find gtest marker, mark it as invalid.
...@@ -507,7 +546,7 @@ def get_test_cases(executable, whitelist, blacklist, index, shards): ...@@ -507,7 +546,7 @@ def get_test_cases(executable, whitelist, blacklist, index, shards):
return tests return tests
def run_test_cases(executable, test_cases, jobs, timeout, no_dump): def run_test_cases(executable, test_cases, jobs, timeout, result_file):
"""Traces test cases one by one.""" """Traces test cases one by one."""
progress = Progress(len(test_cases)) progress = Progress(len(test_cases))
with ThreadPool(jobs) as pool: with ThreadPool(jobs) as pool:
...@@ -517,8 +556,8 @@ def run_test_cases(executable, test_cases, jobs, timeout, no_dump): ...@@ -517,8 +556,8 @@ def run_test_cases(executable, test_cases, jobs, timeout, no_dump):
results = pool.join(progress, 0.1) results = pool.join(progress, 0.1)
duration = time.time() - progress.start duration = time.time() - progress.start
results = dict((item[0]['test_case'], item) for item in results) results = dict((item[0]['test_case'], item) for item in results)
if not no_dump: if result_file:
with open('%s.run_test_cases' % executable, 'wb') as f: with open(result_file, 'wb') as f:
json.dump(results, f, sort_keys=True, indent=2) json.dump(results, f, sort_keys=True, indent=2)
sys.stdout.write('\n') sys.stdout.write('\n')
total = len(results) total = len(results)
...@@ -583,7 +622,11 @@ def main(argv): ...@@ -583,7 +622,11 @@ def main(argv):
parser.add_option( parser.add_option(
'--no-dump', '--no-dump',
action='store_true', action='store_true',
help='do not generate a .test_cases file') help='do not generate a .run_test_cases file')
parser.add_option(
'--result',
default=os.environ.get('RUN_TEST_CASES_RESULT_FILE', ''),
help='Override the default name of the generated .run_test_cases file')
group = optparse.OptionGroup(parser, 'Which test cases to run') group = optparse.OptionGroup(parser, 'Which test cases to run')
group.add_option( group.add_option(
...@@ -613,6 +656,7 @@ def main(argv): ...@@ -613,6 +656,7 @@ def main(argv):
help='File containing the exact list of test cases to run') help='File containing the exact list of test cases to run')
group.add_option( group.add_option(
'--gtest_filter', '--gtest_filter',
default=os.environ.get('GTEST_FILTER', ''),
help='Runs a single test, provideded to keep compatibility with ' help='Runs a single test, provideded to keep compatibility with '
'other tools') 'other tools')
parser.add_option_group(group) parser.add_option_group(group)
...@@ -665,12 +709,20 @@ def main(argv): ...@@ -665,12 +709,20 @@ def main(argv):
if not test_cases: if not test_cases:
return 0 return 0
if options.no_dump:
result_file = None
else:
if options.result:
result_file = options.result
else:
result_file = '%s.run_test_cases' % executable
return run_test_cases( return run_test_cases(
executable, executable,
test_cases, test_cases,
options.jobs, options.jobs,
options.timeout, options.timeout,
options.no_dump) result_file)
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -99,12 +99,12 @@ class RunTestCases(unittest.TestCase): ...@@ -99,12 +99,12 @@ class RunTestCases(unittest.TestCase):
def test_gtest_filter(self): def test_gtest_filter(self):
old = run_test_cases.run_test_cases old = run_test_cases.run_test_cases
exe = os.path.join(ROOT_DIR, 'data', 'gtest_fake', 'gtest_fake_pass.py') exe = os.path.join(ROOT_DIR, 'data', 'gtest_fake', 'gtest_fake_pass.py')
def expect(executable, test_cases, jobs, timeout, no_dump): def expect(executable, test_cases, jobs, timeout, result_file):
self.assertEquals(exe, executable) self.assertEquals(exe, executable)
self.assertEquals(['Foo.Bar1', 'Foo.Bar3'], test_cases) self.assertEquals(['Foo.Bar1', 'Foo.Bar3'], test_cases)
self.assertEquals(run_test_cases.num_processors(), jobs) self.assertEquals(run_test_cases.num_processors(), jobs)
self.assertEquals(120, timeout) self.assertEquals(120, timeout)
self.assertEquals(None, no_dump) self.assertEquals(exe + '.run_test_cases', result_file)
return 89 return 89
try: try:
run_test_cases.run_test_cases = expect run_test_cases.run_test_cases = expect
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment