Commit cbcc115d authored by jbudorick, committed by Commit bot

[Android] Allow instrumentation test skipping.

This patch also removes a call to AdbInterface.StartInstrumentation and
implements some of the functionality of am_instrument_parser.py (the
parsing behavior is sketched below).

BUG=408585

Review URL: https://codereview.chromium.org/558883003

Cr-Commit-Position: refs/heads/master@{#295491}
parent 10538620
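
The new unit tests below pin down the parsing behavior the commit message refers to. As a minimal sketch, reconstructed from those test expectations rather than from the patch's actual TestRunner._ParseAmInstrumentRawOutput (the standalone function name here is illustrative), the raw `am instrument -r` output folds into a final instrumentation code, free-form result lines, and a list of per-test (status code, bundle) pairs:

# Sketch only: reconstructs the parsing contract exercised by
# test_runner_test.py below; the real implementation lives in
# TestRunner._ParseAmInstrumentRawOutput.
def parse_am_instrument_raw_output(lines):
  """Returns (code, result_lines, statuses) parsed from `am instrument -r`.

  statuses is a list of (status_code, bundle) tuples; each bundle maps a
  key to a list of value lines, since values (e.g. stack traces) may span
  multiple lines.
  """
  code = None
  result = []
  statuses = []
  bundle = {}
  current = None  # The value list that continuation lines extend.
  for line in lines:
    if line.startswith('INSTRUMENTATION_STATUS: '):
      # Starts a new key in the current bundle.
      key, _, value = line[len('INSTRUMENTATION_STATUS: '):].partition('=')
      bundle[key] = [value]
      current = bundle[key]
    elif line.startswith('INSTRUMENTATION_STATUS_CODE: '):
      # Flushes the accumulated bundle as one status entry.
      statuses.append((int(line.split(': ', 1)[1]), bundle))
      bundle = {}
      current = None
    elif line.startswith('INSTRUMENTATION_RESULT: '):
      result.append(line[len('INSTRUMENTATION_RESULT: '):])
      current = result
    elif line.startswith('INSTRUMENTATION_CODE: '):
      code = int(line.split(': ', 1)[1])
      current = None
    elif current is not None:
      # Unprefixed lines continue the most recently started value.
      current.append(line)
  return (code, result, statuses)

Note that test_skipped=true arrives as just another status bundle; it is _GenerateTestResult (exercised further down) that maps it to ResultType.SKIP.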
build/android/PRESUBMIT.py:

@@ -63,6 +63,7 @@ def CommonChecks(input_api, output_api):
       unit_tests=[
           J('pylib', 'device', 'device_utils_test.py'),
           J('pylib', 'gtest', 'test_package_test.py'),
+          J('pylib', 'instrumentation', 'test_runner_test.py'),
       ],
       env=pylib_test_env))
   output.extend(_CheckDeletionsOnlyFiles(input_api, output_api))
build/android/pylib/base/base_test_result.py:

@@ -7,6 +7,7 @@
 class ResultType(object):
   """Class enumerating test types."""
   PASS = 'PASS'
+  SKIP = 'SKIP'
   FAIL = 'FAIL'
   CRASH = 'CRASH'
   TIMEOUT = 'TIMEOUT'
@@ -15,8 +16,8 @@ class ResultType(object):
   @staticmethod
   def GetTypes():
     """Get a list of all test types."""
-    return [ResultType.PASS, ResultType.FAIL, ResultType.CRASH,
-            ResultType.TIMEOUT, ResultType.UNKNOWN]
+    return [ResultType.PASS, ResultType.SKIP, ResultType.FAIL,
+            ResultType.CRASH, ResultType.TIMEOUT, ResultType.UNKNOWN]


 class BaseTestResult(object):
@@ -97,19 +98,26 @@ class TestRunResults(object):
     s.append('[==========] %s ran.' % (tests(len(self.GetAll()))))
     s.append('[ PASSED ] %s.' % (tests(len(self.GetPass()))))

-    not_passed = self.GetNotPass()
-    if len(not_passed) > 0:
-      s.append('[ FAILED ] %s, listed below:' % tests(len(self.GetNotPass())))
-      for t in self.GetFail():
+    skipped = self.GetSkip()
+    if skipped:
+      s.append('[ SKIPPED ] Skipped %s, listed below:' % tests(len(skipped)))
+      for t in sorted(skipped):
+        s.append('[ SKIPPED ] %s' % str(t))
+
+    all_failures = self.GetFail().union(self.GetCrash(), self.GetTimeout(),
+                                        self.GetUnknown())
+    if all_failures:
+      s.append('[ FAILED ] %s, listed below:' % tests(len(all_failures)))
+      for t in sorted(self.GetFail()):
         s.append('[ FAILED ] %s' % str(t))
-      for t in self.GetCrash():
+      for t in sorted(self.GetCrash()):
         s.append('[ FAILED ] %s (CRASHED)' % str(t))
-      for t in self.GetTimeout():
+      for t in sorted(self.GetTimeout()):
         s.append('[ FAILED ] %s (TIMEOUT)' % str(t))
-      for t in self.GetUnknown():
+      for t in sorted(self.GetUnknown()):
         s.append('[ FAILED ] %s (UNKNOWN)' % str(t))
       s.append('')
-      s.append(plural(len(not_passed), 'FAILED TEST', 'FAILED TESTS'))
+      s.append(plural(len(all_failures), 'FAILED TEST', 'FAILED TESTS'))
     return '\n'.join(s)

   def GetShortForm(self):
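
As a hand-worked example of the new long form (not captured output; the test names are hypothetical, and this assumes the tests() helper above pluralizes as '1 test' / '3 tests'), a run with one pass, one skip, and one failure would render as:

[==========] 3 tests ran.
[ PASSED ] 1 test.
[ SKIPPED ] Skipped 1 test, listed below:
[ SKIPPED ] org.chromium.Example#testSkipped
[ FAILED ] 1 test, listed below:
[ FAILED ] org.chromium.Example#testFailed

1 FAILED TEST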
@@ -163,6 +171,10 @@ class TestRunResults(object):
     """Get the set of all passed test results."""
     return self._GetType(ResultType.PASS)

+  def GetSkip(self):
+    """Get the set of all skipped test results."""
+    return self._GetType(ResultType.SKIP)
+
   def GetFail(self):
     """Get the set of all failed test results."""
     return self._GetType(ResultType.FAIL)
@@ -185,4 +197,5 @@ class TestRunResults(object):

   def DidRunPass(self):
     """Return whether the test run was successful."""
-    return not self.GetNotPass()
+    return not (self.GetNotPass() - self.GetSkip())
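
The DidRunPass change makes skips non-fatal: the run passes as long as every non-passing result was a skip. A minimal illustration of the set arithmetic (hypothetical names; GetNotPass() and GetSkip() both return sets of results):

not_passed = {'org.chromium.Example#testSkipped'}  # every non-PASS result
skipped = {'org.chromium.Example#testSkipped'}     # the SKIP subset
not (not_passed - skipped)  # True: only skips remain, so the run passes
not (not_passed - set())    # False if the same result were not a skip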
build/android/pylib/instrumentation/test_runner_test.py (new file):

#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for instrumentation.TestRunner."""

# pylint: disable=W0212

import os
import sys
import unittest

from pylib import constants
from pylib.base import base_test_result
from pylib.instrumentation import test_runner

sys.path.append(os.path.join(
    constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock  # pylint: disable=F0401


class InstrumentationTestRunnerTest(unittest.TestCase):

  def setUp(self):
    options = mock.Mock()
    options.tool = ''
    package = mock.Mock()
    self.instance = test_runner.TestRunner(options, None, 0, package)
  def testParseAmInstrumentRawOutput_nothing(self):
    code, result, statuses = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(['']))
    self.assertEqual(None, code)
    self.assertEqual([], result)
    self.assertEqual([], statuses)

  def testParseAmInstrumentRawOutput_noMatchingStarts(self):
    raw_output = [
        '',
        'this.is.a.test.package.TestClass:.',
        'Test result for =.',
        'Time: 1.234',
        '',
        'OK (1 test)',
    ]
    code, result, statuses = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
    self.assertEqual(None, code)
    self.assertEqual([], result)
    self.assertEqual([], statuses)

  def testParseAmInstrumentRawOutput_resultAndCode(self):
    raw_output = [
        'INSTRUMENTATION_RESULT: foo',
        'bar',
        'INSTRUMENTATION_CODE: -1',
    ]
    code, result, _ = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
    self.assertEqual(-1, code)
    self.assertEqual(['foo', 'bar'], result)

  def testParseAmInstrumentRawOutput_oneStatus(self):
    raw_output = [
        'INSTRUMENTATION_STATUS: foo=1',
        'INSTRUMENTATION_STATUS: bar=hello',
        'INSTRUMENTATION_STATUS: world=false',
        'INSTRUMENTATION_STATUS: class=this.is.a.test.package.TestClass',
        'INSTRUMENTATION_STATUS: test=testMethod',
        'INSTRUMENTATION_STATUS_CODE: 0',
    ]
    _, _, statuses = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
    expected = [
        (0, {
            'foo': ['1'],
            'bar': ['hello'],
            'world': ['false'],
            'class': ['this.is.a.test.package.TestClass'],
            'test': ['testMethod'],
        })
    ]
    self.assertEqual(expected, statuses)
  def testParseAmInstrumentRawOutput_multiStatus(self):
    raw_output = [
        'INSTRUMENTATION_STATUS: class=foo',
        'INSTRUMENTATION_STATUS: test=bar',
        'INSTRUMENTATION_STATUS_CODE: 1',
        'INSTRUMENTATION_STATUS: test_skipped=true',
        'INSTRUMENTATION_STATUS_CODE: 0',
        'INSTRUMENTATION_STATUS: class=hello',
        'INSTRUMENTATION_STATUS: test=world',
        'INSTRUMENTATION_STATUS: stack=',
        'foo/bar.py (27)',
        'hello/world.py (42)',
        'test/file.py (1)',
        'INSTRUMENTATION_STATUS_CODE: -1',
    ]
    _, _, statuses = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
    expected = [
        (1, {'class': ['foo'], 'test': ['bar']}),
        (0, {'test_skipped': ['true']}),
        (-1, {
            'class': ['hello'],
            'test': ['world'],
            'stack': ['', 'foo/bar.py (27)', 'hello/world.py (42)',
                      'test/file.py (1)'],
        }),
    ]
    self.assertEqual(expected, statuses)

  def testParseAmInstrumentRawOutput_statusResultAndCode(self):
    raw_output = [
        'INSTRUMENTATION_STATUS: class=foo',
        'INSTRUMENTATION_STATUS: test=bar',
        'INSTRUMENTATION_STATUS_CODE: 1',
        'INSTRUMENTATION_RESULT: hello',
        'world',
        '',
        '',
        'INSTRUMENTATION_CODE: 0',
    ]
    code, result, statuses = (
        test_runner.TestRunner._ParseAmInstrumentRawOutput(raw_output))
    self.assertEqual(0, code)
    self.assertEqual(['hello', 'world', '', ''], result)
    self.assertEqual([(1, {'class': ['foo'], 'test': ['bar']})], statuses)
  def testGenerateTestResult_noStatus(self):
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', [], 0, 1000)
    self.assertEqual('test.package.TestClass#testMethod', result.GetName())
    self.assertEqual(base_test_result.ResultType.UNKNOWN, result.GetType())
    self.assertEqual('', result.GetLog())
    self.assertEqual(1000, result.GetDur())

  def testGenerateTestResult_testPassed(self):
    statuses = [
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (0, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.PASS, result.GetType())

  def testGenerateTestResult_testSkipped_first(self):
    statuses = [
        (0, {
            'test_skipped': ['true'],
        }),
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (0, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())

  def testGenerateTestResult_testSkipped_last(self):
    statuses = [
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (0, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (0, {
            'test_skipped': ['true'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.SKIP, result.GetType())

  def testGenerateTestResult_testSkipped_false(self):
    statuses = [
        (0, {
            'test_skipped': ['false'],
        }),
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (0, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.PASS, result.GetType())
  def testGenerateTestResult_testFailed(self):
    statuses = [
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (-2, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.FAIL, result.GetType())

  def testGenerateTestResult_testCrashed(self):
    self.instance.test_pkg.GetPackageName = mock.Mock(
        return_value='generate.test.result.test.package')
    self.instance.device.old_interface.DismissCrashDialogIfNeeded = mock.Mock(
        return_value='generate.test.result.test.package')
    statuses = [
        (1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
        }),
        (-1, {
            'class': ['test.package.TestClass'],
            'test': ['testMethod'],
            'stack': ['', 'foo/bar.py (27)', 'hello/world.py (42)'],
        }),
    ]
    result = self.instance._GenerateTestResult(
        'test.package.TestClass#testMethod', statuses, 0, 1000)
    self.assertEqual(base_test_result.ResultType.CRASH, result.GetType())
    self.assertEqual('\nfoo/bar.py (27)\nhello/world.py (42)', result.GetLog())
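    # Illustrative note, not in the patch: the expected log is consistent
    # with joining the 'stack' bundle values with newlines. The first value
    # is the empty string, hence the leading '\n':
    #   '\n'.join(['', 'foo/bar.py (27)', 'hello/world.py (42)'])
    #   == '\nfoo/bar.py (27)\nhello/world.py (42)'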
  def testRunInstrumentationTest_verifyAdbShellCommand(self):
    self.instance.options.test_runner = 'MyTestRunner'
    self.instance.device.RunShellCommand = mock.Mock()
    self.instance._GenerateTestResult = mock.Mock()
    with mock.patch('pylib.instrumentation.test_runner.'
                    'TestRunner._ParseAmInstrumentRawOutput',
                    return_value=(mock.Mock(), mock.Mock(), mock.Mock())):
      self.instance.RunInstrumentationTest(
          'test.package.TestClass#testMethod',
          'test.package',
          {'test_arg_key': 'test_arg_value'},
          100)
      self.instance.device.RunShellCommand.assert_called_with(
          ['am', 'instrument', '-r',
           '-e', 'test_arg_key', "'test_arg_value'",
           '-e', 'class', "'test.package.TestClass#testMethod'",
           '-w', 'test.package/MyTestRunner'],
          timeout=100, retries=0)
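    # Illustrative note, not in the patch: joined together, the asserted
    # argument list is the device-side command this test expects:
    #   am instrument -r -e test_arg_key 'test_arg_value' \
    #       -e class 'test.package.TestClass#testMethod' \
    #       -w test.package/MyTestRunner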

if __name__ == '__main__':
  unittest.main(verbosity=2)
build/android/test_runner.py:

@@ -175,21 +175,21 @@ def AddJavaTestOptions(option_parser):
       '-E', '--exclude-annotation', dest='exclude_annotation_str',
       help=('Comma-separated list of annotations. Exclude tests with these '
             'annotations.'))
-  option_parser.add_option('--screenshot', dest='screenshot_failures',
-                           action='store_true',
-                           help='Capture screenshots of test failures')
-  option_parser.add_option('--save-perf-json', action='store_true',
-                           help='Saves the JSON file for each UI Perf test.')
-  option_parser.add_option('--official-build', action='store_true',
-                           help='Run official build tests.')
-  option_parser.add_option('--test_data', action='append', default=[],
-                           help=('Each instance defines a directory of test '
-                                 'data that should be copied to the target(s) '
-                                 'before running the tests. The argument '
-                                 'should be of the form <target>:<source>, '
-                                 '<target> is relative to the device data'
-                                 'directory, and <source> is relative to the '
-                                 'chromium build directory.'))
+  option_parser.add_option(
+      '--screenshot', dest='screenshot_failures', action='store_true',
+      help='Capture screenshots of test failures')
+  option_parser.add_option(
+      '--save-perf-json', action='store_true',
+      help='Saves the JSON file for each UI Perf test.')
+  option_parser.add_option(
+      '--official-build', action='store_true', help='Run official build tests.')
+  option_parser.add_option(
+      '--test_data', '--test-data', action='append', default=[],
+      help=('Each instance defines a directory of test data that should be '
+            'copied to the target(s) before running the tests. The argument '
+            'should be of the form <target>:<source>, <target> is relative to '
+            'the device data directory, and <source> is relative to the '
+            'chromium build directory.'))


 def ProcessJavaTestOptions(options):