Commit a02a2039 authored by smut's avatar smut Committed by Commit bot

Support xctests on simulators

xctest_utils.py is copied from chromium/tools/build/scripts/slave/ios/xctest_utils.py.

BUG=608537

Review-Url: https://chromiumcodereview.appspot.com/2437953002
Cr-Commit-Position: refs/heads/master@{#426933}
parent 72a715af
#!/usr/bin/env python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
......
......@@ -43,6 +43,7 @@ def main(args, test_args):
args.out_dir,
env_vars=args.env_var,
test_args=test_args,
xctest=args.xctest,
)
else:
tr = test_runner.DeviceTestRunner(
......@@ -127,6 +128,11 @@ if __name__ == '__main__':
metavar='ver',
required=True,
)
parser.add_argument(
'--xctest',
action='store_true',
help='Whether or not the given app should be run as an XCTest.',
)
args, test_args = parser.parse_known_args()
if args.iossim or args.platform or args.version:
......@@ -139,6 +145,7 @@ if __name__ == '__main__':
args_json = json.loads(args.args_json)
args.env_var = args.env_var or []
args.env_var.extend(args_json.get('env_var', []))
args.xctest = args_json.get('xctest', args.xctest)
test_args.extend(args_json.get('test_args', []))
sys.exit(main(args, test_args))
......@@ -16,6 +16,7 @@ import time
import find_xcode
import gtest_utils
import xctest_utils
class Error(Exception):
......@@ -47,6 +48,13 @@ class DeviceDetectionError(TestRunnerError):
'Expected one device, found %s:\n%s' % (len(udids), '\n'.join(udids)))
class PlugInsNotFoundError(TestRunnerError):
  """Raised when an app bundle has no PlugIns directory."""

  def __init__(self, plugins_dir):
    """Initializes the error.

    Args:
      plugins_dir: The PlugIns directory path that does not exist.
    """
    message = 'PlugIns directory does not exist: %s' % plugins_dir
    super(PlugInsNotFoundError, self).__init__(message)
class SimulatorNotFoundError(TestRunnerError):
"""The given simulator binary was not found."""
def __init__(self, iossim_path):
......@@ -54,13 +62,20 @@ class SimulatorNotFoundError(TestRunnerError):
'Simulator does not exist: %s' % iossim_path)
class XcodeVersionNotFound(TestRunnerError):
class XcodeVersionNotFoundError(TestRunnerError):
  """The requested version of Xcode was not found."""

  def __init__(self, xcode_version):
    """Initializes the error.

    Args:
      xcode_version: The Xcode version that could not be found.
    """
    # Format the message with % rather than passing the value as a second
    # positional arg; the latter leaves the '%s' placeholder unfilled (the
    # exception's args would be the tuple ('Xcode version not found: %s',
    # xcode_version)). This also matches PlugInsNotFoundError's style.
    super(XcodeVersionNotFoundError, self).__init__(
        'Xcode version not found: %s' % xcode_version)
class XCTestPlugInNotFoundError(TestRunnerError):
  """The .xctest PlugIn was not found."""

  def __init__(self, xctest_path):
    """Initializes the error.

    Args:
      xctest_path: The .xctest PlugIn path that could not be found.
    """
    # Format the message with % rather than passing the value as a second
    # positional arg; the latter leaves the '%s' placeholder unfilled (the
    # exception's args would be the tuple ('XCTest not found: %s',
    # xctest_path)). This also matches PlugInsNotFoundError's style.
    super(XCTestPlugInNotFoundError, self).__init__(
        'XCTest not found: %s' % xctest_path)
def get_kif_test_filter(tests, invert=False):
"""Returns the KIF test filter to filter the given test cases.
......@@ -105,7 +120,14 @@ class TestRunner(object):
"""Base class containing common functionality."""
def __init__(
self, app_path, xcode_version, out_dir, env_vars=None, test_args=None):
self,
app_path,
xcode_version,
out_dir,
env_vars=None,
test_args=None,
xctest=False,
):
"""Initializes a new instance of this class.
Args:
......@@ -115,10 +137,13 @@ class TestRunner(object):
env_vars: List of environment variables to pass to the test itself.
test_args: List of strings to pass as arguments to the test when
launching.
xctest: Whether or not this is an XCTest.
Raises:
AppNotFoundError: If the given app does not exist.
PlugInsNotFoundError: If the PlugIns directory does not exist for XCTests.
XcodeVersionNotFoundError: If the given Xcode version does not exist.
XCTestPlugInNotFoundError: If the .xctest PlugIn does not exist.
"""
if not os.path.exists(app_path):
raise AppNotFoundError(app_path)
......@@ -141,6 +166,17 @@ class TestRunner(object):
self.out_dir = out_dir
self.test_args = test_args or []
self.xcode_version = xcode_version
self.xctest_path = ''
if xctest:
plugins_dir = os.path.join(self.app_path, 'PlugIns')
if not os.path.exists(plugins_dir):
raise PlugInsNotFoundError(plugins_dir)
for plugin in os.listdir(plugins_dir):
if plugin.endswith('.xctest'):
self.xctest_path = os.path.join(plugins_dir, plugin)
if not os.path.exists(self.xctest_path):
raise XCTestPlugInNotFoundError(self.xctest_path)
def get_launch_command(self, test_filter=None, invert=False):
"""Returns the command that can be used to launch the test app.
......@@ -170,8 +206,7 @@ class TestRunner(object):
os.path.join(self.out_dir, 'desktop_%s.png' % time.time()),
])
@staticmethod
def _run(cmd):
def _run(self, cmd):
"""Runs the specified command, parsing GTest output.
Args:
......@@ -183,8 +218,11 @@ class TestRunner(object):
print ' '.join(cmd)
print
parser = gtest_utils.GTestLogParser()
result = gtest_utils.GTestResult(cmd)
if self.xctest_path:
parser = xctest_utils.XCTestLogParser()
else:
parser = gtest_utils.GTestLogParser()
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
......@@ -238,7 +276,8 @@ class TestRunner(object):
flaked = result.flaked_tests
try:
while result.crashed and result.crashed_test:
# XCTests cannot currently be resumed at the next test case.
while not self.xctest_path and result.crashed and result.crashed_test:
# If the app crashes during a specific test case, then resume at the
# next test case. This is achieved by filtering out every test case
# which has already run.
......@@ -281,6 +320,7 @@ class SimulatorTestRunner(TestRunner):
out_dir,
env_vars=None,
test_args=None,
xctest=False,
):
"""Initializes a new instance of this class.
......@@ -296,10 +336,13 @@ class SimulatorTestRunner(TestRunner):
env_vars: List of environment variables to pass to the test itself.
test_args: List of strings to pass as arguments to the test when
launching.
xctest: Whether or not this is an XCTest.
Raises:
AppNotFoundError: If the given app does not exist.
PlugInsNotFoundError: If the PlugIns directory does not exist for XCTests.
XcodeVersionNotFoundError: If the given Xcode version does not exist.
XCTestPlugInNotFoundError: If the .xctest PlugIn does not exist.
"""
super(SimulatorTestRunner, self).__init__(
app_path,
......@@ -307,6 +350,7 @@ class SimulatorTestRunner(TestRunner):
out_dir,
env_vars=env_vars,
test_args=test_args,
xctest=xctest,
)
if not os.path.exists(iossim_path):
......@@ -446,6 +490,8 @@ class SimulatorTestRunner(TestRunner):
cmd.extend(['-e', env_var])
cmd.append(self.app_path)
if self.xctest_path:
cmd.append(self.xctest_path)
cmd.extend(self.test_args)
cmd.extend(args)
return cmd
......
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import tempfile
# These labels should match the ones output by gtest's JSON.
# They are the values returned by XCTestLogParser.TriesForTest() below.
TEST_UNKNOWN_LABEL = 'UNKNOWN'  # Test status could not be determined.
TEST_SUCCESS_LABEL = 'SUCCESS'
TEST_FAILURE_LABEL = 'FAILURE'
TEST_CRASH_LABEL = 'CRASH'
TEST_TIMEOUT_LABEL = 'TIMEOUT'
TEST_WARNING_LABEL = 'WARNING'  # Failed in parallel but passed when re-run.
class XCTestLogParser(object):
  """This helper class process XCTest test output.

  Feed raw log lines to ProcessLine() one at a time; afterwards query
  results via PassedTests(), FailedTests(), FailureDescription(), etc.
  """

  def __init__(self):
    # State tracking for log parsing
    self.completed = False
    self._current_test = ''
    self._failure_description = []
    self._current_report_hash = ''
    self._current_report = []
    self._parsing_failures = False

    # Line number currently being processed.
    self._line_number = 0

    # List of parsing errors, as human-readable strings.
    self._internal_error_lines = []

    # Tests are stored here as 'test.name': (status, [description]).
    # The status should be one of ('started', 'OK', 'failed', 'timeout',
    # 'warning'). Warning indicates that a test did not pass when run in
    # parallel with other tests but passed when run alone. The description is
    # a list of lines detailing the test's error, as reported in the log.
    self._test_status = {}

    # This may be either text or a number. It will be used in the phrase
    # '%s disabled' or '%s flaky' on the waterfall display.
    self._disabled_tests = 0
    self._flaky_tests = 0

    # Matches XCTest method names of the form '-[SuiteName testName]',
    # capturing the suite and the test case name.
    test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
    self._test_name = re.compile(test_name_regexp)
    self._test_start = re.compile(
        r'Test Case \'' + test_name_regexp + '\' started\.')
    self._test_ok = re.compile(
        r'Test Case \'' + test_name_regexp +
        '\' passed\s+\(\d+\.\d+\s+seconds\)?.')
    self._test_fail = re.compile(
        r'Test Case \'' + test_name_regexp +
        '\' failed\s+\(\d+\.\d+\s+seconds\)?.')
    # Overall success banner emitted at the end of a fully passing run.
    self._test_passed = re.compile(r'\*\*\s+TEST\s+EXECUTE\s+SUCCEEDED\s+\*\*')
    self._retry_message = re.compile('RETRYING FAILED TESTS:')
    self.retrying_failed = False
    # Maps internal status strings to the gtest-style module-level labels.
    self.TEST_STATUS_MAP = {
        'OK': TEST_SUCCESS_LABEL,
        'failed': TEST_FAILURE_LABEL,
        'timeout': TEST_TIMEOUT_LABEL,
        'warning': TEST_WARNING_LABEL
    }

  def GetCurrentTest(self):
    """Returns the name of the test currently being parsed ('' if none)."""
    return self._current_test

  def _StatusOfTest(self, test):
    """Returns the status code for the given test, or 'not known'."""
    test_status = self._test_status.get(test, ('not known', []))
    return test_status[0]

  def _TestsByStatus(self, status, include_fails, include_flaky):
    """Returns list of tests with the given status.

    Args:
      status: The internal status string to filter by, e.g. 'OK', 'failed',
          'timeout', 'warning', 'started'.
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
    test_list = [x[0] for x in self._test_status.items()
                 if self._StatusOfTest(x[0]) == status]

    if not include_fails:
      test_list = [x for x in test_list if x.find('FAILS_') == -1]
    if not include_flaky:
      test_list = [x for x in test_list if x.find('FLAKY_') == -1]

    return test_list

  def _RecordError(self, line, reason):
    """Record a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred
      reason: a string describing the error
    """
    self._internal_error_lines.append('%s: %s [%s]' %
                                      (self._line_number, line.strip(), reason))

  def RunningTests(self):
    """Returns list of tests that appear to be currently running."""
    return self._TestsByStatus('started', True, True)

  def ParsingErrors(self):
    """Returns a list of lines that have caused parsing errors."""
    return self._internal_error_lines

  def ClearParsingErrors(self):
    """Clears the currently stored parsing errors."""
    # Note: replaced with a single sentinel entry rather than emptied.
    self._internal_error_lines = ['Cleared.']

  def PassedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that passed."""
    return self._TestsByStatus('OK', include_fails, include_flaky)

  def FailedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
    """
    return (self._TestsByStatus('failed', include_fails, include_flaky) +
            self._TestsByStatus('timeout', True, True) +
            self._TestsByStatus('warning', include_fails, include_flaky) +
            self.RunningTests())

  def TriesForTest(self, test):
    """Returns a list containing the state for all tries of the given test.
    This parser doesn't support retries so a single result is returned."""
    return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)]

  def FailureDescription(self, test):
    """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
    test_status = self._test_status.get(test, ('', []))
    return ['%s: ' % test] + test_status[1]

  def CompletedWithoutFailure(self):
    """Returns True if all tests completed and no tests failed unexpectedly."""
    return self.completed

  def ProcessLine(self, line):
    """This is called once with each line of the test log."""
    # Track line number for error messages.
    self._line_number += 1

    # Some tests (net_unittests in particular) run subprocesses which can write
    # stuff to shared stdout buffer. Sometimes such output appears between new
    # line and gtest directives ('[ RUN ]', etc) which breaks the parser.
    # Code below tries to detect such cases and recognize a mixed line as two
    # separate lines.

    # List of regexps that parses expects to find at the start of a line but
    # which can be somewhere in the middle.
    gtest_regexps = [
        self._test_start,
        self._test_ok,
        self._test_fail,
        self._test_passed,
    ]

    for regexp in gtest_regexps:
      match = regexp.search(line)
      if match:
        break

    if not match or match.start() == 0:
      self._ProcessLine(line)
    else:
      # Split the mixed line at the start of the recognized directive and
      # process the two halves independently.
      self._ProcessLine(line[:match.start()])
      self._ProcessLine(line[match.start():])

  def _ProcessLine(self, line):
    """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """
    # Is it a line declaring all tests passed?
    results = self._test_passed.match(line)
    if results:
      self.completed = True
      self._current_test = ''
      return

    # Is it the start of a test?
    results = self._test_start.match(line)
    if results:
      if self._current_test:
        # A new test started while the previous one never reported a result:
        # mark the previous test as timed out.
        if self._test_status[self._current_test][0] == 'started':
          self._test_status[self._current_test] = (
              'timeout', self._failure_description)
      test_name = '%s.%s' % (results.group(1), results.group(2))
      self._test_status[test_name] = ('started', ['Did not complete.'])
      self._current_test = test_name
      if self.retrying_failed:
        # Keep the original failure output and append the retry's output.
        self._failure_description = self._test_status[test_name][1]
        self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
      else:
        self._failure_description = []
      return

    # Is it a test success line?
    results = self._test_ok.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status != 'started':
        self._RecordError(line, 'success while in status %s' % status)
      if self.retrying_failed:
        # Passed only on retry: record as a warning, keeping the output.
        self._test_status[test_name] = ('warning', self._failure_description)
      else:
        self._test_status[test_name] = ('OK', [])
      self._failure_description = []
      self._current_test = ''
      return

    # Is it a test failure line?
    results = self._test_fail.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status not in ('started', 'failed', 'timeout'):
        self._RecordError(line, 'failure while in status %s' % status)
      # Don't overwrite the failure description when a failing test is listed a
      # second time in the summary, or if it was already recorded as timing
      # out.
      if status not in ('failed', 'timeout'):
        self._test_status[test_name] = ('failed', self._failure_description)
      self._failure_description = []
      self._current_test = ''
      return

    # Is it the start of the retry tests?
    results = self._retry_message.match(line)
    if results:
      self.retrying_failed = True
      return

    # Random line: if we're in a test, collect it for the failure description.
    # Tests may run simultaneously, so this might be off, but it's worth a try.
    # This also won't work if a test times out before it begins running.
    if self._current_test:
      self._failure_description.append(line)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment