Commit 1938b7a5 authored by Maksym Onufriienko, committed by Commit Bot

Updated bot summary logging for xcodebuild_runner.

Do the same logging as test_runner:
- show flaky/failed tests in bot summary
- display stacktrace for each failure and each attempt
- removed xcodebuild command from bot summary
- changed print -> logging

For non_started_tests case add `Message` from ErrorSummaries

The examples of log:
https://ci.chromium.org/p/chromium/builders/try/ios12-sdk-simulator/241
https://ci.chromium.org/p/chromium/builders/try/ios12-sdk-simulator/249

Bug: 912323
Change-Id: Ie48cf020db31ff2229a1e5a5be05f1ed651a1c0a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1495440
Reviewed-by: John Budorick <jbudorick@chromium.org>
Reviewed-by: Justin Cohen <justincohen@chromium.org>
Commit-Queue: Maksym Onufriienko <monufriienko@chromium.org>
Cr-Commit-Position: refs/heads/master@{#644423}
parent 31f8d48d
...@@ -48,7 +48,7 @@ def terminate_process(proc): ...@@ -48,7 +48,7 @@ def terminate_process(proc):
try: try:
proc.terminate() proc.terminate()
except OSError as ex: except OSError as ex:
print 'Error while killing a process: %s' % ex LOGGER.info('Error while killing a process: %s' % ex)
def test_status_summary(summary_plist): def test_status_summary(summary_plist):
...@@ -58,24 +58,22 @@ def test_status_summary(summary_plist): ...@@ -58,24 +58,22 @@ def test_status_summary(summary_plist):
summary_plist: (str) A path to plist-file. summary_plist: (str) A path to plist-file.
Returns: Returns:
A dict that contains all passed and failed tests from each egtests.app. A dict that contains all passed and failed tests from the egtests.app.
e.g. e.g.
{ {
'passed': { 'passed': [passed_tests],
'egtests.app': [passed_tests]
},
'failed': { 'failed': {
'egtests.app': [failed_tests] 'failed_test': ['StackTrace']
} }
} }
""" """
root_summary = plistlib.readPlist(summary_plist) root_summary = plistlib.readPlist(summary_plist)
status_summary = { status_summary = {
'passed': {}, 'passed': [],
'failed': {} 'failed': {}
} }
for summary in root_summary['TestableSummaries']: for summary in root_summary['TestableSummaries']:
failed_egtests = [] failed_egtests = {} # Contains test identifier and message
passed_egtests = [] passed_egtests = []
if not summary['Tests']: if not summary['Tests']:
continue continue
...@@ -84,35 +82,35 @@ def test_status_summary(summary_plist): ...@@ -84,35 +82,35 @@ def test_status_summary(summary_plist):
if test['TestStatus'] == 'Success': if test['TestStatus'] == 'Success':
passed_egtests.append(test['TestIdentifier']) passed_egtests.append(test['TestIdentifier'])
else: else:
failed_egtests.append(test['TestIdentifier']) message = []
module = summary['TargetName'].replace('_module', '.app') for failure_summary in test['FailureSummaries']:
message.append('%s: line %s' % (failure_summary['FileName'],
failure_summary['LineNumber']))
message.extend(failure_summary['Message'].splitlines())
failed_egtests[test['TestIdentifier']] = message
if failed_egtests: if failed_egtests:
status_summary['failed'][module] = failed_egtests status_summary['failed'] = failed_egtests
if passed_egtests: if passed_egtests:
status_summary['passed'][module] = passed_egtests status_summary['passed'] = passed_egtests
return status_summary return status_summary
def collect_test_results(plist_path, output): def collect_test_results(plist_path):
"""Gets test result data from Info.plist and from test output. """Gets test result data from Info.plist.
Args: Args:
plist_path: (str) A path to plist-file. plist_path: (str) A path to plist-file.
output: (str) An output of command.
Returns: Returns:
Test result as a map: Test result as a map:
{ {
'passed': { 'passed': [passed_tests],
'egtests.app': [passed_tests]
},
'failed': { 'failed': {
'egtests.app': [failed_tests] 'failed_test': ['StackTrace']
}, }
'output': ['Command output']
} }
""" """
test_results = { test_results = {
'passed': {}, 'passed': [],
'failed': {} 'failed': {}
} }
root = plistlib.readPlist(plist_path) root = plistlib.readPlist(plist_path)
...@@ -123,13 +121,16 @@ def collect_test_results(plist_path, output): ...@@ -123,13 +121,16 @@ def collect_test_results(plist_path, output):
root['TestsFailedCount'] == 0) root['TestsFailedCount'] == 0)
or 'TestSummaryPath' not in action_result): or 'TestSummaryPath' not in action_result):
test_results['failed']['TESTS_DID_NOT_START'] = [] test_results['failed']['TESTS_DID_NOT_START'] = []
if 'ErrorSummaries' in action_result and action_result['ErrorSummaries']:
test_results['failed']['TESTS_DID_NOT_START'].append('\n'.join(
error_summary['Message']
for error_summary in action_result['ErrorSummaries']))
else: else:
summary_plist = os.path.join(os.path.dirname(plist_path), summary_plist = os.path.join(os.path.dirname(plist_path),
action_result['TestSummaryPath']) action_result['TestSummaryPath'])
summary = test_status_summary(summary_plist) summary = test_status_summary(summary_plist)
test_results['failed'] = summary['failed'] test_results['failed'] = summary['failed']
test_results['passed'] = summary['passed'] test_results['passed'] = summary['passed']
test_results['output'] = output
return test_results return test_results
...@@ -274,7 +275,7 @@ class LaunchCommand(object): ...@@ -274,7 +275,7 @@ class LaunchCommand(object):
failed_results: Map of failed tests, where key is name of egtests_app and failed_results: Map of failed tests, where key is name of egtests_app and
value is a list of failed_test_case/test_methods: value is a list of failed_test_case/test_methods:
{ {
'egtests_app_name': [failed_test_case/test_methods] 'failed_test_case/test_methods': ['StackTrace']
} }
out_dir: (str) An output path. out_dir: (str) An output path.
test_args: List of strings to pass as arguments to the test when test_args: List of strings to pass as arguments to the test when
...@@ -287,9 +288,7 @@ class LaunchCommand(object): ...@@ -287,9 +288,7 @@ class LaunchCommand(object):
""" """
eg_app = EgtestsApp( eg_app = EgtestsApp(
egtests_app=self.egtests_app.egtests_path, egtests_app=self.egtests_app.egtests_path,
filtered_tests=[test.replace(' ', '/') for test in filtered_tests=[test.replace(' ', '/') for test in failed_results],
failed_results[os.path.basename(
self.egtests_app.egtests_path)]],
test_args=test_args, test_args=test_args,
env_vars=env_vars) env_vars=env_vars)
# Regenerates xctest run and gets a command. # Regenerates xctest run and gets a command.
...@@ -304,7 +303,7 @@ class LaunchCommand(object): ...@@ -304,7 +303,7 @@ class LaunchCommand(object):
""" """
plist = plistlib.readPlist(info_plist_path) plist = plistlib.readPlist(info_plist_path)
if 'TestFailureSummaries' not in plist or not plist['TestFailureSummaries']: if 'TestFailureSummaries' not in plist or not plist['TestFailureSummaries']:
print 'No failures in %s' % info_plist_path LOGGER.info('No failures in %s' % info_plist_path)
return return
screenshot_regex = re.compile(r'Screenshots:\s\{(\n.*)+?\n}') screenshot_regex = re.compile(r'Screenshots:\s\{(\n.*)+?\n}')
...@@ -318,15 +317,14 @@ class LaunchCommand(object): ...@@ -318,15 +317,14 @@ class LaunchCommand(object):
if not os.path.exists(test_case_folder): if not os.path.exists(test_case_folder):
os.makedirs(test_case_folder) os.makedirs(test_case_folder)
if screenshots: if screenshots:
print 'Screenshots for failure "%s" in "%s"' % ( LOGGER.info('Screenshots for failure "%s" in "%s"' % (
failure_summary['TestCase'], test_case_folder) failure_summary['TestCase'], test_case_folder))
d = json.loads(screenshots.group().replace('Screenshots:', '').strip()) d = json.loads(screenshots.group().replace('Screenshots:', '').strip())
for f in d.values(): for f in d.values():
if not os.path.exists(f): if not os.path.exists(f):
print 'File %s does not exist!' % f LOGGER.warning('File %s does not exist!' % f)
continue continue
screenshot = os.path.join(test_case_folder, os.path.basename(f)) screenshot = os.path.join(test_case_folder, os.path.basename(f))
print screenshot
shutil.copyfile(f, screenshot) shutil.copyfile(f, screenshot)
def summary_log(self): def summary_log(self):
...@@ -345,13 +343,11 @@ class LaunchCommand(object): ...@@ -345,13 +343,11 @@ class LaunchCommand(object):
for test_status in test_statuses: for test_status in test_statuses:
if test_status not in test_attempt_results: if test_status not in test_attempt_results:
continue continue
results = test_attempt_results[test_status] if (test_status == 'passed'
for _, destinations_egtests in results.iteritems(): # Number of failed tests is taken only from last run.
if test_status == 'passed' or ( or (test_status == 'failed'
# Number of failed tests is taken only from last run. and index == len(self.test_results['attempts']) - 1)):
test_status == 'failed' and index == len( self.logs[test_status] += len(test_attempt_results[test_status])
self.test_results['attempts']) - 1):
self.logs[test_status] += len(destinations_egtests)
def launch_attempt(self, cmd, out_dir): def launch_attempt(self, cmd, out_dir):
"""Launch a process and do logging simultaneously. """Launch a process and do logging simultaneously.
...@@ -361,12 +357,9 @@ class LaunchCommand(object): ...@@ -361,12 +357,9 @@ class LaunchCommand(object):
out_dir: (str) Output directory given to the command. Used in tests only. out_dir: (str) Output directory given to the command. Used in tests only.
Returns: Returns:
A tuple (cmd, returncode, output) where: returncode - return code of command run.
- returncode: return code of command run.
- output: command output as list of strings.
""" """
print 'Launching %s with env %s' % (cmd, self.env) LOGGER.info('Launching %s with env %s' % (cmd, self.env))
output = []
proc = subprocess.Popen( proc = subprocess.Popen(
cmd, cmd,
env=self.env, env=self.env,
...@@ -390,16 +383,14 @@ class LaunchCommand(object): ...@@ -390,16 +383,14 @@ class LaunchCommand(object):
break break
line = line.rstrip() line = line.rstrip()
LOGGER.info(line) LOGGER.info(line)
output.append(line)
sys.stdout.flush() sys.stdout.flush()
proc.wait() proc.wait()
LOGGER.info('Command %s finished with %d' % (cmd, proc.returncode)) LOGGER.info('Command %s finished with %d' % (cmd, proc.returncode))
return proc.returncode, output return proc.returncode
def launch(self): def launch(self):
"""Launches tests using xcodebuild.""" """Launches tests using xcodebuild."""
initial_command = []
cmd_list = [] cmd_list = []
self.test_results['attempts'] = [] self.test_results['attempts'] = []
...@@ -415,8 +406,6 @@ class LaunchCommand(object): ...@@ -415,8 +406,6 @@ class LaunchCommand(object):
outdir_attempt, outdir_attempt,
self.destination, self.destination,
self.shards) self.shards)
if attempt == 0:
initial_command = list(cmd_list)
# Re-init the command based on list of failed tests. # Re-init the command based on list of failed tests.
else: else:
cmd_list = self._make_cmd_list_for_failed_tests( cmd_list = self._make_cmd_list_for_failed_tests(
...@@ -428,10 +417,9 @@ class LaunchCommand(object): ...@@ -428,10 +417,9 @@ class LaunchCommand(object):
# TODO(crbug.com/914878): add heartbeat logging to xcodebuild_runner. # TODO(crbug.com/914878): add heartbeat logging to xcodebuild_runner.
LOGGER.info('Start test attempt #%d for command [%s]' % ( LOGGER.info('Start test attempt #%d for command [%s]' % (
attempt, ' '.join(cmd_list))) attempt, ' '.join(cmd_list)))
_, output = self.launch_attempt(cmd_list, outdir_attempt) self.launch_attempt(cmd_list, outdir_attempt)
self.test_results['attempts'].append( self.test_results['attempts'].append(
collect_test_results(os.path.join(outdir_attempt, 'Info.plist'), collect_test_results(os.path.join(outdir_attempt, 'Info.plist')))
output))
if self.retries == attempt or not self.test_results[ if self.retries == attempt or not self.test_results[
'attempts'][-1]['failed']: 'attempts'][-1]['failed']:
break break
...@@ -442,7 +430,6 @@ class LaunchCommand(object): ...@@ -442,7 +430,6 @@ class LaunchCommand(object):
self.summary_log() self.summary_log()
return { return {
'cmd': initial_command,
'test_results': self.test_results, 'test_results': self.test_results,
'logs': self.logs 'logs': self.logs
} }
...@@ -607,39 +594,59 @@ class SimulatorParallelTestRunner(test_runner.SimulatorTestRunner): ...@@ -607,39 +594,59 @@ class SimulatorParallelTestRunner(test_runner.SimulatorTestRunner):
def launch(self): def launch(self):
"""Launches tests using xcodebuild.""" """Launches tests using xcodebuild."""
try: destinaion_folder = lambda dest: dest.replace(
destinaion_folder = lambda dest: dest.replace( 'platform=iOS Simulator,', '').replace(',name=', ' ').replace('OS=', '')
'platform=iOS Simulator,', '').replace( launch_commands = []
',name=', ' ').replace('OS=', '') for params in self.sharding_data:
launch_commands = [] launch_commands.append(LaunchCommand(
for params in self.sharding_data: EgtestsApp(params['app'], filtered_tests=params['test_cases'],
launch_commands.append(LaunchCommand( env_vars=self.env_vars, test_args=self.test_args),
EgtestsApp(params['app'], filtered_tests=params['test_cases'], params['destination'],
env_vars=self.env_vars, test_args=self.test_args), shards=params['shards'],
params['destination'], retries=self.retries,
shards=params['shards'], out_dir=os.path.join(self.out_dir,
retries=self.retries, destinaion_folder(params['destination'])),
out_dir=os.path.join(self.out_dir, env=self.get_launch_env()))
destinaion_folder(params['destination'])),
env=self.get_launch_env())) pool = multiprocessing.pool.ThreadPool(len(launch_commands))
attempts_results = []
pool = multiprocessing.pool.ThreadPool(len(launch_commands)) for result in pool.imap_unordered(LaunchCommand.launch, launch_commands):
self.test_results['commands'] = [] attempts_results.append(result['test_results']['attempts'])
for result in pool.imap_unordered(LaunchCommand.launch, launch_commands): self.test_results['end_run'] = int(time.time())
self.logs[' '.join(result['cmd'])] = result['test_results']
self.test_results['commands'].append( # Gets passed tests
{'cmd': ' '.join(result['cmd']), 'logs': result['logs']}) self.logs['passed'] = []
self.test_results['end_run'] = int(time.time()) for shard_attempts in attempts_results:
LOGGER.debug('Test ended.') for attempt in shard_attempts:
# Test is failed if there are failures for the last run. self.logs['passed'].extend(attempt['passed'])
return not self.test_results['commands'][-1]['logs']['failed']
finally: # If the last attempt does not have failures, mark failed as empty
self.tear_down() self.logs['failed'] = []
for shard_attempts in attempts_results:
if shard_attempts[-1]['failed']:
self.logs['failed'].extend(shard_attempts[-1]['failed'].keys())
# Gets all failures/flakes and lists them in bot summary
all_failures = set()
for shard_attempts in attempts_results:
for attempt, attempt_results in enumerate(shard_attempts):
for failure in attempt_results['failed']:
if failure not in self.logs:
self.logs[failure] = []
self.logs[failure].append('%s: attempt # %d' % (failure, attempt))
self.logs[failure].extend(attempt_results['failed'][failure])
all_failures.add(failure)
# Gets only flaky(not failed) tests.
self.logs['flaked'] = list(all_failures - set(self.logs['failed']))
# Test is failed if there are failures for the last run.
return not self.logs['failed']
def erase_all_simulators(self): def erase_all_simulators(self):
"""Erases all simulator devices. """Erases all simulator devices.
Fix for DVTCoreSimulatorAdditionsErrorDomain error. Fix for DVTCoreSimulatorAdditionsErrorDomain error.
""" """
print 'Erasing all simulators.' LOGGER.info('Erasing all simulators.')
subprocess.call(['xcrun', 'simctl', 'erase', 'all']) subprocess.call(['xcrun', 'simctl', 'erase', 'all'])
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment