Commit b2392aa4 authored by Sergey Ulanov's avatar Sergey Ulanov Committed by Commit Bot

[Fuchsia] Save system logs to a file and remove LogReader

1. Added --system-log-file argument to the runner script which saves
   Fuchsia debug logs in a file
2. Enabled that flag in run-swarmed.py.
3. Removed the LogReader.

Bug: 869753
Change-Id: I4ef5c375e06937533841bab2696d90ed49887a0f
Reviewed-on: https://chromium-review.googlesource.com/1162899
Commit-Queue: Sergey Ulanov <sergeyu@chromium.org>
Reviewed-by: default avatarWez <wez@chromium.org>
Reviewed-by: default avatarDirk Pranke <dpranke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#581076}
parent 2398014e
...@@ -44,6 +44,8 @@ def AddCommonArgs(arg_parser): ...@@ -44,6 +44,8 @@ def AddCommonArgs(arg_parser):
common_args.add_argument('--ssh-config', '-F', common_args.add_argument('--ssh-config', '-F',
help='The path to the SSH configuration used for ' help='The path to the SSH configuration used for '
'connecting to the target device.') 'connecting to the target device.')
common_args.add_argument('--system-log-file',
help='File to write system logs to.')
common_args.add_argument('--exclude-system-logs', common_args.add_argument('--exclude-system-logs',
action='store_false', action='store_false',
dest='include_system_logs', dest='include_system_logs',
...@@ -75,7 +77,8 @@ def GetDeploymentTargetForArgs(args): ...@@ -75,7 +77,8 @@ def GetDeploymentTargetForArgs(args):
command line arguments.""" command line arguments."""
if not args.device: if not args.device:
return QemuTarget(args.output_directory, args.target_cpu) return QemuTarget(args.output_directory, args.target_cpu,
args.system_log_file)
else: else:
return DeviceTarget(args.output_directory, args.target_cpu, return DeviceTarget(args.output_directory, args.target_cpu, args.host,
args.host, args.port, args.ssh_config) args.port, args.ssh_config, args.system_log_file)
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
"""Implements commands for running and interacting with Fuchsia on devices.""" """Implements commands for running and interacting with Fuchsia on devices."""
import boot_data import boot_data
import log_reader
import logging import logging
import os import os
import subprocess import subprocess
...@@ -24,7 +23,7 @@ CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10 ...@@ -24,7 +23,7 @@ CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10
class DeviceTarget(target.Target): class DeviceTarget(target.Target):
def __init__(self, output_dir, target_cpu, host=None, port=None, def __init__(self, output_dir, target_cpu, host=None, port=None,
ssh_config=None): ssh_config=None, system_log_file=None):
"""output_dir: The directory which will contain the files that are """output_dir: The directory which will contain the files that are
generated to support the deployment. generated to support the deployment.
target_cpu: The CPU architecture of the deployment target. Can be target_cpu: The CPU architecture of the deployment target. Can be
...@@ -38,6 +37,8 @@ class DeviceTarget(target.Target): ...@@ -38,6 +37,8 @@ class DeviceTarget(target.Target):
self._port = 22 self._port = 22
self._auto = not host or not ssh_config self._auto = not host or not ssh_config
self._new_instance = True self._new_instance = True
self._system_log_file = system_log_file
self._loglistener = None
if self._auto: if self._auto:
self._ssh_config_path = EnsurePathExists( self._ssh_config_path = EnsurePathExists(
...@@ -49,6 +50,10 @@ class DeviceTarget(target.Target): ...@@ -49,6 +50,10 @@ class DeviceTarget(target.Target):
self._port = port self._port = port
self._new_instance = False self._new_instance = False
def __exit__(self, exc_type, exc_val, exc_tb):
if self._loglistener:
self._loglistener.kill()
def __Discover(self, node_name): def __Discover(self, node_name):
"""Returns the IP address and port of a Fuchsia instance discovered on """Returns the IP address and port of a Fuchsia instance discovered on
the local area network.""" the local area network."""
...@@ -98,20 +103,16 @@ class DeviceTarget(target.Target): ...@@ -98,20 +103,16 @@ class DeviceTarget(target.Target):
logging.debug(' '.join(bootserver_command)) logging.debug(' '.join(bootserver_command))
subprocess.check_call(bootserver_command) subprocess.check_call(bootserver_command)
# Setup loglistener. Logs will be redirected to stdout if the device takes # Start loglistener to save system logs.
# longer than expected to boot. if self._system_log_file:
loglistener_path = os.path.join(SDK_ROOT, 'tools', 'loglistener') loglistener_path = os.path.join(SDK_ROOT, 'tools', 'loglistener')
loglistener = subprocess.Popen([loglistener_path, node_name], self._loglistener = subprocess.Popen(
stdout=subprocess.PIPE, [loglistener_path, node_name],
stderr=subprocess.STDOUT, stdout=open(self._system_log_file, 'w'),
stdin=open(os.devnull)) stderr=subprocess.STDOUT, stdin=open(os.devnull))
self._SetSystemLogsReader(
log_reader.LogReader(loglistener, loglistener.stdout))
logging.debug('Waiting for device to join network.') logging.debug('Waiting for device to join network.')
for retry in xrange(CONNECT_RETRY_COUNT): for retry in xrange(CONNECT_RETRY_COUNT):
if retry == CONNECT_RETRY_COUNT_BEFORE_LOGGING:
self._system_logs_reader.RedirectTo(sys.stdout);
self._host = self.__Discover(node_name) self._host = self.__Discover(node_name)
if self._host: if self._host:
break break
......
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import os
import Queue
import select
import threading
class LogReader(object):
  """Reads a debug log stream on a background thread.

  By default all log messages are stored in an internal buffer. After
  RedirectTo() is called all logs are redirected to the specified stream.
  Optionally the caller can give ownership of the log process to the reader.
  That process will be killed automatically when the log reader is closed.
  """

  def __init__(self, process, stream):
    """process: Optional process that produces the log; killed on Close().
    stream: File-like object whose lines are read on a background thread."""
    self._process = process
    self._stream = stream
    # Lines read from |stream| accumulate here until RedirectTo() is called.
    self._buffer = Queue.Queue()

    # Start a background thread that redirects the stream to self._buffer.
    self._StartThread(lambda line: self._buffer.put(line))

  def _StartThread(self, on_message_received):
    """Starts the reader thread, delivering each line to
    |on_message_received|."""
    # A pipe (rather than e.g. an Event) is used so the reader thread can
    # poll() it together with the log stream and wake promptly on shutdown.
    self._quit_pipe, thread_quit_pipe = multiprocessing.Pipe()
    self._thread = threading.Thread(target=self._ReadThread,
                                    args=(thread_quit_pipe,
                                          on_message_received))
    # Daemonize so a leaked reader cannot keep the interpreter alive.
    self._thread.daemon = True
    self._thread.start()

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.Close()

  def __del__(self):
    self.Close()

  def _ReadThread(self, quit_pipe, on_message_received):
    """Main function for the background thread that reads |self._stream| and
    calls |on_message_received| for each line."""
    poll = select.poll()
    poll.register(self._stream.fileno(), select.POLLIN)
    poll.register(quit_pipe.fileno(), select.POLLIN)
    quit = False
    while not quit:
      events = poll.poll()
      for fileno, event in events:
        if fileno == quit_pipe.fileno():
          # _StopThread() signaled through the pipe: exit the loop.
          quit = True
          break
        assert fileno == self._stream.fileno()
        if event & select.POLLIN:
          on_message_received(self._stream.readline())
        elif event & select.POLLHUP:
          # Writing end of the stream was closed; nothing more to read.
          quit = True

  def _StopThread(self):
    """Signals the reader thread to quit and waits for it to finish.

    No-op if the thread was already stopped."""
    if self._thread:
      try:
        self._quit_pipe.send("quit")
      except IOError:
        # The thread has already quit and closed the other end of the pipe.
        pass
      self._thread.join();
      self._thread = None

  def Close(self):
    """Stops the reader thread and kills the owned process, if any.

    Safe to call multiple times (subsequent calls are no-ops)."""
    self._StopThread()
    if self._process:
      self._process.kill()
      self._process = None

  def RedirectTo(self, stream):
    """Flushes all buffered log lines to |stream| and forwards every
    subsequent line there instead of the internal buffer."""
    self._StopThread()

    # Drain and reset the buffer.
    while True:
      try:
        line = self._buffer.get_nowait()
        stream.write(line)
      except Queue.Empty:
        break

    # Restart the thread, now writing each new line directly to |stream|.
    self._StartThread(lambda line: stream.write(line))
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
"""Implements commands for running and interacting with Fuchsia on QEMU.""" """Implements commands for running and interacting with Fuchsia on QEMU."""
import boot_data import boot_data
import log_reader
import logging import logging
import target import target
import os import os
...@@ -34,7 +33,7 @@ def _GetAvailableTcpPort(): ...@@ -34,7 +33,7 @@ def _GetAvailableTcpPort():
class QemuTarget(target.Target): class QemuTarget(target.Target):
def __init__(self, output_dir, target_cpu, def __init__(self, output_dir, target_cpu, system_log_file,
ram_size_mb=2048): ram_size_mb=2048):
"""output_dir: The directory which will contain the files that are """output_dir: The directory which will contain the files that are
generated to support the QEMU deployment. generated to support the QEMU deployment.
...@@ -43,6 +42,7 @@ class QemuTarget(target.Target): ...@@ -43,6 +42,7 @@ class QemuTarget(target.Target):
super(QemuTarget, self).__init__(output_dir, target_cpu) super(QemuTarget, self).__init__(output_dir, target_cpu)
self._qemu_process = None self._qemu_process = None
self._ram_size_mb = ram_size_mb self._ram_size_mb = ram_size_mb
self._system_log_file = system_log_file
def __enter__(self): def __enter__(self):
return self return self
...@@ -138,13 +138,19 @@ class QemuTarget(target.Target): ...@@ -138,13 +138,19 @@ class QemuTarget(target.Target):
logging.debug('Launching QEMU.') logging.debug('Launching QEMU.')
logging.debug(' '.join(qemu_command)) logging.debug(' '.join(qemu_command))
# QEMU stderr/stdout are redirected to LogReader to debug # Zircon sends debug logs to serial port (see kernel.serial=legacy flag
# above). Serial port is redirected to a file through QEMU stdout.
# This approach is used instead of loglistener to debug
# https://crbug.com/869753 . # https://crbug.com/869753 .
if self._system_log_file:
stdout = open(self._system_log_file, 'w')
stderr = subprocess.STDOUT
else:
stdout = open(os.devnull)
stderr = open(os.devnull)
self._qemu_process = subprocess.Popen(qemu_command, stdin=open(os.devnull), self._qemu_process = subprocess.Popen(qemu_command, stdin=open(os.devnull),
stdout=subprocess.PIPE, stdout=stdout, stderr=stderr)
stderr=subprocess.STDOUT)
self._SetSystemLogsReader(
log_reader.LogReader(None, self._qemu_process.stdout))
self._WaitUntilReady(); self._WaitUntilReady();
def Shutdown(self): def Shutdown(self):
......
...@@ -4,7 +4,6 @@ ...@@ -4,7 +4,6 @@
import boot_data import boot_data
import common import common
import log_reader
import logging import logging
import remote_cmd import remote_cmd
import sys import sys
...@@ -29,7 +28,6 @@ class Target(object): ...@@ -29,7 +28,6 @@ class Target(object):
self._started = False self._started = False
self._dry_run = False self._dry_run = False
self._target_cpu = target_cpu self._target_cpu = target_cpu
self._system_logs_reader = None
# Functions used by the Python context manager for teardown. # Functions used by the Python context manager for teardown.
def __enter__(self): def __enter__(self):
...@@ -145,32 +143,17 @@ class Target(object): ...@@ -145,32 +143,17 @@ class Target(object):
def _AssertIsStarted(self): def _AssertIsStarted(self):
assert self.IsStarted() assert self.IsStarted()
def _SetSystemLogsReader(self, reader):
if self._system_logs_reader:
_system_logs_reader.Close()
self._system_logs_reader = reader
def _WaitUntilReady(self, retries=_ATTACH_MAX_RETRIES): def _WaitUntilReady(self, retries=_ATTACH_MAX_RETRIES):
logging.info('Connecting to Fuchsia using SSH.') logging.info('Connecting to Fuchsia using SSH.')
try: for retry in xrange(retries + 1):
for retry in xrange(retries + 1): host, port = self._GetEndpoint()
if retry == 2 and self._system_logs_reader: if remote_cmd.RunSsh(self._GetSshConfigPath(), host, port, ['true'],
# Log system logs after 2 failed SSH connection attempt. True) == 0:
self._system_logs_reader.RedirectTo(sys.stdout); logging.info('Connected!')
self._started = True
host, port = self._GetEndpoint() return True
if remote_cmd.RunSsh(self._GetSshConfigPath(), host, port, ['true'], time.sleep(_ATTACH_RETRY_INTERVAL)
True) == 0:
logging.info('Connected!')
self._started = True
return True
time.sleep(_ATTACH_RETRY_INTERVAL)
finally:
# Redirect logs to /dev/null. run_package.py will use SSH+dlog to get
# logs from the machine to console.
if self._system_logs_reader:
self._system_logs_reader.RedirectTo(open('/dev/null', 'w'));
logging.error('Timeout limit reached.') logging.error('Timeout limit reached.')
......
...@@ -11,7 +11,7 @@ etc. and allowing greater parallelism. ...@@ -11,7 +11,7 @@ etc. and allowing greater parallelism.
To use, run in a new shell (it blocks until all Swarming jobs complete): To use, run in a new shell (it blocks until all Swarming jobs complete):
tools/fuchsia/run-swarmed.py -t content_unittests --out-dir=out/fuch tools/run-swarmed.py -t content_unittests --out-dir=out/fuch
The logs of the runs will be stored in results/ (or specify a results directory The logs of the runs will be stored in results/ (or specify a results directory
with --results=some_dir). You can then do something like `grep -L SUCCESS with --results=some_dir). You can then do something like `grep -L SUCCESS
...@@ -62,7 +62,8 @@ def _Spawn(args): ...@@ -62,7 +62,8 @@ def _Spawn(args):
trigger_args += [ '-d', 'os', 'Windows' ] trigger_args += [ '-d', 'os', 'Windows' ]
trigger_args += [ trigger_args += [
'--', '--',
'--test-launcher-summary-output=${ISOLATED_OUTDIR}/output.json'] '--test-launcher-summary-output=${ISOLATED_OUTDIR}/output.json',
'--system-log-file=${ISOLATED_OUTDIR}/system_log']
if gtest_filter: if gtest_filter:
trigger_args.append('--gtest_filter=' + gtest_filter) trigger_args.append('--gtest_filter=' + gtest_filter)
elif args.target_os == 'fuchsia': elif args.target_os == 'fuchsia':
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment