Commit c4ec08a5 authored by Kevin Marshall, committed by Commit Bot

Fuchsia: Create new runner scripts which leverage packaging and SSH.

Generates an alternative runner script for each test target,
with a "_v2" suffix, e.g. run_base_unittests_v2.
The old-style runner scripts will still be generated without the
"_v2" suffix, but they are deprecated and will be removed soon as
part of a followup CL.

The new scripts will bring up a new QEMU VM, a Zedboot physical
device, or continue to use an existing device that is currently
accepting SSH connections.

Once connected, the scripts push the package bits onto the device,
execute the package using SSH, extract the build logs and other
artifacts from the device, and finally clean up the state.

Other changes:
* Add DeviceTarget impl for deployment.
* Use relative paths in package archive manifests, for symbolization.
* Add logging statements everywhere, and a -v switch to turn them on.
* Add package cleanup teardown step, to avoid cluttering up long-running
  devices.
* Refactor symbolization as a stream filter object.


Bug: 799309
Change-Id: Iffaefd1d3a5ce919ba2dea4e29033987454c2eed
Reviewed-on: https://chromium-review.googlesource.com/882441
Commit-Queue: Kevin Marshall <kmarshall@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Wez <wez@chromium.org>
Reviewed-by: Scott Graham <scottmg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533200}
parent 279eff35
...@@ -16,11 +16,10 @@ template("package") { ...@@ -16,11 +16,10 @@ template("package") {
} }
assert(defined(pkg.binary)) assert(defined(pkg.binary))
_pkg_out_dir = "$root_build_dir/package" _pkg_out_dir = "$root_out_dir/gen/" +
_runtime_deps_file = "$root_out_dir/gen.runtime/${pkg.package_name}/" + get_label_info(pkg.package_name, "dir")
"${pkg.package_name}.runtime_deps" _runtime_deps_file = "$_pkg_out_dir/${pkg.package_name}.runtime_deps"
_manifest_file = "$root_out_dir/gen.runtime/${pkg.package_name}/" + _manifest_file = "$_pkg_out_dir/${pkg.package_name}.archive_manifest"
"${pkg.package_name}.archive_manifest"
_archive_file = "$_pkg_out_dir/${pkg.package_name}.far" _archive_file = "$_pkg_out_dir/${pkg.package_name}.far"
_write_archive_target = "${pkg.package_name}__write_archive" _write_archive_target = "${pkg.package_name}__write_archive"
_write_manifest_target = "${pkg.package_name}__write_manifest" _write_manifest_target = "${pkg.package_name}__write_manifest"
......
...@@ -14,6 +14,12 @@ template("generate_runner_script") { ...@@ -14,6 +14,12 @@ template("generate_runner_script") {
"$root_out_dir/gen.runtime/" + "$root_out_dir/gen.runtime/" +
get_label_info(invoker.root_target_name, "dir") + "/" + get_label_info(invoker.root_target_name, "dir") + "/" +
get_label_info(invoker.root_target_name, "name") + ".runtime_deps" get_label_info(invoker.root_target_name, "name") + ".runtime_deps"
_pkg_dir = "$root_out_dir/gen/" + get_label_info(invoker.package_name, "dir")
_manifest_path = "$_pkg_dir/${invoker.package_name}.archive_manifest"
_package_path = "$_pkg_dir/${invoker.package_name}.far"
_runner_target = "${target_name}_runner"
_legacy_runner_target = "${target_name}_legacy_runner"
_target_name = target_name
group(_runtime_deps_target) { group(_runtime_deps_target) {
forward_variables_from(invoker, forward_variables_from(invoker,
...@@ -25,7 +31,17 @@ template("generate_runner_script") { ...@@ -25,7 +31,17 @@ template("generate_runner_script") {
write_runtime_deps = _runtime_deps_file write_runtime_deps = _runtime_deps_file
} }
action(target_name) { # Build both types of runner scripts until the legacy scripts can be removed.
# TODO(crbug.com/805057): delete legacy runner scripts.
group(target_name) {
forward_variables_from(invoker, [ "testonly" ])
data_deps = [
":${_legacy_runner_target}",
]
data_deps += [ ":${_runner_target}" ]
}
action(_legacy_runner_target) {
forward_variables_from(invoker, forward_variables_from(invoker,
[ [
"deps", "deps",
...@@ -77,6 +93,59 @@ template("generate_runner_script") { ...@@ -77,6 +93,59 @@ template("generate_runner_script") {
rebase_path(_runtime_deps_file, root_build_dir), rebase_path(_runtime_deps_file, root_build_dir),
] ]
} }
action(_runner_target) {
forward_variables_from(invoker,
[
"runner_script",
"target",
"testonly",
])
deps = [
"//testing/buildbot/filters:fuchsia_filters",
]
_generated_script = "${invoker.generated_script}_v2"
script = "//build/fuchsia/runner_v2/create_runner_script.py"
depfile = "$target_gen_dir/$_target_name.d"
outputs = [
_generated_script,
]
data = [
_generated_script,
"//build/fuchsia/runner_v2/",
"//build/util/lib/",
"//third_party/fuchsia-sdk/",
]
# Arguments used at build time by the runner script generator.
args = [
"--script-output-path",
rebase_path(_generated_script, root_build_dir, root_out_dir),
"--package",
rebase_path(_package_path, root_out_dir, root_build_dir),
"--package-manifest",
rebase_path(_manifest_path, root_out_dir, root_build_dir),
]
if (defined(invoker.use_test_server) && invoker.use_test_server) {
args += [ "--enable-test-server" ]
}
# Arguments used at runtime by the test runner.
args += [
"--runner-script",
runner_script,
"--output-directory",
rebase_path(root_build_dir, root_build_dir),
"--target-cpu",
target_cpu,
]
}
} }
# This template is used to generate a runner script for test binaries into the # This template is used to generate a runner script for test binaries into the
...@@ -134,6 +203,7 @@ template("fuchsia_executable_runner") { ...@@ -134,6 +203,7 @@ template("fuchsia_executable_runner") {
] ]
exe_path = _exe_path exe_path = _exe_path
root_target_name = invoker.target_name root_target_name = invoker.target_name
package_name = _exe_name
} }
group(target_name) { group(target_name) {
...@@ -145,6 +215,8 @@ template("fuchsia_executable_runner") { ...@@ -145,6 +215,8 @@ template("fuchsia_executable_runner") {
] ]
} }
# This target is superceded by the packaging system.
# TODO(kmarshall): remove this target and remove dependencies from bots.
generate_runner_script(_archive_target) { generate_runner_script(_archive_target) {
forward_variables_from(invoker, [ "testonly" ]) forward_variables_from(invoker, [ "testonly" ])
runner_script = "archive_builder.py" runner_script = "archive_builder.py"
...@@ -155,5 +227,6 @@ template("fuchsia_executable_runner") { ...@@ -155,5 +227,6 @@ template("fuchsia_executable_runner") {
] ]
exe_path = _exe_path exe_path = _exe_path
root_target_name = invoker.target_name root_target_name = invoker.target_name
package_name = _exe_name
} }
} }
...@@ -5,9 +5,12 @@ ...@@ -5,9 +5,12 @@
"""Functions used to provision Fuchsia boot images.""" """Functions used to provision Fuchsia boot images."""
import common import common
import logging
import os import os
import subprocess import subprocess
import tempfile import tempfile
import time
import uuid
_SSH_CONFIG_TEMPLATE = """ _SSH_CONFIG_TEMPLATE = """
Host * Host *
...@@ -24,10 +27,6 @@ Host * ...@@ -24,10 +27,6 @@ Host *
ServerAliveCountMax 1""" ServerAliveCountMax 1"""
def _GetKernelPath(target_arch):
return os.path.join(_TargetCpuToSdkBinPath(target_arch), 'zircon.bin')
def _TargetCpuToSdkBinPath(target_arch): def _TargetCpuToSdkBinPath(target_arch):
"""Returns the path to the kernel & bootfs .bin files for |target_cpu|.""" """Returns the path to the kernel & bootfs .bin files for |target_cpu|."""
return os.path.join(common.SDK_ROOT, 'target', target_arch) return os.path.join(common.SDK_ROOT, 'target', target_arch)
...@@ -47,8 +46,9 @@ def _ProvisionSSH(output_dir): ...@@ -47,8 +46,9 @@ def _ProvisionSSH(output_dir):
id_key_path = output_dir + '/id_ed25519' id_key_path = output_dir + '/id_ed25519'
id_pubkey_path = id_key_path + '.pub' id_pubkey_path = id_key_path + '.pub'
known_hosts_path = output_dir + '/known_hosts' known_hosts_path = output_dir + '/known_hosts'
ssh_config_path = output_dir + '/ssh_config' ssh_config_path = GetSSHConfigPath(output_dir)
logging.debug('Generating SSH credentials.')
if not os.path.isfile(host_key_path): if not os.path.isfile(host_key_path):
subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f', subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-h', '-f',
host_key_path, '-P', '', '-N', ''], host_key_path, '-P', '', '-N', ''],
...@@ -70,27 +70,57 @@ def _ProvisionSSH(output_dir): ...@@ -70,27 +70,57 @@ def _ProvisionSSH(output_dir):
) )
def CreateBootFS(output_dir, target_arch): def GetKernelPath(target_arch):
"""Creates a bootfs image provisoned with the credentials necessary return os.path.join(_TargetCpuToSdkBinPath(target_arch), 'zircon.bin')
for SSH remote access.
def GetSSHConfigPath(output_dir):
return output_dir + '/ssh_config'
def CreateBootdata(output_dir, target_arch):
"""Creates a bootdata image ready for SSH remote access.
Returns a tuple with the path to SSH config and the path to the boot Returns a path to the bootdata.bin file."""
image."""
boot_image = os.path.join( base_boot_data = os.path.join(
_TargetCpuToSdkBinPath(target_arch), 'bootdata.bin') _TargetCpuToSdkBinPath(target_arch), 'bootdata.bin')
ssh_manifest = tempfile.NamedTemporaryFile(delete=False)
ssh_config, ssh_data = _ProvisionSSH(output_dir) ssh_config, ssh_data = _ProvisionSSH(output_dir)
ssh_manifest = tempfile.NamedTemporaryFile(delete=False)
for key, val in ssh_data: for key, val in ssh_data:
ssh_manifest.write("%s=%s\n" % (key, val)) ssh_manifest.write("%s=%s\n" % (key, val))
ssh_manifest.close() ssh_manifest.close()
mkbootfs_path = os.path.join(common.SDK_ROOT, 'tools', 'mkbootfs') mkbootfs_path = os.path.join(common.SDK_ROOT, 'tools', 'mkbootfs')
bootfs_name = output_dir + '/image.bootfs' bootfs_path = output_dir + '/image.bootfs'
args = [mkbootfs_path, '-o', bootfs_name, args = [mkbootfs_path, '-o', bootfs_path,
'--target=boot', boot_image, '--target=boot', base_boot_data,
'--target=system', ssh_manifest.name] '--target=system', ssh_manifest.name]
logging.debug(' '.join(args))
subprocess.check_call(args) subprocess.check_call(args)
os.remove(ssh_manifest.name) os.remove(ssh_manifest.name)
return ssh_config, bootfs_name return bootfs_path
def GetNodeName(output_dir):
  """Returns the cached Zircon node name, or generates one if it doesn't
  already exist. The node name is used by Discover to find the prior
  deployment on the LAN.

  output_dir: The directory holding the 'nodename' cache file.

  Returns the node name as a string."""

  nodename_file = os.path.join(output_dir, 'nodename')
  if os.path.exists(nodename_file):
    # Reuse the previously generated name so that rediscovery keeps finding
    # the same device across invocations.
    with open(nodename_file, 'r') as f:
      return f.readline()

  nodename = str(uuid.uuid4())
  # 'with' guarantees the file is flushed and closed, replacing the manual
  # flush()/close() sequence (the old read path leaked its handle).
  with open(nodename_file, 'w') as f:
    f.write(nodename)
  return nodename
def GetKernelArgs(output_dir):
  """Returns the kernel command line arguments used when booting Fuchsia
  for the deployment rooted at |output_dir|."""
  epoch_arg = 'devmgr.epoch=%d' % time.time()
  nodename_arg = 'zircon.nodename=' + GetNodeName(output_dir)
  return [epoch_arg, nodename_arg]
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from device_target import DeviceTarget
from qemu_target import QemuTarget
def AddCommonArgs(arg_parser):
  """Adds command line arguments to |arg_parser| for options which are shared
  across test and executable target types."""

  group = arg_parser.add_argument_group('common', 'Common arguments')
  group.add_argument('--package', type=os.path.realpath, required=True,
                     help='Path to the package to execute.')
  group.add_argument('--package-manifest', type=os.path.realpath,
                     required=True,
                     help='Path to the Fuchsia package manifest file.')
  group.add_argument('--output-directory', type=os.path.realpath,
                     required=True,
                     help=('Path to the directory in which build files are'
                           ' located (must include build type).'))
  group.add_argument('--target-cpu', required=True,
                     help='GN target_cpu setting for the build.')
  group.add_argument('--device', '-d', action='store_true', default=False,
                     help='Run on hardware device instead of QEMU.')
  group.add_argument('--host',
                     help='The IP of the target device. Optional.')
  group.add_argument('--port', '-p', type=int, default=22,
                     help='The port of the SSH service running on the '
                          'device. Optional.')
  # NOTE(review): flag is spelled with an underscore (not '--ssh-config'),
  # unlike the other flags; existing callers depend on this spelling.
  group.add_argument('--ssh_config', '-F',
                     help='The path to the SSH configuration used for '
                          'connecting to the target device.')
  group.add_argument('--verbose', '-v', default=False, action='store_true',
                     help='Show more logging information.')
def ConfigureLogging(args):
  """Configures the logging level based on command line |args|."""
  if args.verbose:
    level = logging.DEBUG
  else:
    level = logging.INFO
  logging.basicConfig(level=level)
def GetDeploymentTargetForArgs(args):
  """Constructs a deployment target object using parameters taken from
  command line arguments."""
  if args.device:
    return DeviceTarget(args.output_directory, args.target_cpu,
                        args.host, args.port, args.ssh_config)
  return QemuTarget(args.output_directory, args.target_cpu)
#!/usr/bin/env python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a script to run a Fushsia executable by delegating to
build/fuchsia/(exe|test)_runner.py.
"""
import argparse
import os
import re
import sys
# Template for the generated wrapper script. The script resolves the baked-in
# paths relative to its own location, then exec()s the real runner with those
# arguments plus whatever was passed on its command line.
SCRIPT_TEMPLATE = """\
#!/usr/bin/env python
#
# This file was generated by build/fuchsia/runner_v2/create_runner_script.py

import os
import sys

def main():
  script_directory = os.path.dirname(__file__)

  def ResolvePath(path):
    \"\"\"Returns an absolute filepath given a path relative to this script.
    \"\"\"
    return os.path.abspath(os.path.join(script_directory, path))

  runner_path = ResolvePath('{runner_path}')
  runner_args = {runner_args}
  runner_path_args = {runner_path_args}
  for arg, path in runner_path_args:
    runner_args.extend([arg, ResolvePath(path)])

  os.execv(runner_path,
           [runner_path] + runner_args + sys.argv[1:])

if __name__ == '__main__':
  sys.exit(main())
"""


def main(args):
  """Writes an executable wrapper script that launches the runner script named
  by --runner-script with the path arguments baked in, relative to the
  generated script's own directory.

  args: Command-line argument list (excluding argv[0]); unknown arguments are
      passed through verbatim to the generated script's runner invocation.

  Returns None on success (argparse exits the process on bad arguments)."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--runner-script',
                      help='Name of the runner script to use.')
  parser.add_argument('--script-output-path',
                      help='Output path for executable script.')
  parser.add_argument('--test-runner-path',
                      help='Path to test_runner.py (optional).')
  group = parser.add_argument_group('Test runner path arguments.')
  group.add_argument('--output-directory')
  group.add_argument('--package')
  group.add_argument('--package-manifest')
  args, runner_args = parser.parse_known_args(args)

  def RelativizePathToScript(path):
    """Returns the path relative to the output script directory."""
    return os.path.relpath(path, os.path.dirname(args.script_output_path))

  runner_path = args.test_runner_path or os.path.join(
      os.path.dirname(__file__), args.runner_script)
  runner_path = RelativizePathToScript(runner_path)

  runner_path_args = []
  runner_path_args.append(
      ('--output-directory', RelativizePathToScript(args.output_directory)))
  runner_path_args.append(
      ('--package', RelativizePathToScript(args.package)))
  runner_path_args.append(
      ('--package-manifest', RelativizePathToScript(args.package_manifest)))

  with open(args.script_output_path, 'w') as script:
    script.write(SCRIPT_TEMPLATE.format(
        runner_path=str(runner_path),
        runner_args=repr(runner_args),
        runner_path_args=repr(runner_path_args)))

  # Sets the mode of the generated script so that it is executable by the
  # current user. Written as 0o750 rather than the Python 2-only literal 0750,
  # which is a SyntaxError under Python 3 (PEP 3127).
  os.chmod(args.script_output_path, 0o750)
if __name__ == '__main__':
  # Forward main()'s return value as the process exit status (None -> 0).
  sys.exit(main(sys.argv[1:]))
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements commands for running and interacting with Fuchsia on devices."""
import boot_data
import common
import logging
import os
import subprocess
import target
import time
import uuid
# Number of times DeviceTarget.Start() polls for the netbooted device on the
# network, waiting CONNECT_RETRY_WAIT_SECS between attempts.
CONNECT_RETRY_COUNT = 20
CONNECT_RETRY_WAIT_SECS = 1
class DeviceTarget(target.Target):
  """Deployment target backed by a physical Fuchsia device, reached either by
  netbooting it or by connecting to an already-running instance over SSH."""

  def __init__(self, output_dir, target_cpu, host=None, port=None,
               ssh_config=None):
    """output_dir: The directory which will contain the files that are
                   generated to support the deployment.
    target_cpu: The CPU architecture of the deployment target. Can be
                "x64" or "arm64".
    host: The address of the deployment target device.
    port: The port of the SSH service on the deployment target device.
    ssh_config: The path to SSH configuration data."""

    super(DeviceTarget, self).__init__(output_dir, target_cpu)

    self._host = host
    self._port = port if port else 22

    # Automatic provisioning is used unless both a host and an SSH config
    # were explicitly supplied.
    self._auto = not host or not ssh_config
    if self._auto:
      self._ssh_config_path = boot_data.GetSSHConfigPath(output_dir)
    else:
      self._ssh_config_path = os.path.expanduser(ssh_config)

  def __Discover(self, node_name):
    """Returns the IP address and port of a Fuchsia instance discovered on
    the local area network."""

    netaddr_path = os.path.join(common.SDK_ROOT, 'tools', 'netaddr')
    command = [netaddr_path, '--fuchsia', '--nowait', node_name]
    logging.debug(' '.join(command))
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=open(os.devnull, 'w'))
    process.wait()
    if process.returncode != 0:
      return None
    return process.stdout.readlines()[0].strip()

  def Start(self):
    if self._auto:
      logging.debug('Starting automatic device deployment.')

      # Prefer a device that is already on the network and ready for SSH.
      node_name = boot_data.GetNodeName(self._output_dir)
      self._host = self.__Discover(node_name)
      if self._host and self._WaitUntilReady(retries=0):
        logging.info('Connected to an already booted device.')
        return

      logging.info('Netbooting Fuchsia. Please ensure that your device is '
                   'in bootloader mode.')
      arch = self._GetTargetSdkArch()
      boot_image = boot_data.CreateBootdata(self._output_dir, arch)
      bootserver_path = os.path.join(common.SDK_ROOT, 'tools', 'bootserver')
      bootserver_command = (
          [bootserver_path, '-1', boot_data.GetKernelPath(arch),
           boot_image, '--'] +
          boot_data.GetKernelArgs(self._output_dir))
      logging.debug(' '.join(bootserver_command))
      subprocess.check_call(bootserver_command)

      # Poll until the freshly booted device shows up on the network.
      logging.debug('Waiting for device to join network.')
      for _ in xrange(CONNECT_RETRY_COUNT):
        self._host = self.__Discover(node_name)
        if self._host:
          break
        time.sleep(CONNECT_RETRY_WAIT_SECS)

      if not self._host:
        raise Exception("Couldn't connect to device.")

    logging.debug('host=%s, port=%d' % (self._host, self._port))
    self._WaitUntilReady()

  def _GetEndpoint(self):
    return (self._host, self._port)

  def _GetSshConfigPath(self):
    return self._ssh_config_path
...@@ -4,8 +4,9 @@ ...@@ -4,8 +4,9 @@
"""Implements commands for running and interacting with Fuchsia on QEMU.""" """Implements commands for running and interacting with Fuchsia on QEMU."""
import boot_image import boot_data
import common import common
import logging
import target import target
import os import os
import platform import platform
...@@ -31,12 +32,12 @@ def _GetAvailableTcpPort(): ...@@ -31,12 +32,12 @@ def _GetAvailableTcpPort():
class QemuTarget(target.Target): class QemuTarget(target.Target):
def __init__(self, output_dir, target_cpu, verbose=True, ram_size_mb=2048): def __init__(self, output_dir, target_cpu, ram_size_mb=2048):
"""output_dir: The directory which will contain the files that are """output_dir: The directory which will contain the files that are
generated to support the QEMU deployment. generated to support the QEMU deployment.
target_cpu: The emulated target CPU architecture. Can be 'x64' or 'arm64'. target_cpu: The emulated target CPU architecture.
verbose: If true, emits extra non-error logging data for diagnostics.""" Can be 'x64' or 'arm64'."""
super(QemuTarget, self).__init__(output_dir, target_cpu, verbose) super(QemuTarget, self).__init__(output_dir, target_cpu)
self._qemu_process = None self._qemu_process = None
self._ram_size_mb = ram_size_mb self._ram_size_mb = ram_size_mb
...@@ -50,18 +51,18 @@ class QemuTarget(target.Target): ...@@ -50,18 +51,18 @@ class QemuTarget(target.Target):
self.Shutdown() self.Shutdown()
def Start(self): def Start(self):
self._ssh_config_path, boot_image_path = boot_image.CreateBootFS( boot_data_path = boot_data.CreateBootdata(
self._output_dir, self._GetTargetSdkArch()) self._output_dir, self._GetTargetSdkArch())
qemu_path = os.path.join( qemu_path = os.path.join(
common.SDK_ROOT, 'qemu', 'bin', common.SDK_ROOT, 'qemu', 'bin',
'qemu-system-' + self._GetTargetSdkArch()) 'qemu-system-' + self._GetTargetSdkArch())
kernel_args = ['devmgr.epoch=%d' % time.time()] kernel_args = boot_data.GetKernelArgs(self._output_dir)
qemu_command = [qemu_path, qemu_command = [qemu_path,
'-m', str(self._ram_size_mb), '-m', str(self._ram_size_mb),
'-nographic', '-nographic',
'-kernel', boot_image._GetKernelPath(self._GetTargetSdkArch()), '-kernel', boot_data.GetKernelPath(self._GetTargetSdkArch()),
'-initrd', boot_image_path, '-initrd', boot_data_path,
'-smp', '4', '-smp', '4',
# Use stdio for the guest OS only; don't attach the QEMU interactive # Use stdio for the guest OS only; don't attach the QEMU interactive
...@@ -111,21 +112,23 @@ class QemuTarget(target.Target): ...@@ -111,21 +112,23 @@ class QemuTarget(target.Target):
# Python script panicking and aborting. # Python script panicking and aborting.
# The precise root cause is still nebulous, but this fix works. # The precise root cause is still nebulous, but this fix works.
# See crbug.com/741194. # See crbug.com/741194.
logging.debug('Launching QEMU.')
logging.debug(' '.join(qemu_command))
self._qemu_process = subprocess.Popen( self._qemu_process = subprocess.Popen(
qemu_command, stdout=subprocess.PIPE, stdin=open(os.devnull)) qemu_command, stdout=open(os.devnull), stdin=open(os.devnull),
stderr=open(os.devnull))
self._Attach(); self._WaitUntilReady();
def Shutdown(self): def Shutdown(self):
logging.info('Shutting down QEMU.')
self._qemu_process.kill() self._qemu_process.kill()
def GetQemuStdout(self): def GetQemuStdout(self):
return self._qemu_process.stdout return self._qemu_process.stdout
def _GetEndpoint(self): def _GetEndpoint(self):
return ('127.0.0.1', self._host_ssh_port) return ('localhost', self._host_ssh_port)
def _GetSshConfigPath(self): def _GetSshConfigPath(self):
return self._ssh_config_path return boot_data.GetSSHConfigPath(self._output_dir)
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
import qemu_target import qemu_target
import shutil import shutil
import subprocess
import tempfile import tempfile
import time import time
import unittest import unittest
...@@ -31,10 +32,10 @@ with qemu_target.QemuTarget(tmpdir, 'x64') as target: ...@@ -31,10 +32,10 @@ with qemu_target.QemuTarget(tmpdir, 'x64') as target:
tmp_path = tmpdir + "/payload" tmp_path = tmpdir + "/payload"
with open(tmp_path, "w") as tmpfile: with open(tmp_path, "w") as tmpfile:
tmpfile.write(TEST_PAYLOAD) tmpfile.write(TEST_PAYLOAD)
target.CopyTo(tmp_path, '/tmp/payload') target.PutFile(tmp_path, '/tmp/payload')
tmp_path_roundtrip = tmp_path + ".roundtrip" tmp_path_roundtrip = tmp_path + ".roundtrip"
target.CopyFrom('/tmp/payload', tmp_path_roundtrip) target.GetFile('/tmp/payload', tmp_path_roundtrip)
with open(tmp_path_roundtrip) as roundtrip: with open(tmp_path_roundtrip) as roundtrip:
self.assertEqual(TEST_PAYLOAD, roundtrip.read()) self.assertEqual(TEST_PAYLOAD, roundtrip.read())
...@@ -45,7 +46,9 @@ with qemu_target.QemuTarget(tmpdir, 'x64') as target: ...@@ -45,7 +46,9 @@ with qemu_target.QemuTarget(tmpdir, 'x64') as target:
self.assertEqual(1, target.RunCommand(['false'])) self.assertEqual(1, target.RunCommand(['false']))
def testRunCommandPiped(self): def testRunCommandPiped(self):
proc = target.RunCommandPiped(['cat']) proc = target.RunCommandPiped(['cat'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
proc.stdin.write(TEST_PAYLOAD) proc.stdin.write(TEST_PAYLOAD)
proc.stdin.flush() proc.stdin.flush()
proc.stdin.close() proc.stdin.close()
......
...@@ -5,8 +5,10 @@ ...@@ -5,8 +5,10 @@
"""Helper functions for remotely executing and copying files over a SSH """Helper functions for remotely executing and copying files over a SSH
connection.""" connection."""
import logging
import os import os
import subprocess import subprocess
import sys
_SSH = ['ssh'] _SSH = ['ssh']
_SCP = ['scp'] _SCP = ['scp']
...@@ -15,6 +17,10 @@ COPY_TO_TARGET = 0 ...@@ -15,6 +17,10 @@ COPY_TO_TARGET = 0
COPY_FROM_TARGET = 1 COPY_FROM_TARGET = 1
def _IsLinkLocalIPv6(hostname):
  """Returns True if |hostname| looks like a link-local IPv6 address."""
  prefix = 'fe80::'
  return hostname[:len(prefix)] == prefix
def RunSsh(config_path, host, port, command, silent): def RunSsh(config_path, host, port, command, silent):
"""Executes an SSH command on the remote host and blocks until completion. """Executes an SSH command on the remote host and blocks until completion.
...@@ -29,6 +35,7 @@ def RunSsh(config_path, host, port, command, silent): ...@@ -29,6 +35,7 @@ def RunSsh(config_path, host, port, command, silent):
ssh_command = _SSH + ['-F', config_path, ssh_command = _SSH + ['-F', config_path,
host, host,
'-p', str(port)] + command '-p', str(port)] + command
logging.debug('ssh exec: ' + ' '.join(ssh_command))
if silent: if silent:
devnull = open(os.devnull, 'w') devnull = open(os.devnull, 'w')
return subprocess.call(ssh_command, stderr=devnull, stdout=devnull) return subprocess.call(ssh_command, stderr=devnull, stdout=devnull)
...@@ -36,7 +43,7 @@ def RunSsh(config_path, host, port, command, silent): ...@@ -36,7 +43,7 @@ def RunSsh(config_path, host, port, command, silent):
return subprocess.call(ssh_command) return subprocess.call(ssh_command)
def RunPipedSsh(config_path, host, port, command): def RunPipedSsh(config_path, host, port, command, **kwargs):
"""Executes an SSH command on the remote host and returns a process object """Executes an SSH command on the remote host and returns a process object
with access to the command's stdio streams. Does not block. with access to the command's stdio streams. Does not block.
...@@ -44,17 +51,16 @@ def RunPipedSsh(config_path, host, port, command): ...@@ -44,17 +51,16 @@ def RunPipedSsh(config_path, host, port, command):
host: The hostname or IP address of the remote host. host: The hostname or IP address of the remote host.
port: The port to connect to. port: The port to connect to.
command: A list of strings containing the command and its arguments. command: A list of strings containing the command and its arguments.
silent: If true, suppresses all output from 'ssh'. kwargs: A dictionary of parameters to be passed to subprocess.Popen().
The parameters can be used to override stdin and stdout, for example.
Returns a Popen object for the command.""" Returns a Popen object for the command."""
ssh_command = _SSH + ['-F', config_path, ssh_command = _SSH + ['-F', config_path,
host, host,
'-p', str(port)] + command '-p', str(port)] + command
return subprocess.Popen(ssh_command, logging.debug(' '.join(ssh_command))
stdout=subprocess.PIPE, return subprocess.Popen(ssh_command, **kwargs)
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
def RunScp(config_path, host, port, source, dest, direction): def RunScp(config_path, host, port, source, dest, direction):
...@@ -72,14 +78,18 @@ def RunScp(config_path, host, port, source, dest, direction): ...@@ -72,14 +78,18 @@ def RunScp(config_path, host, port, source, dest, direction):
Function will raise an assertion if a failure occurred.""" Function will raise an assertion if a failure occurred."""
scp_command = _SCP[:]
if ':' in host:
scp_command.append('-6')
host = '[' + host + ']'
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
scp_command.append('-v')
if direction == COPY_TO_TARGET: if direction == COPY_TO_TARGET:
dest = "%s:%s" % (host, dest) dest = "%s:%s" % (host, dest)
else: else:
source = "%s:%s" % (host, source) source = "%s:%s" % (host, source)
scp_command += ['-F', config_path, '-P', str(port), source, dest]
scp_command = _SCP + ['-F', config_path, logging.debug(' '.join(scp_command))
'-P', str(port), subprocess.check_call(scp_command, stdout=open(os.devnull, 'w'))
source,
dest]
devnull = open('/dev/null', 'w')
subprocess.check_call(scp_command, stdout=devnull)
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
import logging
import os
import subprocess
import uuid
from symbolizer import FilterStream
def RunPackage(output_dir, target, package_path, run_args,
               symbolizer_config=None):
  """Copies the Fuchsia package at |package_path| to the target,
  executes it with |run_args|, and symbolizes its output.

  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_path: The path to the .far package file.
  run_args: The command-line arguments which will be passed to the Fuchsia
      process.
  symbolizer_config: A newline delimited list of source files contained in the
      package. Omitting this parameter will disable symbolization.

  Returns the exit code of the remote package process."""

  if symbolizer_config:
    # Enumerating the package contents is purely diagnostic; only pay the
    # cost of reading the manifest when debug logging is enabled.
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
      logging.debug('Contents of package "%s":' %
                    os.path.basename(package_path))
      for next_line in open(symbolizer_config, 'r'):
        logging.debug(' ' + next_line.strip().split('=')[0])
      logging.debug('')

  # Copy the package under a unique name so concurrent runs don't collide.
  deployed_package_path = '/tmp/package-%s.far' % uuid.uuid1()
  target.PutFile(package_path, deployed_package_path)

  try:
    command = ['run', deployed_package_path] + run_args
    process = target.RunCommandPiped(command,
                                     stdin=open(os.devnull, 'r'),
                                     stdout=subprocess.PIPE)

    if symbolizer_config:
      # Decorate the process output stream with the symbolizer.
      output = FilterStream(process.stdout, symbolizer_config, output_dir)
    else:
      output = process.stdout

    for next_line in output:
      # print() form is valid under both Python 2 and Python 3; the bare
      # `print next_line` statement is a SyntaxError under Python 3.
      print(next_line)

    process.wait()
    if process.returncode != 0:
      # The test runner returns an error status code if *any* tests fail,
      # so we should proceed anyway.
      logging.warning('Command exited with non-zero status code %d.' %
                      process.returncode)
  finally:
    logging.debug('Cleaning up package file.')
    target.RunCommand(['rm', deployed_package_path])

  return process.returncode
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import subprocess
# Matches the coarse syntax of a backtrace entry.
_BACKTRACE_PREFIX_RE = re.compile(r'bt#(?P<frame_id>\d+): ')

# Matches the specific fields of a backtrace entry.
# Back-trace line matcher/parser assumes that 'pc' is always present, and
# expects that 'sp' and ('binary','pc_offset') may also be provided.
# Example of the full form: "pc 0x1234 sp 0x5678 (libfoo.so,0x9abc)".
_BACKTRACE_ENTRY_RE = re.compile(
    r'pc 0(?:x[0-9a-f]+)?' +
    r'(?: sp 0x[0-9a-f]+)?' +
    r'(?: \((?P<binary>\S+),(?P<pc_offset>0x[0-9a-f]+)\))?$')
def FilterStream(stream, manifest_path, output_dir):
  """Looks for backtrace lines from an iterable |stream| and symbolizes them.

  Yields a stream of strings with symbolized entries replaced."""
  symbolizer = _SymbolizerFilter(manifest_path, output_dir)
  return symbolizer.SymbolizeStream(stream)
class _SymbolizerFilter(object):
"""Adds backtrace symbolization capabilities to a process output stream."""
def __init__(self, manifest_path, output_dir):
self._symbols_mapping = {}
self._output_dir = output_dir
# Compute remote/local path mappings using the manifest data.
for next_line in open(manifest_path):
split = next_line.strip().split('=')
target = split[0]
source = os.path.join(output_dir, split[1])
with open(source, 'rb') as f:
file_tag = f.read(4)
if file_tag != '\x7fELF':
continue
self._symbols_mapping[os.path.basename(target)] = source
self._symbols_mapping[target] = source
logging.debug('Symbols: %s -> %s' % (source, target))
def _SymbolizeEntries(self, entries):
"""Symbolizes the parsed backtrace |entries| by calling addr2line.
Returns a set of (frame_id, result) pairs."""
filename_re = re.compile(r'at ([-._a-zA-Z0-9/+]+):(\d+)')
# Use addr2line to symbolize all the |pc_offset|s in |entries| in one go.
# Entries with no |debug_binary| are also processed here, so that we get
# consistent output in that case, with the cannot-symbolize case.
addr2line_output = None
if entries[0].has_key('debug_binary'):
addr2line_args = (['addr2line', '-Cipf', '-p',
'--exe=' + entries[0]['debug_binary']] +
map(lambda entry: entry['pc_offset'], entries))
addr2line_output = subprocess.check_output(addr2line_args).splitlines()
assert addr2line_output
results = {}
for entry in entries:
raw, frame_id = entry['raw'], entry['frame_id']
prefix = '#%s: ' % frame_id
if not addr2line_output:
# Either there was no addr2line output, or too little of it.
filtered_line = raw
else:
output_line = addr2line_output.pop(0)
# Relativize path to the current working (output) directory if we see
# a filename.
def RelativizePath(m):
relpath = os.path.relpath(os.path.normpath(m.group(1)))
return 'at ' + relpath + ':' + m.group(2)
filtered_line = filename_re.sub(RelativizePath, output_line)
if '??' in filtered_line.split():
# If symbolization fails just output the raw backtrace.
filtered_line = raw
else:
# Release builds may inline things, resulting in "(inlined by)" lines.
inlined_by_prefix = " (inlined by)"
while (addr2line_output and
addr2line_output[0].startswith(inlined_by_prefix)):
inlined_by_line = \
'\n' + (' ' * len(prefix)) + addr2line_output.pop(0)
filtered_line += filename_re.sub(RelativizePath, inlined_by_line)
results[entry['frame_id']] = prefix + filtered_line
return results
def _LookupDebugBinary(self, entry):
"""Looks up the binary listed in |entry| in the |_symbols_mapping|.
Returns the corresponding host-side binary's filename, or None."""
binary = entry['binary']
if not binary:
return None
app_prefix = 'app:'
if binary.startswith(app_prefix):
binary = binary[len(app_prefix):]
# We change directory into /system/ before running the target executable, so
# all paths are relative to "/system/", and will typically start with "./".
# Some crashes still uses the full filesystem path, so cope with that, too.
pkg_prefix = '/pkg/'
cwd_prefix = './'
if binary.startswith(cwd_prefix):
binary = binary[len(cwd_prefix):]
elif binary.startswith(pkg_prefix):
binary = binary[len(pkg_prefix):]
# Allow other paths to pass-through; sometimes neither prefix is present.
if binary in self._symbols_mapping:
return self._symbols_mapping[binary]
# |binary| may be truncated by the crashlogger, so if there is a unique
# match for the truncated name in |symbols_mapping|, use that instead.
matches = filter(lambda x: x.startswith(binary),
self._symbols_mapping.keys())
if len(matches) == 1:
return self._symbols_mapping[matches[0]]
return None
def _SymbolizeBacktrace(self, backtrace):
"""Group |backtrace| entries according to the associated binary, and locate
the path to the debug symbols for that binary, if any."""
batches = {}
for entry in backtrace:
debug_binary = self._LookupDebugBinary(entry)
if debug_binary:
entry['debug_binary'] = debug_binary
batches.setdefault(debug_binary, []).append(entry)
# Run _SymbolizeEntries on each batch and collate the results.
symbolized = {}
for batch in batches.itervalues():
symbolized.update(self._SymbolizeEntries(batch))
# Map each entry to its symbolized form, by frame-id, and return the list.
return map(lambda entry: symbolized[entry['frame_id']], backtrace)
def SymbolizeStream(self, stream):
"""Creates a symbolized logging stream object using the output from
|stream|."""
# A buffer of backtrace entries awaiting symbolization, stored as dicts:
# raw: The original back-trace line that followed the prefix.
# frame_id: backtrace frame number (starting at 0).
# binary: path to executable code corresponding to the current frame.
# pc_offset: memory offset within the executable.
backtrace_entries = []
# Read from the stream until we hit EOF.
for line in stream:
line = line.rstrip()
# Look for the back-trace prefix, otherwise just emit the line.
matched = _BACKTRACE_PREFIX_RE.match(line)
if not matched:
yield line
continue
backtrace_line = line[matched.end():]
# If this was the end of a back-trace then symbolize and emit it.
frame_id = matched.group('frame_id')
if backtrace_line == 'end':
if backtrace_entries:
for processed in self._SymbolizeBacktrace(backtrace_entries):
yield processed
backtrace_entries = []
continue
# Parse the program-counter offset, etc into |backtrace_entries|.
matched = _BACKTRACE_ENTRY_RE.match(backtrace_line)
if matched:
# |binary| and |pc_offset| will be None if not present.
backtrace_entries.append(
{'raw': backtrace_line, 'frame_id': frame_id,
'binary': matched.group('binary'),
'pc_offset': matched.group('pc_offset')})
else:
backtrace_entries.append(
{'raw': backtrace_line, 'frame_id': frame_id,
'binary': None, 'pc_offset': None})
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be # Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. # found in the LICENSE file.
import logging
import os import os
import remote_cmd import remote_cmd
import subprocess import subprocess
...@@ -20,14 +21,19 @@ class FuchsiaTargetException(Exception): ...@@ -20,14 +21,19 @@ class FuchsiaTargetException(Exception):
class Target(object): class Target(object):
"""Abstract base class representing a Fuchsia deployment target.""" """Base class representing a Fuchsia deployment target."""
def __init__(self, output_dir, target_cpu, verbose): def __init__(self, output_dir, target_cpu):
self._target_cpu = target_cpu
self._output_dir = output_dir self._output_dir = output_dir
self._started = False self._started = False
self._dry_run = False self._dry_run = False
self._vlogger = sys.stdout if verbose else open(os.devnull, 'w') self._target_cpu = target_cpu
# Functions used by the Python context manager for teardown.
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return self
def Start(self): def Start(self):
"""Handles the instantiation and connection process for the Fuchsia """Handles the instantiation and connection process for the Fuchsia
...@@ -39,56 +45,59 @@ class Target(object): ...@@ -39,56 +45,59 @@ class Target(object):
commands.""" commands."""
return self._started return self._started
def RunCommandPiped(self, command): def RunCommandPiped(self, command, **kwargs):
"""Starts a remote command and immediately returns a Popen object for the """Starts a remote command and immediately returns a Popen object for the
command. The caller may interact with the streams, inspect the status code, command. The caller may interact with the streams, inspect the status code,
wait on command termination, etc. wait on command termination, etc.
command: A list of strings representing the command and arguments. command: A list of strings representing the command and arguments.
kwargs: A dictionary of parameters to be passed to subprocess.Popen().
The parameters can be used to override stdin and stdout, for example.
Returns: a Popen object. Returns: a Popen object.
Note: method does not block.""" Note: method does not block."""
self._AssertStarted() self._AssertIsStarted()
logging.debug('running (non-blocking) \'%s\'.' % ' '.join(command))
host, port = self._GetEndpoint() host, port = self._GetEndpoint()
return remote_cmd.RunPipedSsh(self._GetSshConfigPath(), host, port, command) return remote_cmd.RunPipedSsh(self._GetSshConfigPath(), host, port, command,
**kwargs)
def RunCommand(self, command, silent=False): def RunCommand(self, command, silent=False):
"""Executes a remote command and waits for it to finish executing. """Executes a remote command and waits for it to finish executing.
Returns the exit code of the command.""" Returns the exit code of the command."""
self._AssertStarted() self._AssertIsStarted()
logging.debug('running \'%s\'.' % ' '.join(command))
host, port = self._GetEndpoint() host, port = self._GetEndpoint()
return remote_cmd.RunSsh(self._GetSshConfigPath(), host, port, command, return remote_cmd.RunSsh(self._GetSshConfigPath(), host, port, command,
silent) silent)
def CopyTo(self, source, dest): def PutFile(self, source, dest):
"""Copies a file from the local filesystem to the target filesystem. """Copies a file from the local filesystem to the target filesystem.
source: The path of the file being copied. source: The path of the file being copied.
dest: The path on the remote filesystem which will be copied to.""" dest: The path on the remote filesystem which will be copied to."""
self._AssertStarted() self._AssertIsStarted()
host, port = self._GetEndpoint() host, port = self._GetEndpoint()
logging.debug('copy local:%s => remote:%s' % (source, dest))
command = remote_cmd.RunScp(self._GetSshConfigPath(), host, port, command = remote_cmd.RunScp(self._GetSshConfigPath(), host, port,
source, dest, remote_cmd.COPY_TO_TARGET) source, dest, remote_cmd.COPY_TO_TARGET)
def CopyFrom(self, source, dest): def GetFile(self, source, dest):
"""Copies a file from the target filesystem to the local filesystem. """Copies a file from the target filesystem to the local filesystem.
source: The path of the file being copied. source: The path of the file being copied.
dest: The path on the local filesystem which will be copied to.""" dest: The path on the local filesystem which will be copied to."""
self._AssertStarted() self._AssertIsStarted()
host, port = self._GetEndpoint() host, port = self._GetEndpoint()
logging.debug('copy remote:%s => local:%s' % (source, dest))
return remote_cmd.RunScp(self._GetSshConfigPath(), host, port, return remote_cmd.RunScp(self._GetSshConfigPath(), host, port,
source, dest, remote_cmd.COPY_FROM_TARGET) source, dest, remote_cmd.COPY_FROM_TARGET)
def Shutdown(self):
self.RunCommand(_SHUTDOWN_CMD)
self._started = False
def _GetEndpoint(self): def _GetEndpoint(self):
"""Returns a (host, port) tuple for the SSH connection to the target.""" """Returns a (host, port) tuple for the SSH connection to the target."""
raise NotImplementedError raise NotImplementedError
...@@ -101,22 +110,18 @@ class Target(object): ...@@ -101,22 +110,18 @@ class Target(object):
return 'x86_64' return 'x86_64'
raise FuchsiaTargetException('Unknown target_cpu:' + self._target_cpu) raise FuchsiaTargetException('Unknown target_cpu:' + self._target_cpu)
def _AssertStarted(self): def _AssertIsStarted(self):
assert self.IsStarted() assert self.IsStarted()
def _Attach(self): def _WaitUntilReady(self, retries=_ATTACH_MAX_RETRIES):
self._vlogger.write('Trying to connect over SSH...') logging.debug('Connecting to Fuchsia using SSH.')
self._vlogger.flush() for _ in xrange(retries+1):
for _ in xrange(_ATTACH_MAX_RETRIES):
host, port = self._GetEndpoint() host, port = self._GetEndpoint()
if remote_cmd.RunSsh(self._ssh_config_path, host, port, ['echo'], if remote_cmd.RunSsh(self._GetSshConfigPath(), host, port, ['true'],
True) == 0: True) == 0:
self._vlogger.write(' connected!\n') logging.debug('Connected!')
self._vlogger.flush()
self._started = True self._started = True
return return True
self._vlogger.write('.')
self._vlogger.flush()
time.sleep(_ATTACH_RETRY_INTERVAL) time.sleep(_ATTACH_RETRY_INTERVAL)
sys.stderr.write(' timeout limit reached.\n') sys.stderr.write(' timeout limit reached.\n')
raise FuchsiaTargetException('Couldn\'t connect to QEMU using SSH.') raise FuchsiaTargetException('Couldn\'t connect to QEMU using SSH.')
...@@ -124,3 +129,10 @@ class Target(object): ...@@ -124,3 +129,10 @@ class Target(object):
def _GetSshConfigPath(self, path): def _GetSshConfigPath(self, path):
raise NotImplementedError raise NotImplementedError
def _GetTargetSdkArch(self):
"""Returns the Fuchsia SDK architecture name for the target CPU."""
if self._target_cpu == 'arm64':
return 'aarch64'
elif self._target_cpu == 'x64':
return 'x86_64'
raise Exception('Unknown target_cpu %s:' % self._target_cpu)
#!/usr/bin/env python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Packages a user.bootfs for a Fuchsia boot image, pulling in the runtime
dependencies of a test binary, and then uses either QEMU from the Fuchsia SDK
to run, or starts the bootserver to allow running on a hardware device."""
import argparse
import json
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from common_args import AddCommonArgs, ConfigureLogging, GetDeploymentTargetForArgs
from run_package import RunPackage
# Number of parallel test jobs used when --test-launcher-jobs is not given.
DEFAULT_TEST_CONCURRENCY = 4

# On-device paths for the launcher's JSON results summary and for the test
# filter file pushed to the target (see TEST_FILTER_PATH usage in main()).
TEST_RESULT_PATH = '/data/test_summary.json'
TEST_FILTER_PATH = '/data/test_filter.txt'
def main():
  """Deploys and runs a packaged GTest binary on a Fuchsia target.

  Parses the command line, assembles the on-device test process' argument
  list, deploys and executes the package via the deployment target, and
  retrieves the JSON results summary if one was requested.

  Returns:
    The exit code reported by the on-device test process."""
  parser = argparse.ArgumentParser()
  AddCommonArgs(parser)
  parser.add_argument('--gtest_filter',
                      help='GTest filter to use in place of any default.')
  parser.add_argument('--gtest_repeat',
                      help='GTest repeat value to use. This also disables the '
                           'test launcher timeout.')
  parser.add_argument('--gtest_break_on_failure', action='store_true',
                      default=False,
                      help='Should GTest break on failure; useful with '
                           '--gtest_repeat.')
  parser.add_argument('--single-process-tests', action='store_true',
                      default=False,
                      help='Runs the tests and the launcher in the same '
                           'process. Useful for debugging.')
  parser.add_argument('--test-launcher-batch-limit',
                      type=int,
                      help='Sets the limit of test batch to run in a single '
                           'process.')
  # --test-launcher-filter-file is specified relative to --output-directory,
  # so specifying type=os.path.* will break it.
  parser.add_argument('--test-launcher-filter-file',
                      default=None,
                      help='Override default filter file passed to target test '
                           'process. Set an empty path to disable filtering.')
  parser.add_argument('--test-launcher-jobs',
                      type=int,
                      help='Sets the number of parallel test jobs.')
  parser.add_argument('--test-launcher-summary-output',
                      help='Where the test launcher will output its json.')
  parser.add_argument('child_args', nargs='*',
                      help='Arguments for the test process.')
  args = parser.parse_args()
  ConfigureLogging(args)

  # Retries would mask flakes from the bots; disable them unconditionally.
  child_args = ['--test-launcher-retry-limit=0']
  if args.single_process_tests:
    child_args.append('--single-process-tests')
  if args.test_launcher_batch_limit:
    child_args.append('--test-launcher-batch-limit=%d' %
                      args.test_launcher_batch_limit)

  if args.test_launcher_jobs:
    test_concurrency = args.test_launcher_jobs
  else:
    test_concurrency = DEFAULT_TEST_CONCURRENCY
  child_args.append('--test-launcher-jobs=%d' % test_concurrency)

  if args.gtest_filter:
    child_args.append('--gtest_filter=' + args.gtest_filter)
  if args.gtest_repeat:
    child_args.append('--gtest_repeat=' + args.gtest_repeat)
    # Repeated runs may legitimately exceed the default launcher timeout.
    child_args.append('--test-launcher-timeout=-1')
  if args.gtest_break_on_failure:
    child_args.append('--gtest_break_on_failure')
  if args.child_args:
    child_args.extend(args.child_args)

  if args.test_launcher_summary_output:
    # The summary is written on the device and pulled back after the run.
    child_args.append('--test-launcher-summary-output=' + TEST_RESULT_PATH)

  with GetDeploymentTargetForArgs(args) as target:
    target.Start()

    if args.test_launcher_filter_file:
      target.PutFile(args.test_launcher_filter_file, TEST_FILTER_PATH)
      child_args.append('--test-launcher-filter-file=' + TEST_FILTER_PATH)

    returncode = RunPackage(args.output_directory, target, args.package,
                            child_args, args.package_manifest)

    if args.test_launcher_summary_output:
      target.GetFile(TEST_RESULT_PATH, args.test_launcher_summary_output)

    # Propagate the test process' exit code so that test failures are
    # reflected in this script's exit status instead of being discarded.
    return returncode
# Script entry point: main()'s result becomes the process exit status.
if __name__ == '__main__':
  sys.exit(main())
...@@ -247,6 +247,7 @@ template("test") { ...@@ -247,6 +247,7 @@ template("test") {
] ]
test_name = _output_name test_name = _output_name
exe_path = "$root_out_dir/" + get_label_info(_exec_target, "name") exe_path = "$root_out_dir/" + get_label_info(_exec_target, "name")
package_name = _output_name
} }
executable(_exec_target) { executable(_exec_target) {
......
...@@ -30,6 +30,7 @@ import logging ...@@ -30,6 +30,7 @@ import logging
import os import os
import select import select
import socket import socket
import subprocess
import sys import sys
import threading import threading
...@@ -108,8 +109,9 @@ class SubprocessOutputLogger(object): ...@@ -108,8 +109,9 @@ class SubprocessOutputLogger(object):
class _TargetHost(object): class _TargetHost(object):
def __init__(self, build_path, ports_to_forward): def __init__(self, build_path, ports_to_forward):
try: try:
self._target = None
self._target = qemu_target.QemuTarget( self._target = qemu_target.QemuTarget(
build_path, 'x64', verbose=False, ram_size_mb=8192) build_path, 'x64', ram_size_mb=8192)
self._target.Start() self._target.Start()
self._setup_target(build_path, ports_to_forward) self._setup_target(build_path, ports_to_forward)
except: except:
...@@ -122,25 +124,33 @@ class _TargetHost(object): ...@@ -122,25 +124,33 @@ class _TargetHost(object):
# TODO(sergeyu): Potentially this can be implemented using port # TODO(sergeyu): Potentially this can be implemented using port
# forwarding in SSH, but that feature is currently broken on Fuchsia, # forwarding in SSH, but that feature is currently broken on Fuchsia,
# see ZX-1555. Remove layout_test_proxy once that SSH bug is fixed. # see ZX-1555. Remove layout_test_proxy once that SSH bug is fixed.
self._target.CopyTo( self._target.PutFile(
os.path.join(build_path, 'package/layout_test_proxy.far'), '/tmp') os.path.join(
build_path,
'gen/build/fuchsia/layout_test_proxy/layout_test_proxy/layout_test_proxy.far'),
'/tmp')
command = ['run', '/tmp/layout_test_proxy.far', command = ['run', '/tmp/layout_test_proxy.far',
'--remote-address=' + qemu_target.HOST_IP_ADDRESS, '--remote-address=' + qemu_target.HOST_IP_ADDRESS,
'--ports=' + ','.join([str(p) for p in ports_to_forward])] '--ports=' + ','.join([str(p) for p in ports_to_forward])]
self._proxy = self._target.RunCommandPiped(command) self._proxy = self._target.RunCommandPiped(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
# Copy content_shell package to the device. # Copy content_shell package to the device.
self._target.CopyTo( self._target.PutFile(
os.path.join(build_path, 'package/content_shell.far'), '/tmp') os.path.join(build_path, 'gen/content/shell/content_shell.far'), '/tmp')
# Currently dynamic library loading is not implemented for packaged # Currently dynamic library loading is not implemented for packaged
# apps. Copy libosmesa.so to /system/lib as a workaround. # apps. Copy libosmesa.so to /system/lib as a workaround.
self._target.CopyTo( self._target.PutFile(
os.path.join(build_path, 'libosmesa.so'), '/system/lib') os.path.join(build_path, 'libosmesa.so'), '/system/lib')
def run_command(self, *args, **kvargs): def run_command(self, *args, **kvargs):
return self._target.RunCommandPiped(*args, **kvargs) return self._target.RunCommandPiped(*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kvargs)
def cleanup(self): def cleanup(self):
if self._target: if self._target:
# TODO(sergeyu): Currently __init__() always starts Qemu, so we can # TODO(sergeyu): Currently __init__() always starts Qemu, so we can
...@@ -179,7 +189,8 @@ class FuchsiaPort(base.Port): ...@@ -179,7 +189,8 @@ class FuchsiaPort(base.Port):
return ChromiumFuchsiaDriver return ChromiumFuchsiaDriver
def _path_to_driver(self, target=None): def _path_to_driver(self, target=None):
return self._build_path_with_target(target, 'package/content_shell.far') return self._build_path_with_target(target,
'gen/content/shell/content_shell.far')
def __del__(self): def __del__(self):
if self._zircon_logger: if self._zircon_logger:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment