Commit bae194e3 authored by Xianzhu Wang, committed by Commit Bot

Reland "[run_web_tests] Initialize printer earlier"

This is a reland of dcf5246f

The fix is to ensure the logger has proper log level.

Original change's description:
> [run_web_tests] Initialize printer earlier
>
> This allows logging before creating the port.
>
> Bug: 896799
> Change-Id: I3198fcaeb8d64d748a12d9882cddc273a9e3efe6
> Reviewed-on: https://chromium-review.googlesource.com/c/1289305
> Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
> Reviewed-by: Dirk Pranke <dpranke@chromium.org>
> Reviewed-by: Ned Nguyen <nednguyen@google.com>
> Cr-Commit-Position: refs/heads/master@{#601049}

Bug: 896799
Change-Id: I7145e2aebf122c6fecf0732d56a81a808cffbe4d
Reviewed-on: https://chromium-review.googlesource.com/c/1291358
Reviewed-by: Ned Nguyen <nednguyen@google.com>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#601243}
parent 0ef9d747
......@@ -108,7 +108,7 @@ class LayoutTestRunner(object):
num_workers = min(num_workers, len(all_shards))
if retry_attempt < 1:
self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
self._printer.print_workers_and_shards(self._port, num_workers, len(all_shards), len(locked_shards))
if self._options.dry_run:
return test_run_results
......@@ -198,11 +198,11 @@ class LayoutTestRunner(object):
actual_string = self._expectations.expectation_to_string(result.type)
if result.device_failed:
self._printer.print_finished_test(result, False, expectation_string, 'Aborted')
self._printer.print_finished_test(self._port, result, False, expectation_string, 'Aborted')
return
test_run_results.add(result, expected, self._test_is_slow(result.test_name))
self._printer.print_finished_test(result, expected, expectation_string, actual_string)
self._printer.print_finished_test(self._port, result, expected, expectation_string, actual_string)
self._interrupt_if_at_failure_limits(test_run_results)
def handle(self, name, source, *args):
......
......@@ -51,13 +51,13 @@ class FakePrinter(object):
def print_expected(self, run_results, get_tests_with_result_type):
pass
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
def print_workers_and_shards(self, port, num_workers, num_shards, num_locked_shards):
pass
def print_started_test(self, test_name):
pass
def print_finished_test(self, result, expected, exp_str, got_str):
def print_finished_test(self, port, result, expected, exp_str, got_str):
pass
def write(self, msg):
......
......@@ -43,7 +43,7 @@ from blinkpy.web_tests.views import printing
_log = logging.getLogger(__name__)
def main(argv, stdout, stderr):
def main(argv, stderr):
options, args = parse_args(argv)
if options.platform and 'test' in options.platform and not 'browser_test' in options.platform:
......@@ -55,27 +55,31 @@ def main(argv, stdout, stderr):
else:
host = Host()
printer = printing.Printer(host, options, stderr)
try:
port = host.port_factory.get(options.platform, options)
except (NotImplementedError, ValueError) as error:
# FIXME: is this the best way to handle unsupported port names?
print >> stderr, str(error)
_log.error(error)
printer.cleanup()
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
try:
return run(port, options, args, stderr, stdout).exit_code
return run(port, options, args, printer).exit_code
# We need to still handle KeyboardInterrupt, at least for blinkpy unittest cases.
except KeyboardInterrupt:
return exit_codes.INTERRUPTED_EXIT_STATUS
except test_run_results.TestRunException as error:
print >> stderr, error.msg
_log.error(error.msg)
return error.code
except BaseException as error:
if isinstance(error, Exception):
print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
_log.error('\n%s raised: %s', error.__class__.__name__, error)
traceback.print_exc(file=stderr)
return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
finally:
printer.cleanup()
def deprecate(option, opt_str, _, parser):
......@@ -596,29 +600,16 @@ def _set_up_derived_options(port, options, args):
options.image_first_tests.extend(line for line in contents.splitlines(False) if line)
def _run_tests(port, options, args, printer):
def run(port, options, args, printer):
_set_up_derived_options(port, options, args)
manager = Manager(port, options, printer)
printer.print_config(port.results_directory())
return manager.run(args)
def run(port, options, args, logging_stream, stdout):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
printer = printing.Printer(port, options, logging_stream, logger=logger)
try:
run_details = _run_tests(port, options, args, printer)
printer.flush()
_log.debug('')
_log.debug('Testing completed. Exit status: %d', run_details.exit_code)
return run_details
finally:
printer.cleanup()
printer.print_config(port)
run_details = manager.run(args)
_log.debug('')
_log.debug('Testing completed. Exit status: %d', run_details.exit_code)
printer.flush()
return run_details
if __name__ == '__main__':
sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
sys.exit(main(sys.argv[1:], sys.stderr))
......@@ -47,6 +47,7 @@ from blinkpy.web_tests import run_webkit_tests
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.port import test
from blinkpy.web_tests.views.printing import Printer
_MOCK_ROOT = os.path.join(
path_finder.get_chromium_src_dir(), 'third_party', 'pymock')
......@@ -81,10 +82,8 @@ def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None,
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args,
logging_stream=logging_stream, stdout=stdout)
printer = Printer(host, options, StringIO.StringIO())
run_details = run_webkit_tests.run(port_obj, options, parsed_args, printer)
return run_details.exit_code == 0
......@@ -103,9 +102,8 @@ def run_and_capture(port_obj, options, parsed_args, shared_port=True):
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args,
logging_stream=logging_stream, stdout=stdout)
printer = Printer(port_obj.host, options, logging_stream)
run_details = run_webkit_tests.run(port_obj, options, parsed_args, printer)
return (run_details, logging_stream)
......@@ -135,10 +133,8 @@ def get_test_results(args, host=None, port_obj=None):
host = host or MockHost()
port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args,
logging_stream=logging_stream, stdout=stdout)
printer = Printer(host, options, StringIO.StringIO())
run_details = run_webkit_tests.run(port_obj, options, parsed_args, printer)
all_results = []
if run_details.initial_results:
......@@ -177,10 +173,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
extra_args=['--json-failing-test-results', '/tmp/json_failing_test_results.json'],
tests_included=True)
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
host = MockHost()
port_obj = host.port_factory.get(options.platform, options)
details = run_webkit_tests.run(port_obj, options, args, logging_stream, stdout=stdout)
printer = Printer(host, options, logging_stream)
details = run_webkit_tests.run(port_obj, options, args, printer)
# These numbers will need to be updated whenever we add new tests.
self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
......@@ -1070,28 +1066,25 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files))
def test_unsupported_platform(self):
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
res = run_webkit_tests.main(['--platform', 'foo'], stderr)
self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
self.assertEqual(stdout.getvalue(), '')
self.assertTrue('unsupported platform' in stderr.getvalue())
def test_verbose_in_child_processes(self):
# When we actually run multiple processes, we may have to reconfigure logging in the
# child process (e.g., on win32) and we need to make sure that works and we still
# see the verbose log output. However, we can't use logging_run() because using
# output_capture to capture stdout and stderr latter results in a nonpicklable host.
# output_capture to capture stderr latter results in a nonpicklable host.
options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--jobs',
'2', 'passes/text.html', 'passes/image.html'], tests_included=True)
host = MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
logging_stream = StringIO.StringIO()
stdout = StringIO.StringIO()
run_webkit_tests.run(port_obj, options, parsed_args,
logging_stream=logging_stream, stdout=stdout)
printer = Printer(host, options, logging_stream)
run_webkit_tests.run(port_obj, options, parsed_args, printer)
self.assertTrue('text.html passed' in logging_stream.getvalue())
self.assertTrue('image.html passed' in logging_stream.getvalue())
......@@ -1145,8 +1138,8 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
options, args = parse_args()
port_obj = host.port_factory.get(options.platform, options)
run_webkit_tests.run(port_obj, options, args, StringIO.StringIO(), StringIO.StringIO())
printer = Printer(host, options, StringIO.StringIO())
run_webkit_tests.run(port_obj, options, args, printer)
self.assertListEqual(options.image_first_tests, [image_first_folder1, image_first_folder2])
......@@ -1610,8 +1603,7 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
class MainTest(unittest.TestCase):
def test_exception_handling(self):
# Replacing protected method _run_tests - pylint: disable=protected-access
orig_run_fn = run_webkit_tests._run_tests
orig_run_fn = run_webkit_tests.run
# pylint: disable=unused-argument
def interrupting_run(port, options, args, printer):
......@@ -1627,19 +1619,18 @@ class MainTest(unittest.TestCase):
def exception_raising_run(port, options, args, printer):
assert False
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
run_webkit_tests._run_tests = interrupting_run
res = run_webkit_tests.main([], stdout, stderr)
run_webkit_tests.run = interrupting_run
res = run_webkit_tests.main([], stderr)
self.assertEqual(res, exit_codes.INTERRUPTED_EXIT_STATUS)
run_webkit_tests._run_tests = successful_run
res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
run_webkit_tests.run = successful_run
res = run_webkit_tests.main(['--platform', 'test'], stderr)
self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
run_webkit_tests._run_tests = exception_raising_run
res = run_webkit_tests.main([], stdout, stderr)
run_webkit_tests.run = exception_raising_run
res = run_webkit_tests.main([], stderr)
self.assertEqual(res, exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)
finally:
run_webkit_tests._run_tests = orig_run_fn
run_webkit_tests.run = orig_run_fn
......@@ -67,9 +67,8 @@ class MeteredStream(object):
self._logger = logger
self._log_handler = None
if self._logger:
log_level = logging.DEBUG if verbose else logging.INFO
self._log_handler = _LogHandler(self)
self._log_handler.setLevel(log_level)
self._log_handler.setLevel(self._logger.level)
self._logger.addHandler(self._log_handler)
def __del__(self):
......
......@@ -28,6 +28,7 @@
"""Package that handles non-debug, non-file output for run_web_tests.py."""
import logging
import math
import optparse
......@@ -57,13 +58,15 @@ def print_options():
class Printer(object):
"""Class handling all non-debug-logging printing done by run_web_tests.py."""
def __init__(self, port, options, regular_output, logger=None):
def __init__(self, host, options, regular_output):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._host = host
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger,
number_of_columns=host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
......@@ -73,15 +76,15 @@ class Printer(object):
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default('Test configuration: %s' % self._port.test_configuration())
self._print_default('View the test results at file://%s/results.html' % results_directory)
def print_config(self, port):
self._print_default("Using port '%s'" % port.name())
self._print_default('Test configuration: %s' % port.test_configuration())
self._print_default('View the test results at file://%s/results.html' % port.results_directory())
if self._options.order == 'random':
self._print_default('Using random order with seed: %d' % self._options.seed)
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
fs = self._host.filesystem
fallback_path = [fs.split(x)[1] for x in port.baseline_search_path()]
self._print_default('Baseline search path: %s -> generic' % ' -> '.join(fallback_path))
self._print_default('Using %s build' % self._options.configuration)
......@@ -93,7 +96,7 @@ class Printer(object):
self._print_default('Regular timeout: %s, slow test timeout: %s' %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('Command line: ' + ' '.join(port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_shard_test_files, num_to_run, repeat_each, iterations):
......@@ -112,8 +115,8 @@ class Printer(object):
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, 'flaky', tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
def print_workers_and_shards(self, port, num_workers, num_shards, num_locked_shards):
driver_name = port.driver_name()
if num_workers == 1:
self._print_default('Running 1 %s.' % driver_name)
self._print_debug('(%s).' % grammar.pluralize('shard', num_shards))
......@@ -220,7 +223,7 @@ class Printer(object):
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
fs = self._host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
......@@ -240,7 +243,7 @@ class Printer(object):
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
def print_finished_test(self, port, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
......@@ -248,7 +251,7 @@ class Printer(object):
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
self._print_test_trace(port, result, exp_str, got_str)
elif self._options.verbose or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
......@@ -272,35 +275,35 @@ class Printer(object):
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
def _print_test_trace(self, port, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
base = port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
reference_args = ' '.join(self._port.lookup_virtual_reference_args(test_name))
args = ' '.join(port.lookup_virtual_test_args(test_name))
reference_args = ' '.join(port.lookup_virtual_reference_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
self._print_default(' reference_args: %s' % reference_args)
references = self._port.reference_files(test_name)
references = port.reference_files(test_name)
if references:
for _, filename in references:
self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
self._print_default(' ref: %s' % port.relative_test_filename(filename))
else:
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_baseline(port, test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port.host.filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
def _print_baseline(self, port, test_name, extension):
baseline = port.expected_filename(test_name, extension)
if self._host.filesystem.exists(baseline):
relpath = port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
......
......@@ -101,7 +101,7 @@ class Testprinter(unittest.TestCase):
self._port = host.port_factory.get('test', options)
regular_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output)
printer = printing.Printer(host, options, regular_output)
return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
......@@ -129,7 +129,7 @@ class Testprinter(unittest.TestCase):
printer._options.slow_time_out_ms = 12000
printer._options.order = 'random'
printer._options.seed = 1234
printer.print_config('/tmp')
printer.print_config(self._port)
self.assertIn("Using port 'test-mac-mac10.10'", err.getvalue())
self.assertIn('Test configuration: <mac10.10, x86, release>', err.getvalue())
self.assertIn('View the test results at file:///tmp', err.getvalue())
......@@ -142,7 +142,7 @@ class Testprinter(unittest.TestCase):
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
printer.print_config(self._port)
self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
def test_print_summary(self):
......@@ -201,7 +201,7 @@ class Testprinter(unittest.TestCase):
printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
printer.print_finished_test(self._port, result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
def test_print_found(self):
......@@ -224,11 +224,11 @@ class Testprinter(unittest.TestCase):
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
printer.print_finished_test(self._port, result, expected=True, exp_str='', got_str='')
printer.print_started_test('passes/text.html')
result = self.get_result('passes/text.html')
printer.print_finished_test(result, expected=True, exp_str='', got_str='')
printer.print_finished_test(self._port, result, expected=True, exp_str='', got_str='')
# Only the first test's start should be printed.
lines = err.buflist
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.