Commit 995ca2e6 authored by Rakib M. Hasan, committed by Commit Bot

[web tests] Add an artifacts dictionary as a new per test field in the JSON results

This dictionary can then be used to derive the web-test-specific failure types
for the results viewer. Once the results viewer is updated to read the specific
failure type from the artifacts, the web-test-specific results can be removed
from the test expectations and JSON results.
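
For illustration, a per-test entry in the JSON results might then take roughly
the following shape. This is a sketch only: 'sample_file' is the one artifact
name visible in this change, the surrounding fields and the 'retry_1'
sub-directory are assumed, and paths are recorded relative to the results
directory.

    {
        'expected': 'PASS',
        'actual': 'CRASH CRASH',
        'artifacts': {
            'sample_file': [
                'failures/unexpected/crash-with-sample_sample.txt',
                'retry_1/failures/unexpected/crash-with-sample_sample.txt',
            ],
        },
    }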

Bug: chromium:1014196
Change-Id: Ic78c97f8dbfdf1669d38fd6ba5b0ab11489b5c26
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1842753
Commit-Queue: Rakib Hasan <rmhasan@google.com>
Reviewed-by: Robert Ma <robertma@chromium.org>
Reviewed-by: Dirk Pranke <dpranke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#705650}
parent d29bb5f2
......@@ -1055,6 +1055,7 @@ if (!is_ios) {
"//third_party/blink/public/mojom:mojom_platform_js_data_deps",
"//third_party/blink/renderer/core/html:js_files_for_form_controls_web_tests",
"//third_party/blink/renderer/core/script:js_files_for_web_tests",
"//third_party/catapult/third_party/typ",
"//third_party/mesa_headers",
"//tools/imagediff",
]
......
......@@ -36,7 +36,7 @@ from blinkpy.common.path_finder import WEB_TESTS_LAST_COMPONENT
from blinkpy.common.memoized import memoized
from blinkpy.common.net.buildbot import Build
from blinkpy.tool.commands.command import Command
from blinkpy.web_tests.controllers.test_result_writer import TestResultWriter
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models.test_expectations import TestExpectations
from blinkpy.web_tests.port import base, factory
......@@ -101,12 +101,12 @@ class AbstractRebaseliningCommand(Command):
def _file_name_for_actual_result(self, test_name, suffix):
# output_filename takes extensions starting with '.'.
return self._host_port.output_filename(
test_name, TestResultWriter.FILENAME_SUFFIX_ACTUAL, '.' + suffix)
test_name, test_failures.FILENAME_SUFFIX_ACTUAL, '.' + suffix)
def _file_name_for_expected_result(self, test_name, suffix):
# output_filename takes extensions starting with '.'.
return self._host_port.output_filename(
test_name, TestResultWriter.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
test_name, test_failures.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
class ChangeSet(object):
......
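
The FILENAME_SUFFIX_* constants used here move from TestResultWriter to
blinkpy.web_tests.models.test_failures. A minimal sketch of the assumed values,
inferred from the file names asserted in the unit tests further down
(e.g. '/tmp/foo_a_b-actual.txt'); the actual definitions are not shown in this
diff:

    # Assumed definitions in test_failures.py (inferred, not copied from this CL).
    FILENAME_SUFFIX_ACTUAL = '-actual'
    FILENAME_SUFFIX_EXPECTED = '-expected'
    FILENAME_SUFFIX_SAMPLE = '_sample'

    # output_filename composes <sanitized test name> + suffix + extension, so
    # output_filename('foo.html?a/b', FILENAME_SUFFIX_ACTUAL, '.txt') would
    # yield 'foo_a_b-actual.txt'.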
......@@ -47,7 +47,6 @@ from blinkpy.common import exit_codes
from blinkpy.common.net.file_uploader import FileUploader
from blinkpy.common.path_finder import PathFinder
from blinkpy.tool import grammar
from blinkpy.web_tests.controllers.test_result_writer import TestResultWriter
from blinkpy.web_tests.controllers.web_test_finder import WebTestFinder
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner
from blinkpy.web_tests.layout_package import json_results_generator
......@@ -288,10 +287,11 @@ class Manager(object):
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file))
self._test_requires_lock(test_file),
retry_attempt=retry_attempt)
def _test_requires_lock(self, test_file):
"""Returns True if the test needs to be locked when running multiple
......@@ -382,7 +382,8 @@ class Manager(object):
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
test_inputs.append(
self._test_input_for_file(test, retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
......@@ -436,28 +437,50 @@ class Manager(object):
logs after that time.
"""
crashed_processes = []
test_to_crash_failure = {}
# reset static variables for Failure type classes
test_failures.TestFailure.port = self._port
test_failures.TestFailure.result_directory = self._results_directory
test_failures.TestFailure.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.iteritems():
if result.type != test_expectations.CRASH:
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
if failure.has_log:
if (not isinstance(failure, test_failures.FailureCrash) or
failure.has_log):
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, (crash_log, crash_site) in crash_logs.iteritems():
writer = TestResultWriter(self._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
run_results.unexpected_results_by_name[test].crash_site = crash_site
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(
crashed_processes, start_time) or {}
for test, sample_file in sample_files.iteritems():
test_failures.TestFailure.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(
test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(
self._results_directory, artifacts_sub_dir, artifact_relative_path)
self._filesystem.maybe_make_directory(
self._filesystem.dirname(artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact('sample_file',
self._filesystem.join(artifacts_sub_dir, artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(
crashed_processes, start_time) or {}
for test, (crash_log, crash_site) in new_crash_logs.iteritems():
test_failures.TestFailure.test_name = test
failure.crash_log = crash_log
failure.has_log = self._port.output_contains_sanitizer_messages(
failure.crash_log)
test_result = run_results.unexpected_results_by_name[test]
test_result.crash_site = crash_site
test_to_crash_failure[test].create_artifacts(
test_result.artifacts, force_overwrite=True)
def _clobber_old_results(self):
dir_above_results_path = self._filesystem.dirname(self._results_directory)
......
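
One connection worth spelling out: the worker factory below no longer creates a
'retry_%d' results sub-directory; the per-retry location now comes from typ's
Artifacts object via ArtifactsSubDirectory(), and only the path relative to the
results directory is recorded in the JSON output. A condensed sketch of the
sample-file handling above, with 'retry_1' as an assumed value on the first
retry:

    rel_name = port.output_filename(test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
    sub_dir = test_result.artifacts.ArtifactsSubDirectory()   # e.g. 'retry_1' (assumed)
    abs_path = filesystem.join(results_directory, sub_dir, rel_name)
    filesystem.maybe_make_directory(filesystem.dirname(abs_path))
    filesystem.copyfile(sample_file, abs_path)
    test_result.artifacts.AddArtifact('sample_file', filesystem.join(sub_dir, rel_name))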
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import unittest
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.system_host_mock import MockSystemHost
from blinkpy.web_tests.controllers.test_result_writer import write_test_result
from blinkpy.web_tests.port.driver import DriverOutput
from blinkpy.web_tests.port.test import TestPort
from blinkpy.web_tests.models import test_failures
class TestResultWriterTests(unittest.TestCase):
def setUp(self):
self._actual_output = DriverOutput(
text='', image=None, image_hash=None, audio=None)
self._expected_output = DriverOutput(
text='', image=None, image_hash=None, audio=None)
def run_test(self, failures=None, files=None, filename='foo.html'):
failures = failures or []
host = MockSystemHost()
host.filesystem.files = files or {}
port = TestPort(host=host, port_name='test-mac-mac10.11', options=optparse.Values())
write_test_result(
host.filesystem, port, '/tmp', filename, self._actual_output,
self._expected_output, failures)
return host.filesystem
def test_success(self):
# Nothing is written when the test passes.
fs = self.run_test(failures=[])
self.assertEqual(fs.written_files, {})
def test_reference_exists(self):
failure = test_failures.FailureReftestMismatch(
self._actual_output, self._expected_output)
failure.reference_filename = '/src/exists-expected.html'
files = {'/src/exists-expected.html': 'yup'}
fs = self.run_test(failures=[failure], files=files)
self.assertEqual(fs.written_files, {'/tmp/exists-expected.html': 'yup'})
failure = test_failures.FailureReftestMismatchDidNotOccur(
self._actual_output, self._expected_output)
failure.reference_filename = '/src/exists-expected-mismatch.html'
files = {'/src/exists-expected-mismatch.html': 'yup'}
fs = self.run_test(failures=[failure], files=files)
self.assertEqual(fs.written_files, {'/tmp/exists-expected-mismatch.html': 'yup'})
def test_reference_is_missing(self):
failure = test_failures.FailureReftestMismatch(
self._actual_output, self._expected_output)
failure.reference_filename = 'notfound.html'
fs = self.run_test(failures=[failure], files={})
self.assertEqual(fs.written_files, {})
failure = test_failures.FailureReftestMismatchDidNotOccur(
self._actual_output, self._expected_output)
failure.reference_filename = 'notfound.html'
fs = self.run_test(failures=[failure], files={})
self.assertEqual(fs.written_files, {})
def test_reftest_image_missing(self):
failure = test_failures.FailureReftestNoImageGenerated(
self._actual_output, self._expected_output)
failure.reference_filename = '/src/exists-expected.html'
files = {'/src/exists-expected.html': 'yup'}
fs = self.run_test(failures=[failure], files=files)
self.assertEqual(fs.written_files, {'/tmp/exists-expected.html': 'yup'})
failure = test_failures.FailureReftestNoReferenceImageGenerated(
self._actual_output, self._expected_output)
failure.reference_filename = '/src/exists-expected.html'
files = {'/src/exists-expected.html': 'yup'}
fs = self.run_test(failures=[failure], files=files)
self.assertEqual(fs.written_files, {'/tmp/exists-expected.html': 'yup'})
def test_slash_in_test_name(self):
failure = test_failures.FailureTestHarnessAssertion(
self._actual_output, self._expected_output)
fs = self.run_test(failures=[failure], filename='foo.html?a/b')
self.assertTrue('/tmp/foo_a_b-actual.txt' in fs.written_files)
self.assertEqual(set(fs.written_files.keys()), {
'/tmp/foo_a_b-actual.txt',
'/tmp/foo_a_b-diff.txt',
'/tmp/foo_a_b-expected.txt',
'/tmp/foo_a_b-pretty-diff.html',
})
# Should not mkdir '/tmp/foo.html?a'
self.assertEqual(fs.dirs, {'/', '/tmp'})
......@@ -69,7 +69,6 @@ class WebTestRunner(object):
self._expectations = None
self._test_inputs = []
self._retry_attempt = 0
self._shards_to_redo = []
self._current_run_results = None
......@@ -84,10 +83,8 @@ class WebTestRunner(object):
# prevents content shell reuse.
if not self._options.must_use_derived_batch_size and retry_attempt >= 1:
batch_size = 1
self._expectations = expectations
self._test_inputs = test_inputs
self._retry_attempt = retry_attempt
test_run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
self._current_run_results = test_run_results
......@@ -162,17 +159,12 @@ class WebTestRunner(object):
shard.test_inputs = list(itertools.chain(*tests_by_args.values()))
def _worker_factory(self, worker_connection):
results_directory = self._results_directory
if self._retry_attempt > 0:
results_directory = self._filesystem.join(self._results_directory,
'retry_%d' % self._retry_attempt)
self._filesystem.maybe_make_directory(results_directory)
return Worker(worker_connection, results_directory, self._options)
return Worker(worker_connection, self._results_directory, self._options)
def _mark_interrupted_tests_as_skipped(self, test_run_results):
for test_input in self._test_inputs:
if test_input.test_name not in test_run_results.results_by_name:
result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
result = test_results.TestResult(test_input.test_name, failures=[test_failures.FailureEarlyExit()])
# FIXME: We probably need to loop here if there are multiple iterations.
# FIXME: Also, these results are really neither expected nor unexpected. We probably
# need a third type of result.
......
......@@ -31,7 +31,8 @@
class TestInput(object):
"""Groups information about a test for easy passing of data."""
def __init__(self, test_name, timeout_ms=None, requires_lock=None, reference_files=None):
def __init__(self, test_name, timeout_ms=None, requires_lock=None,
reference_files=None, retry_attempt=0, requires_force_overwrite=False):
# TestInput objects are normally constructed by the manager and passed
# to the workers, but some of these fields are set lazily in the workers
# where possible, because they require us to look at the filesystem,
......@@ -40,11 +41,15 @@ class TestInput(object):
self.timeout_ms = timeout_ms
self.requires_lock = requires_lock
self.reference_files = reference_files
self.retry_attempt = retry_attempt
self.requires_force_overwrite = requires_force_overwrite
def __repr__(self):
return (
"TestInput('%s', timeout_ms=%s, requires_lock=%s, reference_files=%s)" % (
"TestInput('%s', timeout_ms=%s, requires_lock=%s, reference_files=%s, retry_attempt=%d, requires_force_overwrite=%s)" % (
self.test_name,
self.timeout_ms,
self.requires_lock,
self.reference_files))
self.reference_files,
self.retry_attempt,
self.requires_force_overwrite))
......@@ -30,15 +30,38 @@ import cPickle
from blinkpy.web_tests.models import test_failures
from blinkpy.common import path_finder
path_finder.add_typ_dir_to_sys_path()
from typ.artifacts import Artifacts
def build_test_result(
driver_output, test_name, retry_attempt=0,
failures=None, test_run_time=None, reftest_type=None,
pid=None, references=None, device_failed=False, crash_site=None):
failures = failures or []
if not failures and driver_output.error:
failures.append(test_failures.PassWithStderr(driver_output))
return TestResult(
test_name, retry_attempt=retry_attempt,
failures=failures, test_run_time=test_run_time,
reftest_type=reftest_type, pid=pid, references=references,
device_failed=device_failed, crash_site=crash_site)
class TestResult(object):
"""Data object containing the results of a single test."""
repeat_tests = True
results_directory = ''
filesystem = None
@staticmethod
def loads(string):
return cPickle.loads(string)
def __init__(self, test_name, failures=None, test_run_time=None, reftest_type=None,
def __init__(self, test_name, retry_attempt=0, failures=None,
test_run_time=None, reftest_type=None,
pid=None, references=None, device_failed=False, crash_site=None):
self.test_name = test_name
self.failures = failures or []
......@@ -51,7 +74,7 @@ class TestResult(object):
self.has_repaint_overlay = any(
failure.has_repaint_overlay for failure in self.failures)
self.crash_site = crash_site
self.retry_attempt = retry_attempt
# FIXME: Setting this in the constructor makes this class hard to mutate.
self.type = test_failures.determine_result_type(failures)
......@@ -60,11 +83,20 @@ class TestResult(object):
self.shard_name = ''
self.total_run_time = 0 # The time taken to run the test plus any references, compute diffs, etc.
self.test_number = None
self.artifacts = Artifacts(
self.results_directory, self.filesystem, retry_attempt,
repeat_tests=self.repeat_tests)
def create_artifacts(self):
for failure in self.failures:
failure.create_artifacts(self.artifacts)
def __eq__(self, other):
return (self.test_name == other.test_name and
self.failures == other.failures and
self.test_run_time == other.test_run_time)
self.test_run_time == other.test_run_time and
self.retry_attempt == other.retry_attempt and
self.results_directory == other.results_directory)
def __ne__(self, other):
return not (self == other)
......
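
A minimal usage sketch of the new TestResult/artifact flow, using the mock
helpers that appear elsewhere in this CL. The class-level wiring mirrors the
manager.py hunk above; which files each failure actually writes is left to the
individual TestFailure.create_artifacts() implementations, which are not part
of this diff, and it is assumed that typ's Artifacts accepts the blinkpy
filesystem object wired in below:

    import optparse

    from blinkpy.common.system.system_host_mock import MockSystemHost
    from blinkpy.web_tests.models import test_failures, test_results
    from blinkpy.web_tests.port.driver import DriverOutput
    from blinkpy.web_tests.port.test import TestPort

    host = MockSystemHost()
    port = TestPort(host=host, port_name='test-mac-mac10.11',
                    options=optparse.Values())

    # Class-level hooks, set once per run (as the manager does above).
    test_failures.TestFailure.port = port
    test_failures.TestFailure.filesystem = host.filesystem
    test_failures.TestFailure.result_directory = '/tmp'
    test_failures.TestFailure.test_name = 'failures/unexpected/text.html'
    test_results.TestResult.filesystem = host.filesystem
    test_results.TestResult.results_directory = '/tmp'

    actual = DriverOutput(text='fail', image=None, image_hash=None, audio=None)
    expected = DriverOutput(text='pass', image=None, image_hash=None, audio=None)
    result = test_results.build_test_result(
        actual, 'failures/unexpected/text.html', retry_attempt=0,
        failures=[test_failures.FailureTextMismatch(actual, expected)])
    result.create_artifacts()  # each failure registers files on result.artifacts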
......@@ -337,6 +337,11 @@ def summarize_results(port_obj, expectations, initial_results,
if is_unexpected:
test_dict.update(_interpret_test_failures(retry_result.failures))
for test_result, _ in merged_results:
for artifact_name, artifacts in test_result.artifacts.artifacts.items():
artifact_dict = test_dict.setdefault('artifacts', {})
artifact_dict.setdefault(artifact_name, []).extend(artifacts)
# Store test hierarchically by directory. e.g.
# foo/bar/baz.html: test_dict
# foo/bar/baz1.html: test_dict
......
......@@ -37,18 +37,19 @@ from blinkpy.web_tests.port.driver import DriverOutput
def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
dummy_1, dummy_2 = DriverOutput(None, None, None, None), DriverOutput(None, None, None, None)
if result_type == test_expectations.TIMEOUT:
failures = [test_failures.FailureTimeout(None, None)]
failures = [test_failures.FailureTimeout(dummy_1)]
elif result_type == test_expectations.AUDIO:
failures = [test_failures.FailureAudioMismatch(None, None)]
failures = [test_failures.FailureAudioMismatch(dummy_1, dummy_2)]
elif result_type == test_expectations.TEXT:
failures = [test_failures.FailureTextMismatch(None, None)]
failures = [test_failures.FailureTextMismatch(dummy_1, dummy_2)]
elif result_type == test_expectations.IMAGE:
failures = [test_failures.FailureImageHashMismatch(None, None)]
failures = [test_failures.FailureImageHashMismatch(dummy_1, dummy_2)]
elif result_type == test_expectations.CRASH:
failures = [test_failures.FailureCrash(None, None)]
failures = [test_failures.FailureCrash(dummy_1)]
elif result_type == test_expectations.LEAK:
failures = [test_failures.FailureLeak(None, None)]
failures = [test_failures.FailureLeak(dummy_1)]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
......
......@@ -95,10 +95,10 @@ class TestList(object):
actual_text=None, expected_text=None, expected_image=None)
def add_reftest(self, name, reference_name, same_image=True,
actual_text=None, expected_text=None, crash=False):
actual_text=None, expected_text=None, crash=False, error=''):
self.add(name, actual_checksum='checksum', actual_image='IMAGE', expected_image=None,
actual_text=actual_text, expected_text=expected_text,
crash=crash)
crash=crash, error=error)
if same_image:
self.add_reference(reference_name)
else:
......@@ -116,14 +116,14 @@ class TestList(object):
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
TOTAL_TESTS = 127
TOTAL_TESTS = 151
TOTAL_WONTFIX = 3
TOTAL_SKIPS = 20 + TOTAL_WONTFIX
TOTAL_CRASHES = 76
TOTAL_CRASHES = 78
UNEXPECTED_PASSES = 1
UNEXPECTED_NON_VIRTUAL_FAILURES = 21
UNEXPECTED_FAILURES = 44
UNEXPECTED_NON_VIRTUAL_FAILURES = 33
UNEXPECTED_FAILURES = 65
def unit_test_list():
......@@ -144,6 +144,37 @@ def unit_test_list():
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/unexpected/image-mismatch.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/no-image-generated.html',
expected_image='image-pngtEXtchecksum\x00checksum-png',
actual_image=None, actual_checksum=None)
tests.add('failures/unexpected/no-image-baseline.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image=None)
tests.add('failures/unexpected/audio-mismatch.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/unexpected/no-audio-baseline.html',
actual_audio=base64.b64encode('audio_fail-wav'),
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/unexpected/no-audio-generated.html',
expected_audio=base64.b64encode('audio_fail-wav'),
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/unexpected/text-mismatch-overlay.html',
actual_text='"paintInvalidations": [\nfail',
expected_text='"paintInvalidations": [\npass')
tests.add('failures/unexpected/no-text-baseline.html',
actual_text='"paintInvalidations": [\nfail', expected_text=None)
tests.add('failures/unexpected/no-text-generated.html',
actual_text=None, expected_text='"paintInvalidations": [\npass')
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/newlines_leading.html',
expected_text='\nfoo\n', actual_text='foo\n')
......@@ -167,6 +198,8 @@ layer at (0,0) size 800x34
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-sample.html', crash=True)
tests.add('failures/unexpected/crash-with-delayed-log.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error='mock-std-error-output')
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
......@@ -238,6 +271,9 @@ layer at (0,0) size 800x34
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html')
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/reftest-mismatch-with-text-mismatch-with-stderr.html',
'failures/unexpected/reftest-mismatch-with-text-mismatch-with-stderr-expected.html',
same_image=False, actual_text='actual', expected_text='expected', error='oops')
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html')
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, expected_image=None)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None)
......@@ -419,6 +455,26 @@ class TestPort(Port):
'linux': ['precise', 'trusty']
}
def look_for_new_samples(self, crashed_processes, start_time):
del start_time
sample_files = {}
for cp in crashed_processes:
if cp[0].endswith('crash-with-sample.html'):
sample_file = cp[0].replace('.html', '_sample.txt')
self._filesystem.maybe_make_directory(
self._filesystem.dirname(sample_file))
self._filesystem.write_binary_file(sample_file, 'crash sample file')
sample_files[cp[0]] = sample_file
return sample_files
def look_for_new_crash_logs(self, crashed_processes, start_time):
del start_time
crash_logs = {}
for cp in crashed_processes:
if cp[0].endswith('-with-delayed-log.html'):
crash_logs[cp[0]] = ('delayed crash log', '/tmp')
return crash_logs
def _path_to_driver(self, target=None):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
......@@ -560,6 +616,7 @@ class TestDriver(Driver):
actual_text = test.actual_text
crash = test.crash
web_process_crash = test.web_process_crash
leak = test.leak
if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
self._port._flakes.add(test_name)
......@@ -590,24 +647,30 @@ class TestDriver(Driver):
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
leak_log = ''
if leak:
leak_log = 'leak detected'
crash_log = ''
if crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash_log = 'crash log'
elif web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = 'web process crash log'
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or crash_log
if 'crash-reftest.html' in test_name:
crashed_process_name = self._port.driver_name()
crashed_pid = 3
crash = True
crash_log = 'reftest crash log'
if test.actual_checksum == driver_input.image_hash:
image = None
else:
......@@ -616,7 +679,7 @@ class TestDriver(Driver):
crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
leak=test.leak)
leak=test.leak, leak_log=leak_log)
def stop(self, timeout_secs=0.0):
self.started = False