Commit 2b42b7c5 authored by Dirk Pranke, committed by Commit Bot

Remove a bunch of logging from run-webkit-tests.

Long, long ago we added a bunch of diagnostic logging to
run-webkit-tests to identify slow tests and slow directories.

I'm willing to bet no one has looked at that output in 6+ years,
but it still clogs up every run.

This CL removes all of that, and also restores the list of
failed tests at the end of the run that was removed in
r494598 (though this is a much simpler version of that).

R=qyearsley@chromium.org
BUG=619065

Change-Id: Idb81192f848a307cdf4bcc834199156d2fd2d563
Reviewed-on: https://chromium-review.googlesource.com/1023053
Commit-Queue: Dirk Pranke <dpranke@chromium.org>
Reviewed-by: Quinten Yearsley <qyearsley@chromium.org>
Cr-Commit-Position: refs/heads/master@{#553400}
parent 7c7368fd
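
For context, the failure listing this CL restores prints each unexpectedly failing test, sorted by name, underneath the one-line summary. Judging from the expectations in the updated printing_unittest.py below (which uses placeholder names such as test0 rather than real web tests), a run with one unexpected failure now ends roughly like this:

    2 tests ran as expected, 1 didn't:
      test0
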
@@ -26,14 +26,14 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Package that handles non-debug, non-file output for run-webkit-tests."""
+"""Package that handles non-debug, non-file output for run_webkit_tests."""
 import math
 import optparse
+from blinkpy.tool import grammar
 from blinkpy.web_tests.models import test_expectations
 from blinkpy.web_tests.views.metered_stream import MeteredStream
-from blinkpy.tool import grammar
 NUM_SLOW_TESTS_TO_LOG = 10
@@ -42,7 +42,7 @@ NUM_SLOW_TESTS_TO_LOG = 10
 def print_options():
     return [
         optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
-                             help='print timestamps and debug information for run-webkit-tests itself'),
+                             help='print timestamps and debug information for run_webkit_tests itself'),
         optparse.make_option('--details', action='store_true', default=False,
                              help='print detailed results for every test'),
         optparse.make_option('-q', '--quiet', action='store_true', default=False,
@@ -55,7 +55,7 @@ def print_options():
 class Printer(object):
-    """Class handling all non-debug-logging printing done by run-webkit-tests."""
+    """Class handling all non-debug-logging printing done by run_webkit_tests."""
     def __init__(self, port, options, regular_output, logger=None):
         self.num_completed = 0
@@ -140,20 +140,15 @@ class Printer(object):
         return ndigits
     def print_results(self, run_time, run_results):
-        self._print_timing_statistics(run_time, run_results)
-        self._print_one_line_summary(run_time, run_results)
+        self.print_timing_statistics(run_time, run_results)
+        self.print_summary(run_time, run_results)
-    def _print_timing_statistics(self, total_time, run_results):
+    def print_timing_statistics(self, total_time, run_results):
         self._print_debug('Test timing:')
         self._print_debug(' %6.2f total testing time' % total_time)
         self._print_debug('')
-        self._print_worker_statistics(run_results, int(self._options.child_processes))
-        self._print_aggregate_test_statistics(run_results)
-        self._print_individual_test_times(run_results)
-        self._print_directory_timings(run_results)
-    def _print_worker_statistics(self, run_results, num_workers):
+        num_workers = int(self._options.child_processes)
         self._print_debug('Thread timing:')
         stats = {}
         cuml_time = 0
@@ -169,113 +164,7 @@ class Printer(object):
         self._print_debug(' %6.2f cumulative, %6.2f optimal' % (cuml_time, cuml_time / num_workers))
         self._print_debug('')
-    def _print_aggregate_test_statistics(self, run_results):
-        times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
-        self._print_statistics_for_test_timings('PER TEST TIME IN TESTSHELL (seconds):', times_for_dump_render_tree)
-    def _print_individual_test_times(self, run_results):
-        # Reverse-sort by the time spent in the driver.
-        individual_test_timings = sorted(run_results.results_by_name.values(),
-                                         key=lambda result: result.test_run_time, reverse=True)
-        num_printed = 0
-        slow_tests = []
-        timeout_or_crash_tests = []
-        unexpected_slow_tests = []
-        for test_tuple in individual_test_timings:
-            test_name = test_tuple.test_name
-            is_timeout_crash_or_slow = False
-            if test_name in run_results.slow_tests:
-                is_timeout_crash_or_slow = True
-                slow_tests.append(test_tuple)
-            if test_name in run_results.failures_by_name:
-                result = run_results.results_by_name[test_name].type
-                if (result == test_expectations.TIMEOUT or
-                        result == test_expectations.CRASH):
-                    is_timeout_crash_or_slow = True
-                    timeout_or_crash_tests.append(test_tuple)
-            if not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG:
-                num_printed = num_printed + 1
-                unexpected_slow_tests.append(test_tuple)
-        self._print_debug('')
-        if unexpected_slow_tests:
-            self._print_test_list_timing('%s slowest tests that are not marked as SLOW and did not timeout/crash:' %
-                                         NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
-            self._print_debug('')
-        if slow_tests:
-            self._print_test_list_timing('Tests marked as SLOW:', slow_tests)
-            self._print_debug('')
-        if timeout_or_crash_tests:
-            self._print_test_list_timing('Tests that timed out or crashed:', timeout_or_crash_tests)
-            self._print_debug('')
-    def _print_test_list_timing(self, title, test_list):
-        self._print_debug(title)
-        for test_tuple in test_list:
-            test_run_time = round(test_tuple.test_run_time, 1)
-            self._print_debug(' %s took %s seconds' % (test_tuple.test_name, test_run_time))
-    def _print_directory_timings(self, run_results):
-        stats = {}
-        for result in run_results.results_by_name.values():
-            stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
-            stats[result.shard_name]['num_tests'] += 1
-            stats[result.shard_name]['total_time'] += result.total_run_time
-        min_seconds_to_print = 15
-        timings = []
-        for directory in stats:
-            rounded_time = round(stats[directory]['total_time'], 1)
-            if rounded_time > min_seconds_to_print:
-                timings.append((directory, rounded_time, stats[directory]['num_tests']))
-        if not timings:
-            return
-        timings.sort()
-        self._print_debug('Time to process slowest subdirectories:')
-        for timing in timings:
-            self._print_debug(' %s took %s seconds to run %s tests.' % timing)
-        self._print_debug('')
-    def _print_statistics_for_test_timings(self, title, timings):
-        self._print_debug(title)
-        timings.sort()
-        num_tests = len(timings)
-        if not num_tests:
-            return
-        percentile90 = timings[int(.9 * num_tests)]
-        percentile99 = timings[int(.99 * num_tests)]
-        if num_tests % 2 == 1:
-            median = timings[((num_tests - 1) / 2) - 1]
-        else:
-            lower = timings[num_tests / 2 - 1]
-            upper = timings[num_tests / 2]
-            median = (float(lower + upper)) / 2
-        mean = sum(timings) / num_tests
-        for timing in timings:
-            sum_of_deviations = math.pow(timing - mean, 2)
-        std_deviation = math.sqrt(sum_of_deviations / num_tests)
-        self._print_debug(' Median: %6.3f' % median)
-        self._print_debug(' Mean: %6.3f' % mean)
-        self._print_debug(' 90th percentile: %6.3f' % percentile90)
-        self._print_debug(' 99th percentile: %6.3f' % percentile99)
-        self._print_debug(' Standard dev: %6.3f' % std_deviation)
-        self._print_debug('')
-    def _print_one_line_summary(self, total_time, run_results):
+    def print_summary(self, total_time, run_results):
         if self._options.timing:
             parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
@@ -316,12 +205,12 @@ class Printer(object):
             else:
                 summary = '%s ran as expected%s%s%s.' % (grammar.pluralize(
                     'test', expected), expected_summary_str, incomplete_str, timing_summary)
+            self._print_quiet(summary)
         else:
-            summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize(
-                'test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
-        self._print_quiet(summary)
-        self._print_quiet('')
+            self._print_quiet("%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize(
+                'test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary))
+            for test_name in sorted(run_results.unexpected_results_by_name):
+                self._print_quiet(' %s' % test_name)
     def _test_status_line(self, test_name, suffix):
         format_string = '[%d/%d] %s%s'
......
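
Pieced together from the added lines above, the restored failure listing in print_summary is small enough to sketch on its own. The snippet below is a simplified, self-contained approximation, not the CL's exact code: print_summary_sketch and the sample test names are illustrative only, and the real method also handles pluralization, incomplete runs, and timing suffixes.

def print_summary_sketch(num_expected, unexpected_results_by_name):
    # One summary line, then one indented line per unexpectedly failing test.
    unexpected = len(unexpected_results_by_name)
    if unexpected == 0:
        print('%d tests ran as expected.' % num_expected)
    else:
        print("%d tests ran as expected, %d didn't:" % (num_expected, unexpected))
        for test_name in sorted(unexpected_results_by_name):
            print(' %s' % test_name)

# Example with hypothetical test names:
# print_summary_sketch(2, {'fast/css/zzz.html': None, 'fast/dom/aaa.html': None})
# prints:
#   2 tests ran as expected, 2 didn't:
#    fast/css/zzz.html
#    fast/dom/aaa.html
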
@@ -60,9 +60,12 @@ class FakeRunResults(object):
         self.total = total
         self.expected = expected
         self.expected_failures = 0
-        self.unexpected = unexpected
         self.expected_skips = 0
+        self.unexpected = unexpected
         self.results_by_name = {}
+        self.unexpected_results_by_name = {}
+        for i in range(unexpected):
+            self.unexpected_results_by_name['test%d' % i] = None
         total_run_time = 0
         for result in fake_results:
             self.results_by_name[result.shard_name] = result
@@ -142,52 +145,26 @@ class Testprinter(unittest.TestCase):
         printer.print_config('/tmp')
         self.assertNotIn('Baseline search path: test-mac-mac10.10 -> test-mac-mac10.11 -> generic', err.getvalue())
-    def test_print_directory_timings(self):
-        printer, err = self.get_printer()
-        printer._options.debug_rwt_logging = True
-        run_results = FakeRunResults()
-        run_results.results_by_name = {
-            'slowShard': FakeShard('slowShard', 16),
-            'borderlineShard': FakeShard('borderlineShard', 15),
-            'fastShard': FakeShard('fastShard', 1),
-        }
-        printer._print_directory_timings(run_results)
-        self.assertWritten(err, ['Time to process slowest subdirectories:\n',
-                                 ' slowShard took 16.0 seconds to run 1 tests.\n', '\n'])
-        printer, err = self.get_printer()
-        printer._options.debug_rwt_logging = True
-        run_results.results_by_name = {
-            'borderlineShard': FakeShard('borderlineShard', 15),
-            'fastShard': FakeShard('fastShard', 1),
-        }
-        printer._print_directory_timings(run_results)
-        self.assertWritten(err, [])
-    def test_print_one_line_summary(self):
+    def test_print_summary(self):
         def run_test(total, exp, unexp, shards, result):
             printer, err = self.get_printer(['--timing'] if shards else None)
             fake_results = FakeRunResults(total, exp, unexp, shards)
             total_time = fake_results.run_time + 1
-            printer._print_one_line_summary(total_time, fake_results)
+            printer.print_summary(total_time, fake_results)
             self.assertWritten(err, result)
         # Without times:
-        run_test(1, 1, 0, [], ['The test ran as expected.\n', '\n'])
-        run_test(2, 1, 1, [], ['\n', "1 test ran as expected, 1 didn't:\n", '\n'])
-        run_test(3, 2, 1, [], ['\n', "2 tests ran as expected, 1 didn't:\n", '\n'])
-        run_test(3, 2, 0, [], ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
+        run_test(1, 1, 0, [], ['The test ran as expected.\n'])
+        run_test(2, 1, 1, [], ['\n', "1 test ran as expected, 1 didn't:\n", ' test0\n'])
+        run_test(3, 2, 1, [], ['\n', "2 tests ran as expected, 1 didn't:\n", ' test0\n'])
+        run_test(3, 2, 0, [], ['\n', "2 tests ran as expected (1 didn't run).\n"])
         # With times:
         fake_shards = [FakeShard('foo', 1), FakeShard('bar', 2)]
-        run_test(1, 1, 0, fake_shards, ['The test ran as expected in 5.00s (2.00s in rwt, 1x).\n', '\n'])
-        run_test(2, 1, 1, fake_shards, ['\n', "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", '\n'])
-        run_test(3, 2, 1, fake_shards, ['\n', "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", '\n'])
-        run_test(3, 2, 0, fake_shards, ['\n', "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n", '\n'])
+        run_test(1, 1, 0, fake_shards, ['The test ran as expected in 5.00s (2.00s in rwt, 1x).\n'])
+        run_test(2, 1, 1, fake_shards, ['\n', "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", ' test0\n'])
+        run_test(3, 2, 1, fake_shards, ['\n', "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", ' test0\n'])
+        run_test(3, 2, 0, fake_shards, ['\n', "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n"])
     def test_test_status_line(self):
         printer, _ = self.get_printer()
......