Commit e2974409 authored by chrishenry@google.com

Kill AddError/AddErrorMessage from PageTestResults; switch existing callers to AddFailure/AddFailureMessage.

BUG=392901

Review URL: https://codereview.chromium.org/390233002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@283679 0039d316-1c4b-4281-b951-d872f2087c98
parent 2db9e7b6
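In caller terms the migration is mechanical. A minimal sketch, assuming a Telemetry-style results object exposing the AddFailure/AddFailureMessage methods shown in the diff below; the _ReportPage* helper names are only for illustration:

import sys

def _ReportPageException(results, page):
  # Before this change a caller could pick AddError or AddFailure for the
  # current exception; afterwards only AddFailure remains.
  results.AddFailure(page, sys.exc_info())

def _ReportPageMessage(results, page, message):
  # AddErrorMessage callers move to AddFailureMessage, which synthesizes
  # the exc_info tuple from the message internally.
  results.AddFailureMessage(page, message)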
......@@ -69,7 +69,7 @@ def _WebGLTestMessages(tab):
class WebglConformanceValidator(page_test.PageTest):
def __init__(self):
super(WebglConformanceValidator, self).__init__(attempts=1, max_errors=10)
super(WebglConformanceValidator, self).__init__(attempts=1, max_failures=10)
def ValidatePage(self, page, tab, results):
if not _DidWebGLTestSucceed(tab):
......
......@@ -86,7 +86,7 @@ class Benchmark(command_line.Command):
logging.warning(str(failure))
results.PrintSummary()
return len(results.failures) + len(results.errors)
return len(results.failures)
def _DownloadGeneratedProfileArchive(self, options):
"""Download and extract profile directory archive if one exists."""
......
......@@ -423,10 +423,6 @@ def Run(test, page_set, expectations, finder_options):
len(results.failures) > test.max_failures):
logging.error('Too many failures. Aborting.')
test.RequestExit()
if (not test.max_errors is None and
len(results.errors) > test.max_errors):
logging.error('Too many errors. Aborting.')
test.RequestExit()
if test.IsExiting():
break
state.repeat_state.DidRunPageSet()
......@@ -500,7 +496,7 @@ def _CheckArchives(page_set, pages, results):
for page in pages_missing_archive_path + pages_missing_archive_data:
results.StartTest(page)
results.AddErrorMessage(page, 'Page set archive doesn\'t exist.')
results.AddFailureMessage(page, 'Page set archive doesn\'t exist.')
results.StopTest(page)
return [page for page in pages if page not in
......@@ -523,7 +519,7 @@ def _RunPage(test, page, state, expectation, results, finder_options):
results.AddSuccess(page)
else:
msg = 'Exception while running %s' % page.url
results.AddError(page, sys.exc_info())
results.AddFailure(page, sys.exc_info())
exception_formatter.PrintFormattedException(msg=msg)
try:
......
......@@ -76,8 +76,7 @@ class PageRunnerTests(unittest.TestCase):
SetUpPageRunnerArguments(options)
results = page_runner.Run(Test(), ps, expectations, options)
self.assertEquals(0, len(results.successes))
self.assertEquals(0, len(results.failures))
self.assertEquals(1, len(results.errors))
self.assertEquals(1, len(results.failures))
def testHandlingOfTestThatRaisesWithNonFatalUnknownExceptions(self):
ps = page_set.PageSet()
......@@ -127,7 +126,6 @@ class PageRunnerTests(unittest.TestCase):
Test(), ps, expectations, options)
self.assertEquals(1, len(results.successes))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.errors))
def testRetryOnBrowserCrash(self):
ps = page_set.PageSet()
......@@ -150,7 +148,6 @@ class PageRunnerTests(unittest.TestCase):
self.assertEquals(1, len(results.successes))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.errors))
@decorators.Disabled('xp') # Flaky, http://crbug.com/390079.
def testDiscardFirstResult(self):
......@@ -476,7 +473,6 @@ class PageRunnerTests(unittest.TestCase):
self.assertFalse(test.will_navigate_to_page_called)
self.assertEquals(0, len(results.successes))
self.assertEquals(0, len(results.failures))
self.assertEquals(0, len(results.errors))
def TestUseLiveSitesFlag(self, options, expect_from_archive):
ps = page_set.PageSet(
......@@ -521,3 +517,50 @@ class PageRunnerTests(unittest.TestCase):
options.output_format = 'none'
SetUpPageRunnerArguments(options)
self.TestUseLiveSitesFlag(options, expect_from_archive=True)
def testMaxFailuresOptionIsRespected(self):
class TestPage(page_module.Page):
def __init__(self, *args, **kwargs):
super(TestPage, self).__init__(*args, **kwargs)
self.was_run = False
def RunNavigateSteps(self, action_runner):
self.was_run = True
raise Exception('Test exception')
class Test(page_test.PageTest):
def ValidatePage(self, *args):
pass
ps = page_set.PageSet()
expectations = test_expectations.TestExpectations()
page1 = TestPage(
'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
ps.pages.append(page1)
page2 = TestPage(
'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
ps.pages.append(page2)
page3 = TestPage(
'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
ps.pages.append(page3)
page4 = TestPage(
'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
ps.pages.append(page4)
page5 = TestPage(
'file://blank.html', ps, base_dir=util.GetUnittestDataDir())
ps.pages.append(page5)
options = options_for_unittests.GetCopy()
options.output_format = 'none'
SetUpPageRunnerArguments(options)
results = page_runner.Run(Test(max_failures=2), ps, expectations, options)
self.assertEquals(0, len(results.successes))
# Runs up to max_failures+1 failing tests before stopping, since
# any test run after max_failures failures have been encountered
# might still pass.
self.assertEquals(3, len(results.failures))
self.assertTrue(page1.was_run)
self.assertTrue(page2.was_run)
self.assertTrue(page3.was_run)
self.assertFalse(page4.was_run)
self.assertFalse(page5.was_run)
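The expected count of 3 follows from the abort check kept in page_runner.Run (see the hunk above): the check only fires after a page finishes, so one more failing page runs after the max_failures threshold is crossed. A rough, self-contained model of that bookkeeping, not the actual runner code:

# Standalone model of the abort arithmetic behind the assertion above.
class _FakeResults(object):
  def __init__(self):
    self.failures = []

def _should_exit(max_failures, results):
  # Mirrors the kept check: abort once len(failures) exceeds max_failures.
  return max_failures is not None and len(results.failures) > max_failures

results = _FakeResults()
for page in ['page1', 'page2', 'page3', 'page4', 'page5']:
  results.failures.append((page, 'Test exception'))  # every page fails here
  if _should_exit(2, results):  # max_failures == 2, as in the test above
    break
assert len(results.failures) == 3  # pages 4 and 5 are never run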
......@@ -31,7 +31,6 @@ class PageTest(command_line.Command):
clear_cache_before_each_run=False,
attempts=3,
max_failures=None,
max_errors=None,
is_action_name_to_run_optional=False):
super(PageTest, self).__init__()
......@@ -49,7 +48,6 @@ class PageTest(command_line.Command):
self._close_tabs_before_run = True
self._attempts = attempts
self._max_failures = max_failures
self._max_errors = max_errors
self._is_action_name_to_run_optional = is_action_name_to_run_optional
assert self._attempts > 0, 'Test attempts must be greater than 0'
# If the test overrides the TabForPage method, it is considered a multi-tab
......@@ -112,15 +110,6 @@ class PageTest(command_line.Command):
def max_failures(self, count):
self._max_failures = count
@property
def max_errors(self):
"""Maximum number of errors allowed for the page set."""
return self._max_errors
@max_errors.setter
def max_errors(self, count):
self._max_errors = count
def Run(self, args):
# Define this method to avoid pylint errors.
# TODO(dtu): Make this actually run the test with args.page_set.
......
......@@ -46,11 +46,10 @@ def GenerateProfiles(profile_creator_class, profile_creator_name, options):
results = page_runner.Run(test, test.page_set, expectations, options)
if results.errors or results.failures:
if results.failures:
logging.warning('Some pages failed.')
if results.errors or results.failures:
logging.warning('Failed pages:\n%s',
'\n'.join(zip(*results.errors + results.failures)[0]))
logging.warning('Failed pages:\n%s',
'\n'.join(zip(*results.failures)[0]))
return 1
# Everything is a-ok, move results to final destination.
......
......@@ -132,11 +132,11 @@ def Main(base_dir):
recorder.CustomizeBrowserOptions(options)
results = page_runner.Run(recorder, ps, expectations, options)
if results.errors or results.failures:
if results.failures:
logging.warning('Some pages failed. The recording has not been updated for '
'these pages.')
logging.warning('Failed pages:\n%s', '\n'.join(
p.display_name for p in zip(*results.errors + results.failures)[0]))
p.display_name for p in zip(*results.failures)[0]))
if results.skipped:
logging.warning('Some pages were skipped. The recording has not been '
......
......@@ -22,8 +22,8 @@ class BuildbotPageMeasurementResults(
self._output_stream.flush()
@property
def had_errors_or_failures(self):
return self.errors or self.failures
def had_failures(self):
return len(self.failures) > 0
def PrintSummary(self):
"""Print summary data in a format expected by buildbot for perf dashboards.
......@@ -35,7 +35,7 @@ class BuildbotPageMeasurementResults(
perf_tests_helper.PrintPages(
[page.display_name for page in self.pages_that_succeeded])
summary = summary_module.Summary(self.all_page_specific_values,
self.had_errors_or_failures)
self.had_failures)
for value in summary.interleaved_computed_per_page_values_and_summaries:
if value.page:
self._PrintComputedPerPageValue(value)
......@@ -62,7 +62,7 @@ class BuildbotPageMeasurementResults(
# If there were any page failures, we typically print nothing.
#
# Note: this branch is structured less-densely to improve legibility.
if self.had_errors_or_failures:
if self.had_failures:
return
buildbot_value = value.GetBuildbotValue()
......@@ -79,7 +79,7 @@ class BuildbotPageMeasurementResults(
def _PrintOverallResults(self):
# If there were no failed pages, output the overall results (results not
# associated with a page).
if not self.had_errors_or_failures:
if not self.had_failures:
for value in self._all_summary_values:
buildbot_value = value.GetBuildbotValue()
buildbot_data_type = value.GetBuildbotDataType(
......@@ -98,7 +98,12 @@ class BuildbotPageMeasurementResults(
# Print the number of failed and errored pages.
self._PrintPerfResult('telemetry_page_measurement_results', 'num_failed',
[len(self.failures)], 'count', 'unimportant')
# TODO(chrishenry): Remove this in a separate patch to reduce the risk
# of rolling back due to buildbot breakage.
# Also fix src/tools/bisect-perf-regression_test.py when this is
# removed.
self._PrintPerfResult('telemetry_page_measurement_results', 'num_errored',
[len(self.errors)], 'count', 'unimportant')
[0], 'count', 'unimportant')
super(BuildbotPageMeasurementResults, self).PrintSummary()
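Until that follow-up lands, the summary keeps emitting a num_errored line pinned at 0. Going by the expectations in the unit test below, a run with one failing page would print roughly:

RESULT telemetry_page_measurement_results: num_failed= 1 count
RESULT telemetry_page_measurement_results: num_errored= 0 count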
......@@ -188,48 +188,6 @@ class BuildbotPageMeasurementResultsTest(
'num_errored= 0 count']
self.assertEquals(expected, measurement_results.results)
def test_repeated_pageset_one_iteration_one_page_error(self):
"""Page error on one iteration, no averaged results should print."""
test_page_set = _MakePageSet()
measurement_results = SummarySavingPageMeasurementResults()
measurement_results.WillMeasurePage(test_page_set.pages[0])
measurement_results.AddValue(scalar.ScalarValue(
test_page_set.pages[0], 'a', 'seconds', 3))
measurement_results.DidMeasurePage()
measurement_results.AddSuccess(test_page_set.pages[0])
measurement_results.WillMeasurePage(test_page_set.pages[1])
measurement_results.AddValue(scalar.ScalarValue(
test_page_set.pages[1], 'a', 'seconds', 7))
measurement_results.DidMeasurePage()
measurement_results.AddErrorMessage(test_page_set.pages[1], 'message')
measurement_results.WillMeasurePage(test_page_set.pages[0])
measurement_results.AddValue(scalar.ScalarValue(
test_page_set.pages[0], 'a', 'seconds', 4))
measurement_results.DidMeasurePage()
measurement_results.AddSuccess(test_page_set.pages[0])
measurement_results.WillMeasurePage(test_page_set.pages[1])
measurement_results.AddValue(scalar.ScalarValue(
test_page_set.pages[1], 'a', 'seconds', 8))
measurement_results.DidMeasurePage()
measurement_results.AddSuccess(test_page_set.pages[1])
measurement_results.PrintSummary()
expected = ['RESULT a: http___www.bar.com_= [7,8] seconds\n' +
'Avg a: 7.500000seconds\n' +
'Sd a: 0.707107seconds',
'RESULT a: http___www.foo.com_= [3,4] seconds\n' +
'Avg a: 3.500000seconds\n' +
'Sd a: 0.707107seconds',
'RESULT telemetry_page_measurement_results: ' +
'num_failed= 0 count',
'RESULT telemetry_page_measurement_results: ' +
'num_errored= 1 count']
self.assertEquals(expected, measurement_results.results)
def test_repeated_pageset(self):
test_page_set = _MakePageSet()
......
......@@ -22,10 +22,6 @@ class GTestTestResults(page_test_results.PageTestResults):
'(%0.f ms)' % self._GetMs())
self._output_stream.flush()
def AddError(self, page, err):
super(GTestTestResults, self).AddError(page, err)
self._emitFailure(page, err)
def AddFailure(self, page, err):
super(GTestTestResults, self).AddFailure(page, err)
self._emitFailure(page, err)
......@@ -56,17 +52,15 @@ class GTestTestResults(page_test_results.PageTestResults):
unit = 'test' if len(self.successes) == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (
'%d %s.' % (len(self.successes), unit))
if self.errors or self.failures:
all_errors = self.errors[:]
all_errors.extend(self.failures)
unit = 'test' if len(all_errors) == 1 else 'tests'
if self.failures:
unit = 'test' if len(self.failures) == 1 else 'tests'
print >> self._output_stream, '[ FAILED ]', (
'%d %s, listed below:' % (len(all_errors), unit))
for page, _ in all_errors:
'%d %s, listed below:' % (len(self.failures), unit))
for page, _ in self.failures:
print >> self._output_stream, '[ FAILED ] ', (
page.display_name)
print >> self._output_stream
count = len(self.errors) + len(self.failures)
count = len(self.failures)
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
......
......@@ -69,23 +69,6 @@ class GTestTestResultsTest(
'1 FAILED TEST\n\n' % exception_trace)
self.assertEquals(expected, ''.join(results.output_data))
def testSingleErrorPage(self):
test_page_set = _MakePageSet()
results = SummaryGtestTestResults()
results.StartTest(test_page_set.pages[0])
exception = self.CreateException()
results.AddError(test_page_set.pages[0], exception)
results.PrintSummary()
exception_trace = ''.join(traceback.format_exception(*exception))
expected = ('[ RUN ] http://www.foo.com/\n'
'%s\n'
'[ FAILED ] http://www.foo.com/ (0 ms)\n'
'[ PASSED ] 0 tests.\n'
'[ FAILED ] 1 test, listed below:\n'
'[ FAILED ] http://www.foo.com/\n\n'
'1 FAILED TEST\n\n' % exception_trace)
self.assertEquals(expected, ''.join(results.output_data))
def testSingleSkippedPage(self):
test_page_set = _MakePageSet()
results = SummaryGtestTestResults()
......@@ -109,7 +92,7 @@ class GTestTestResultsTest(
results.StartTest(test_page_set.pages[1])
self._mock_timer.SetTime(0.009)
results.AddError(test_page_set.pages[1], exception)
results.AddFailure(test_page_set.pages[1], exception)
results.StartTest(test_page_set.pages[2])
self._mock_timer.SetTime(0.015)
......@@ -153,7 +136,7 @@ class GTestTestResultsTest(
results.StartTest(test_page_set.pages[1])
self._mock_timer.SetTime(0.009)
exception_trace = ''.join(traceback.format_exception(*exception))
results.AddError(test_page_set.pages[1], exception)
results.AddFailure(test_page_set.pages[1], exception)
expected = ('[ RUN ] http://www.foo.com/\n'
'[ OK ] http://www.foo.com/ (7 ms)\n'
'[ RUN ] http://www.bar.com/\n'
......
......@@ -23,7 +23,7 @@ class PageMeasurementResults(page_test_results.PageTestResults):
@property
def pages_that_succeeded(self):
pages = set([value.page for value in self._all_page_specific_values])
pages.difference_update(self.pages_that_had_errors_or_failures)
pages.difference_update(self.pages_that_had_failures)
return pages
@property
......
......@@ -13,10 +13,8 @@ class PageTestResults(object):
def __init__(self, output_stream=None):
super(PageTestResults, self).__init__()
self._output_stream = output_stream
self.pages_that_had_errors = set()
self.pages_that_had_failures = set()
self.successes = []
self.errors = []
self.failures = []
self.skipped = []
......@@ -29,11 +27,6 @@ class PageTestResults(object):
setattr(result, k, v)
return result
@property
def pages_that_had_errors_or_failures(self):
return self.pages_that_had_errors.union(
self.pages_that_had_failures)
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
......@@ -43,10 +36,6 @@ class PageTestResults(object):
def StopTest(self, page):
pass
def AddError(self, page, err):
self.pages_that_had_errors.add(page)
self.errors.append((page, self._GetStringFromExcInfo(err)))
def AddFailure(self, page, err):
self.pages_that_had_failures.add(page)
self.failures.append((page, self._GetStringFromExcInfo(err)))
......@@ -63,21 +52,11 @@ class PageTestResults(object):
except Exception:
self.AddFailure(page, sys.exc_info())
def AddErrorMessage(self, page, message):
try:
raise Exception(message)
except Exception:
self.AddError(page, sys.exc_info())
def PrintSummary(self):
if self.failures:
logging.error('Failed pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.failures)[0]))
if self.errors:
logging.error('Errored pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.errors)[0]))
if self.skipped:
logging.warning('Skipped pages:\n%s', '\n'.join(
p.display_name for p in zip(*self.skipped)[0]))
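AddFailureMessage survives this change; as the kept lines above suggest, it turns a plain message into an exc_info tuple by raising and immediately catching an exception so that a real traceback exists. A small standalone sketch of that pattern (the helper name is illustrative):

import sys
import traceback

def _exc_info_from_message(message):
  # Raise and immediately catch so sys.exc_info() carries a traceback.
  try:
    raise Exception(message)
  except Exception:
    return sys.exc_info()

exc_info = _exc_info_from_message('Page set archive doesn\'t exist.')
# This formatted string is what PageTestResults stores next to the page
# in self.failures (see _GetStringFromExcInfo above).
print(''.join(traceback.format_exception(*exc_info)))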
......@@ -33,20 +33,3 @@ class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
self.assertEquals(results.pages_that_had_failures,
set([self.pages[0]]))
self.assertEquals(results.successes, [self.pages[1]])
def test_errors(self):
results = NonPrintingPageTestResults()
results.AddError(self.pages[0], self.CreateException())
results.AddSuccess(self.pages[1])
self.assertEquals(results.pages_that_had_errors,
set([self.pages[0]]))
self.assertEquals(results.successes, [self.pages[1]])
def test_errors_and_failures(self):
results = NonPrintingPageTestResults()
results.AddError(self.pages[0], self.CreateException())
results.AddError(self.pages[1], self.CreateException())
results.AddSuccess(self.pages[2])
self.assertEquals(results.pages_that_had_errors_or_failures,
set([self.pages[0], self.pages[1]]))
self.assertEquals(results.successes, [self.pages[2]])
......@@ -32,8 +32,8 @@ class Summary(object):
]
"""
def __init__(self, all_page_specific_values, had_errors_or_failures):
self.had_errors_or_failures = had_errors_or_failures
def __init__(self, all_page_specific_values, had_failures):
self.had_failures = had_failures
self._computed_per_page_values = []
self._computed_summary_values = []
self._interleaved_computed_per_page_values_and_summaries = []
......@@ -100,7 +100,7 @@ class Summary(object):
# value name so that we can find them when printing out value names in
# alphabetical order.
merged_pages_value_by_value_name = {}
if not self.had_errors_or_failures:
if not self.had_failures:
for value in merge_values.MergeLikeValuesFromDifferentPages(
all_successful_page_values):
assert value.name not in merged_pages_value_by_value_name
......@@ -142,7 +142,7 @@ class Summary(object):
# Note: this branch is structured less-densely to improve legibility.
if num_successful_pages_for_this_value_name > 1:
should_print = True
elif (self.had_errors_or_failures and
elif (self.had_failures and
num_successful_pages_for_this_value_name == 1):
should_print = True
else:
......