Commit 68084e84 authored by slamm, committed by Commit bot

Stop execution for unexpected exceptions like KeyboardInterrupt...

We "white list" certain exceptions so that the user story runner can continue
running the remaining user stories even if one of them is raised during the
test run (see the sketch after the list below).

Those exceptions are:
page_test.Failure
util.TimeoutException
exceptions.LoginException
exceptions.ProfilingException
page_action.PageActionNotSupported
exceptions.AppCrashException (unless it is raised inside
shared_user_story_state.state.TearDownState(), which indicates a failure to
recover the test state)
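
For illustration, a minimal sketch of the pattern this change uses (not the
actual Telemetry code; the function and variable names below are made up):
only white-listed exceptions are recorded as failures and the run continues,
while anything else, e.g. KeyboardInterrupt, propagates and stops execution.

  # Minimal sketch, assuming hypothetical helpers; not the real
  # user_story_runner implementation.
  import logging
  import sys

  class Failure(Exception):
    """Stand-in for page_test.Failure."""

  class AppCrashException(Exception):
    """Stand-in for exceptions.AppCrashException."""

  # Exceptions recorded as failures without aborting the whole run.
  WHITELISTED_EXCEPTIONS = (Failure, AppCrashException)

  def RunStories(stories, run_one_story, failures):
    for story in stories:
      try:
        run_one_story(story)
      except WHITELISTED_EXCEPTIONS:
        # Record the failure and keep going with the remaining stories.
        failures.append((story, sys.exc_info()))
        logging.warning('Story %s failed; continuing.', story)
      # Any other exception (KeyboardInterrupt, unknown errors) is not
      # caught here, so it propagates and stops the run.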

BUG=437735

Review URL: https://codereview.chromium.org/755323012

Cr-Commit-Position: refs/heads/master@{#308258}
parent 157c05e8
@@ -32,7 +32,7 @@ class RasterizeAndRecordMicroUnitTest(page_test_test_case.PageTestTestCase):
         report_detailed_results=True)
     try:
       results = self.RunMeasurement(measurement, ps, options=self._options)
-    except page_test.TestNotSupportedOnPlatformFailure as failure:
+    except page_test.TestNotSupportedOnPlatformError as failure:
       logging.warning(str(failure))
       return
     self.assertEquals(0, len(results.failures))
......
@@ -17,7 +17,7 @@ class Screenshot(page_test.PageTest):
   def ValidateAndMeasurePage(self, page, tab, results):
     if not tab.screenshot_supported:
-      raise page_test.TestNotSupportedOnPlatformFailure(
+      raise page_test.TestNotSupportedOnPlatformError(
          'Browser does not support screenshotting')
     tab.WaitForDocumentReadyStateToBeComplete()
......
@@ -8,6 +8,7 @@ import tempfile
 from measurements import screenshot
 from telemetry import benchmark
+from telemetry.page import page_test
 from telemetry.unittest_util import options_for_unittests
 from telemetry.unittest_util import page_test_test_case
@@ -24,9 +25,10 @@ class ScreenshotUnitTest(page_test_test_case.PageTestTestCase):
   def testScreenshot(self):
     ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
     measurement = screenshot.Screenshot(self._png_outdir)
-    results = self.RunMeasurement(measurement, ps, options=self._options)
-    if results.failures:
-      logging.warning(str(results.failures))
+    try:
+      results = self.RunMeasurement(measurement, ps, options=self._options)
+    except page_test.TestNotSupportedOnPlatformError as e:
+      logging.warning(e)
       return
     saved_picture_count = results.FindAllPageSpecificValuesNamed(
......
@@ -8,7 +8,6 @@ from metrics import power
 from telemetry.core import exceptions
 from telemetry.core import wpr_modes
 from telemetry.page import page
-from telemetry.page import page_test
 from telemetry.unittest_util import options_for_unittests
 from telemetry.unittest_util import page_test_test_case
 from telemetry.unittest_util import test
@@ -176,7 +175,7 @@ class SmoothnessUnitTest(page_test_test_case.PageTestTestCase):
     measurement = BuggyMeasurement()
     try:
       self.RunMeasurement(measurement, ps)
-    except page_test.TestNotSupportedOnPlatformFailure:
+    except exceptions.IntentionalException:
       pass
     self.assertTrue(measurement.fake_power.start_called)
......
@@ -19,6 +19,7 @@ from telemetry.page import page_test
 from telemetry.page import test_expectations
 from telemetry.results import results_options
 from telemetry.util import cloud_storage
+from telemetry.util import exception_formatter

 Disabled = decorators.Disabled
 Enabled = decorators.Enabled
@@ -81,7 +82,12 @@ class Benchmark(command_line.Command):
     return BenchmarkMetadata(self.Name())

   def Run(self, finder_options):
-    """Run this test with the given options."""
+    """Run this test with the given options.
+
+    Returns:
+      The number of failure values (up to 254) or 255 if there is an uncaught
+      exception.
+    """
     self.CustomizeBrowserOptions(finder_options.browser_options)
     pt = self.CreatePageTest(finder_options)
@@ -103,8 +109,10 @@ class Benchmark(command_line.Command):
     results = results_options.CreateResults(benchmark_metadata, finder_options)
     try:
       user_story_runner.Run(pt, us, expectations, finder_options, results)
-    except page_test.TestNotSupportedOnPlatformFailure as failure:
-      logging.warning(str(failure))
+      return_code = min(254, len(results.failures))
+    except Exception:
+      exception_formatter.PrintFormattedException()
+      return_code = 255

     bucket = cloud_storage.BUCKET_ALIASES[finder_options.upload_bucket]
     if finder_options.upload_results:
@@ -112,7 +120,7 @@ class Benchmark(command_line.Command):
       results.UploadProfilingFilesToCloud(bucket)
     results.PrintSummary()
-    return len(results.failures)
+    return return_code

   def _DownloadGeneratedProfileArchive(self, options):
     """Download and extract profile directory archive if one exists."""
......
@@ -6,20 +6,20 @@ from telemetry.page import test_expectations
 from telemetry.page.actions import action_runner as action_runner_module


-class Failure(Exception):
-  """Exception that can be thrown from PageTest to indicate an
-  undesired but designed-for problem."""
+class TestNotSupportedOnPlatformError(Exception):
+  """PageTest Exception raised when a required feature is unavailable.
+
+  The feature required to run the test could be part of the platform,
+  hardware configuration, or browser.
+  """


-class TestNotSupportedOnPlatformFailure(Failure):
-  """Exception that can be thrown to indicate that a certain feature required
-  to run the test is not available on the platform, hardware configuration, or
-  browser version."""
+class Failure(Exception):
+  """PageTest Exception raised when an undesired but designed-for problem."""


 class MeasurementFailure(Failure):
-  """Exception that can be thrown from MeasurePage to indicate an undesired but
-  designed-for problem."""
+  """PageTest Exception raised when an undesired but designed-for problem."""


 class PageTest(object):
......
@@ -18,7 +18,7 @@ from telemetry.wpr import archive_info
 class PageTestThatFails(page_test.PageTest):
   def ValidateAndMeasurePage(self, page, tab, results):
-    raise exceptions.IntentionalException
+    raise page_test.Failure


 class PageTestForBlank(page_test.PageTest):
......
@@ -103,7 +103,7 @@ class PageTestTestCase(unittest.TestCase):
     measurement = BuggyMeasurement()
     try:
       self.RunMeasurement(measurement, ps, options=options)
-    except page_test.TestNotSupportedOnPlatformFailure:
+    except page_test.TestNotSupportedOnPlatformError:
       pass
     if start_tracing_called[0]:
       self.assertTrue(stop_tracing_called[0])
@@ -79,7 +79,6 @@ def ProcessCommandLineArgs(parser, args):
 def _RunUserStoryAndProcessErrorIfNeeded(
     test, expectations, user_story, results, state):
-  expectation = None
   def ProcessError():
     if expectation == 'fail':
       msg = 'Expected exception while running %s' % user_story.display_name
@@ -87,8 +86,8 @@ def _RunUserStoryAndProcessErrorIfNeeded(
     else:
       msg = 'Exception while running %s' % user_story.display_name
     results.AddValue(failure.FailureValue(user_story, sys.exc_info()))

   try:
+    expectation = None
     state.WillRunUserStory(user_story)
     expectation, skip_value = state.GetTestExpectationAndSkipValue(expectations)
     if expectation == 'skip':
@@ -96,19 +95,16 @@ def _RunUserStoryAndProcessErrorIfNeeded(
       results.AddValue(skip_value)
       return
     state.RunUserStory(results)
-  except page_test.TestNotSupportedOnPlatformFailure:
-    raise
   except (page_test.Failure, util.TimeoutException, exceptions.LoginException,
           exceptions.ProfilingException):
     ProcessError()
   except exceptions.AppCrashException:
     ProcessError()
-    state.TearDownState(results)
     if test.is_multi_tab_test:
       logging.error('Aborting multi-tab test after browser or tab crashed at '
                     'user story %s' % user_story.display_name)
       test.RequestExit()
-    return
+    raise
   except page_action.PageActionNotSupported as e:
     results.AddValue(
         skip.SkipValue(user_story, 'Unsupported page action: %s' % e))
@@ -117,8 +113,15 @@ def _RunUserStoryAndProcessErrorIfNeeded(
       logging.warning(
           '%s was expected to fail, but passed.\n', user_story.display_name)
   finally:
-    state.DidRunUserStory(results)
+    has_existing_exception = sys.exc_info() is not None
+    try:
+      state.DidRunUserStory(results)
+    except Exception:
+      if not has_existing_exception:
+        raise
+      # Print current exception and propagate existing exception.
+      exception_formatter.PrintFormattedException(
+          msg='Exception from DidRunUserStory: ')


 @decorators.Cache
 def _UpdateUserStoryArchivesIfChanged(user_story_set):
@@ -190,7 +193,12 @@ def GetUserStoryGroupsWithSameSharedUserStoryClass(user_story_set):
 def Run(test, user_story_set, expectations, finder_options, results):
-  """Runs a given test against a given page_set with the given options."""
+  """Runs a given test against a given page_set with the given options.
+
+  Stop execution for unexpected exceptions such as KeyboardInterrupt.
+  We "white list" certain exceptions for which the user story runner
+  can continue running the remaining user stories.
+  """
   test.ValidatePageSet(user_story_set)

   # Reorder page set based on options.
@@ -227,39 +235,63 @@ def Run(test, user_story_set, expectations, finder_options, results):
   for group in user_story_groups:
     state = None
     try:
-      state = group.shared_user_story_state_class(
-          test, finder_options, user_story_set)
       for _ in xrange(finder_options.pageset_repeat):
         for user_story in group.user_stories:
-          if test.IsExiting():
-            break
           for _ in xrange(finder_options.page_repeat):
+            if test.IsExiting():
+              break
+            if not state:
+              state = group.shared_user_story_state_class(
+                  test, finder_options, user_story_set)
             results.WillRunPage(user_story)
             try:
               _WaitForThermalThrottlingIfNeeded(state.platform)
               _RunUserStoryAndProcessErrorIfNeeded(
                   test, expectations, user_story, results, state)
-            except Exception:
-              # Tear down & restart the state for unhandled exceptions thrown by
-              # _RunUserStoryAndProcessErrorIfNeeded.
-              results.AddValue(failure.FailureValue(user_story, sys.exc_info()))
-              state.TearDownState(results)
-              state = group.shared_user_story_state_class(
-                  test, finder_options, user_story_set)
+            except exceptions.AppCrashException:
+              # Catch AppCrashException to give the story a chance to retry.
+              # The retry is enabled by tearing down the state and creating
+              # a new state instance in the next iteration.
+              if not test.IsExiting():
+                try:
+                  # If TearDownState raises, do not catch the exception.
+                  # (The AppCrashException was saved as a failure value.)
+                  state.TearDownState(results)
+                finally:
+                  # Later finally-blocks use state, so ensure it is cleared.
+                  state = None
             finally:
-              _CheckThermalThrottling(state.platform)
-              discard_run = (test.discard_first_result and
-                             user_story not in
-                             user_story_with_discarded_first_results)
-              if discard_run:
-                user_story_with_discarded_first_results.add(user_story)
-              results.DidRunPage(user_story, discard_run=discard_run)
+              has_existing_exception = sys.exc_info() is not None
+              try:
+                if state:
+                  _CheckThermalThrottling(state.platform)
+                discard_run = (test.discard_first_result and
                               user_story not in
                               user_story_with_discarded_first_results)
+                if discard_run:
+                  user_story_with_discarded_first_results.add(user_story)
+                results.DidRunPage(user_story, discard_run=discard_run)
+              except Exception:
+                if not has_existing_exception:
+                  raise
+                # Print current exception and propagate existing exception.
+                exception_formatter.PrintFormattedException(
+                    msg='Exception from result processing:')
             if max_failures is not None and len(results.failures) > max_failures:
               logging.error('Too many failures. Aborting.')
               test.RequestExit()
     finally:
       if state:
-        state.TearDownState(results)
+        has_existing_exception = sys.exc_info() is not None
+        try:
+          state.TearDownState(results)
+        except Exception:
+          if not has_existing_exception:
+            raise
+          # Print current exception and propagate existing exception.
+          exception_formatter.PrintFormattedException(
+              msg='Exception from TearDownState:')


 def _ShuffleAndFilterUserStorySet(user_story_set, finder_options):
   if finder_options.pageset_shuffle_order_file:
......
@@ -13,7 +13,6 @@ from telemetry.page import page as page_module
 from telemetry.page import page_test
 from telemetry.page import test_expectations
 from telemetry.results import results_options
-from unittest_data import test_simple_one_page_set
 from telemetry.unittest_util import options_for_unittests
 from telemetry.unittest_util import system_stub
 from telemetry.user_story import shared_user_story_state
@@ -161,7 +160,7 @@ class UserStoryRunnerTest(unittest.TestCase):
     self.assertEqual(story_groups[2].shared_user_story_state_class,
                      FooUserStoryState)

-  def testSuccefulUserStoryTest(self):
+  def testSuccessfulUserStoryTest(self):
     us = user_story_set.UserStorySet()
     us.AddUserStory(DummyLocalUserStory(FooUserStoryState))
     us.AddUserStory(DummyLocalUserStory(FooUserStoryState))
@@ -207,7 +206,7 @@ class UserStoryRunnerTest(unittest.TestCase):
     self.assertEquals(1, barz_init_call_counter[0])
     self.assertEquals(1, barz_tear_down_call_counter[0])

-  def testHandlingOfCrashedApp(self):
+  def testAppCrashExceptionCausesFailureValue(self):
     self.SuppressExceptionFormatting()
     us = user_story_set.UserStorySet()
     class SharedUserStoryThatCausesAppCrash(TestSharedUserStoryState):
@@ -220,12 +219,12 @@ class UserStoryRunnerTest(unittest.TestCase):
     self.assertEquals(1, len(self.results.failures))
     self.assertEquals(0, GetNumberOfSuccessfulPageRuns(self.results))

-  def testHandlingOfTestThatRaisesWithNonFatalUnknownExceptions(self):
+  def testUnknownExceptionIsFatal(self):
     self.SuppressExceptionFormatting()
     us = user_story_set.UserStorySet()

-    class ExpectedException(Exception):
+    class UnknownException(Exception):
       pass

     class Test(page_test.PageTest):
       def __init__(self, *args):
@@ -236,7 +235,7 @@ class UserStoryRunnerTest(unittest.TestCase):
         old_run_count = self.run_count
         self.run_count += 1
         if old_run_count == 0:
-          raise ExpectedException()
+          raise UnknownException
       def ValidateAndMeasurePage(self, page, tab, results):
         pass
@@ -244,11 +243,9 @@ class UserStoryRunnerTest(unittest.TestCase):
     us.AddUserStory(DummyLocalUserStory(TestSharedUserStoryState))
     us.AddUserStory(DummyLocalUserStory(TestSharedUserStoryState))
     test = Test()
-    user_story_runner.Run(
-        test, us, self.expectations, self.options, self.results)
-    self.assertEquals(2, test.run_count)
-    self.assertEquals(1, len(self.results.failures))
-    self.assertEquals(1, GetNumberOfSuccessfulPageRuns(self.results))
+    with self.assertRaises(UnknownException):
+      user_story_runner.Run(
+          test, us, self.expectations, self.options, self.results)

   def testRaiseBrowserGoneExceptionFromRunPage(self):
     self.SuppressExceptionFormatting()
@@ -263,7 +260,7 @@ class UserStoryRunnerTest(unittest.TestCase):
         old_run_count = self.run_count
         self.run_count += 1
         if old_run_count == 0:
-          raise exceptions.BrowserGoneException()
+          raise exceptions.BrowserGoneException('i am a browser instance')
       def ValidateAndMeasurePage(self, page, tab, results):
         pass
@@ -277,6 +274,48 @@ class UserStoryRunnerTest(unittest.TestCase):
     self.assertEquals(1, len(self.results.failures))
     self.assertEquals(1, GetNumberOfSuccessfulPageRuns(self.results))

+  def testAppCrashThenRaiseInTearDownFatal(self):
+    self.SuppressExceptionFormatting()
+    us = user_story_set.UserStorySet()
+
+    class DidRunTestError(Exception):
+      pass
+
+    class TestTearDownSharedUserStoryState(TestSharedUserStoryState):
+      def TearDownState(self, results):
+        self._test.DidRunTest('app', results)
+
+    class Test(page_test.PageTest):
+      def __init__(self, *args):
+        super(Test, self).__init__(*args)
+        self.run_count = 0
+        self._unit_test_events = []  # track what was called when
+
+      def RunPage(self, *_):
+        old_run_count = self.run_count
+        self.run_count += 1
+        if old_run_count == 0:
+          self._unit_test_events.append('app-crash')
+          raise exceptions.AppCrashException
+
+      def ValidateAndMeasurePage(self, page, tab, results):
+        pass
+
+      def DidRunTest(self, _, __):
+        self._unit_test_events.append('did-run-test')
+        raise DidRunTestError
+
+    us.AddUserStory(DummyLocalUserStory(TestTearDownSharedUserStoryState))
+    us.AddUserStory(DummyLocalUserStory(TestTearDownSharedUserStoryState))
+    test = Test()
+    with self.assertRaises(DidRunTestError):
+      user_story_runner.Run(
+          test, us, self.expectations, self.options, self.results)
+    self.assertEqual(['app-crash', 'did-run-test'], test._unit_test_events)
+    # The AppCrashException gets added as a failure.
+    self.assertEquals(1, len(self.results.failures))
+
   def testDiscardFirstResult(self):
     us = user_story_set.UserStorySet()
     us.AddUserStory(DummyLocalUserStory(TestSharedUserStoryState))
......