Commit df0a3ba3 authored by marja@chromium.org

Telemetry: add a metadata layer between page set and .wpr.

The metadata file describes which pages in the page set are
backed by which .wpr files. This allows us to update the
recording for individual pages without re-recording the whole
page set (a sketch of the metadata format follows the commit
header).

BUG=155660
NOTRY=true


Committed: https://src.chromium.org/viewvc/chrome?view=rev&revision=180117

Review URL: https://chromiumcodereview.appspot.com/11881051

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180133 0039d316-1c4b-4281-b951-d872f2087c98
parent 47b6136e
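For orientation before the diffs: a minimal sketch of the new archive metadata format, reconstructed from PageSetArchiveInfo._WriteToFile below. The file names, URLs, and page_set path are illustrative, and the description string is abbreviated:

import json

# Shape of the file a page set references via "archive_data_file".
# Keys follow PageSetArchiveInfo._WriteToFile; the concrete names and
# URLs here are made up for illustration.
example_metadata = json.loads("""
{
  "description": "Describes the Web Page Replay archives for a page set.",
  "page_set": "../page_sets/example.json",
  "archives": {
    "example_000.wpr": ["http://www.example.com/"],
    "example_001.wpr": ["http://www.example.org/", "http://www.example.net/"]
  }
}
""")

assert example_metadata['archives']['example_000.wpr'] == [
    'http://www.example.com/']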
{
  "description": "Pages hand-picked from top-lists in Q32012.",
  "archive_path": "../data/2012Q3.wpr",
  "archive_data_file": "../data/2012Q3.json",
  "credentials_path": "../data/credentials.json",
  "pages": [
    {
......
{
  "description": "Facebook's JSGameBench benchmark",
  "archive_path": "../data/jsgamebench.wpr",
  "archive_data_file": "../data/jsgamebench.json",
  "pages": [
    {
      "url": "http://localhost/"
......
{
  "description": "Sites of Interest",
  "archive_path": "../data/sites_of_interest.wpr",
  "archive_data_file": "../data/sites_of_interest.json",
  "credentials_path": "../data/credentials.json",
  "pages": [
    {
......
{
  "description": "Kraken JavaScript benchmark",
  "archive_path": "../data/kraken.wpr",
  "archive_data_file": "../data/kraken.json",
  "pages": [
    {
      "url": "http://krakenbenchmark.mozilla.org/kraken-1.1/driver.html"
......
{
  "description": "RoboHornet Pro benchmark",
  "archive_path": "../data/robohornetpro.wpr",
  "archive_data_file": "../data/robohornetpro.json",
  "pages": [
    {
      "url": "http://ie.microsoft.com/testdrive/performance/robohornetpro/"
......
{
  "description": "Pages hand-picked for 2012 CrOS scrolling tuning efforts.",
  "archive_path": "../data/top_25.wpr",
  "archive_data_file": "../data/top_25.json",
  "credentials_path": "../data/credentials.json",
  "user_agent_type": "desktop",
  "pages": [
......
{
  "description": "Self-driven Canvas2D animation examples",
  "archive_path": "../data/tough_canvas_cases.wpr",
  "archive_data_file": "../data/tough_canvas_cases.json",
  "pages": [
    {
      "url": "http://mudcu.be/labs/JS1k/BreathingGalaxies.html",
......
......@@ -12,7 +12,6 @@ from telemetry.page_set import PageSet
def _MakePageSet():
  return PageSet.FromDict({
      "description": "hello",
      "archive_path": "foo.wpr",
      "pages": [
        {"url": "http://www.foo.com/"},
        {"url": "http://www.bar.com/"}
......
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from telemetry import multi_page_benchmark
......@@ -9,6 +10,7 @@ from telemetry import options_for_unittests
from telemetry import page as page_module
from telemetry import page_action
from telemetry import page_set
from telemetry import page_set_archive_info
from telemetry import wpr_modes
class BenchThatFails(multi_page_benchmark.MultiPageBenchmark):
......@@ -83,26 +85,42 @@ class MultiPageBenchmarkUnitTest(
  def testRecordAndReplay(self):
    test_archive = '/tmp/google.wpr'
    google_url = 'http://www.google.com/'
    foo_url = 'http://www.foo.com/'
    archive_info_template = ("""
{
  "archives": {
    "%s": ["%s"]
  }
}
""")
    try:
      ps = page_set.PageSet()
      ps.archive_path = test_archive
      benchmark = BenchForReplay()

      # First record an archive with only www.google.com.
      self._options.wpr_mode = wpr_modes.WPR_RECORD
      ps.pages = [page_module.Page('http://www.google.com/')]
      ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
          '', '', json.loads(archive_info_template %
                             (test_archive, google_url)))
      ps.pages = [page_module.Page(google_url, ps)]
      all_results = self.RunBenchmark(benchmark, ps, options=self._options)
      self.assertEquals(0, len(all_results.page_failures))

      # Now replay it and verify that google.com is found but foo.com is not.
      self._options.wpr_mode = wpr_modes.WPR_REPLAY
      ps.pages = [page_module.Page('http://www.foo.com/')]
      ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
          '', '', json.loads(archive_info_template % (test_archive, foo_url)))
      ps.pages = [page_module.Page(foo_url, ps)]
      all_results = self.RunBenchmark(benchmark, ps, options=self._options)
      self.assertEquals(1, len(all_results.page_failures))

      ps.pages = [page_module.Page('http://www.google.com/')]
      ps.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo(
          '', '', json.loads(archive_info_template %
                             (test_archive, google_url)))
      ps.pages = [page_module.Page(google_url, ps)]
      all_results = self.RunBenchmark(benchmark, ps, options=self._options)
      self.assertEquals(0, len(all_results.page_failures))
......
......@@ -21,9 +21,9 @@ class MultiPageBenchmarkUnitTestBase(unittest.TestCase):
  def CreatePageSet(self, test_filename):
    base_dir = os.path.dirname(__file__)
    page = page_module.Page(test_filename, base_dir=base_dir)
    ps = page_set.PageSet(file_path=os.path.join(base_dir, 'foo.json'))
    page = page_module.Page(test_filename, ps, base_dir=base_dir)
    setattr(page, 'smoothness', {'action': 'scrolling_action'})
    ps = page_set.PageSet(base_dir=base_dir)
    ps.pages.append(page)
    return ps
......
......@@ -9,7 +9,7 @@ import urlparse
from telemetry import util
class Page(object):
  def __init__(self, url, attributes=None, base_dir=None):
  def __init__(self, url, page_set, attributes=None, base_dir=None):
    parsed_url = urlparse.urlparse(url)
    if not parsed_url.scheme:
      abspath = os.path.abspath(os.path.join(base_dir, parsed_url.path))
......@@ -18,6 +18,7 @@ class Page(object):
    else:
      raise Exception('URLs must be fully qualified: %s' % url)
    self.url = url
    self.page_set = page_set
    self.base_dir = base_dir
    self.credentials = None
    self.disabled = False
......@@ -59,6 +60,10 @@ class Page(object):
      return os.path.split(self.url)[1]
    return re.sub('https?://', '', self.url)

  @property
  def archive_path(self):
    return self.page_set.WprFilePathForPage(self)

  def __str__(self):
    return self.url
......
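A minimal sketch of the delegation introduced above, assuming telemetry.page is importable; the stub page set and the returned path are illustrative:

from telemetry import page as page_module

class StubPageSet(object):
  # Stands in for a real PageSet; only the method that the archive_path
  # property calls is needed. The path is made up.
  def WprFilePathForPage(self, page):
    return '/data/archives/example_000.wpr'

p = page_module.Page('http://www.example.com/', StubPageSet())
assert p.archive_path == '/data/archives/example_000.wpr'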
......@@ -68,35 +68,31 @@ class PageRunner(object):
    self.Close()

  def Run(self, options, possible_browser, test, results):
    # Set up WPR mode.
    if not self.page_set.archive_path:
      archive_path = ''
    if not self.page_set.ContainsOnlyFileURLs():
      # Check if we can run against WPR.
      for page in self.page_set.pages:
        parsed_url = urlparse.urlparse(page.url)
        if parsed_url.scheme == 'file':
          continue
        if not page.archive_path:
          logging.warning("""
No page set archive provided for the chosen page set. Benchmarking against
live sites! Results won't be repeatable or comparable.
""")
    else:
      archive_path = os.path.abspath(os.path.join(self.page_set.base_dir,
                                                  self.page_set.archive_path))
    if options.wpr_mode == wpr_modes.WPR_OFF:
      if os.path.isfile(archive_path):
        possible_browser.options.wpr_mode = wpr_modes.WPR_REPLAY
      else:
        possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
        if not self.page_set.ContainsOnlyFileURLs():
          logging.warning("""
The page set archive %s does not exist, benchmarking against live sites!
No page set archive provided for the page %s. Benchmarking against live sites!
Results won't be repeatable or comparable.
""", page.url)
        elif options.wpr_mode != wpr_modes.WPR_RECORD:
          # The page has an archive, and we're not recording.
          if not os.path.isfile(page.archive_path):
            logging.warning("""
The page set archive %s for page %s does not exist, benchmarking against live
sites! Results won't be repeatable or comparable.
To fix this, either add svn-internal to your .gclient using
http://goto/read-src-internal, or create a new archive using record_wpr.
""", os.path.relpath(archive_path))
""", os.path.relpath(page.archive_path), page.url)
    # Verify credentials path.
    credentials_path = None
    if self.page_set.credentials_path:
      credentials_path = os.path.join(self.page_set.base_dir,
      credentials_path = os.path.join(os.path.dirname(self.page_set.file_path),
                                      self.page_set.credentials_path)
      if not os.path.exists(credentials_path):
        credentials_path = None
......@@ -122,14 +118,24 @@ class PageRunner(object):
    pages = _ShuffleAndFilterPageSet(self.page_set, options)

    state = _RunState()
    last_archive_path = None
    try:
      for page in pages:
        if options.wpr_mode != wpr_modes.WPR_RECORD:
          if page.archive_path and os.path.isfile(page.archive_path):
            possible_browser.options.wpr_mode = wpr_modes.WPR_REPLAY
          else:
            possible_browser.options.wpr_mode = wpr_modes.WPR_OFF
        if last_archive_path != page.archive_path:
          state.Close()
          state = _RunState()
          last_archive_path = page.archive_path
        tries = 3
        while tries:
          try:
            if not state.browser:
              self._SetupBrowser(state, test, possible_browser,
                                 credentials_path, archive_path)
                                 credentials_path, page.archive_path)
            if not state.tab:
              if len(state.browser.tabs) == 0:
                state.browser.tabs.New()
......@@ -43,9 +43,9 @@ class PageRunnerTests(unittest.TestCase):
  # multi_page_benchmark_unittest to here.
  def testHandlingOfCrashedTab(self):
    page1 = page_module.Page('chrome://crash')
    page2 = page_module.Page('http://www.google.com')
    ps = page_set.PageSet()
    page1 = page_module.Page('chrome://crash', ps)
    page2 = page_module.Page('http://www.google.com', ps)
    ps.pages.append(page1)
    ps.pages.append(page2)
    results = page_test.PageTestResults()
......@@ -80,9 +80,9 @@ class PageRunnerTests(unittest.TestCase):
  def runCredentialsTest(self, # pylint: disable=R0201
                         credentials_backend,
                         results):
    page = page_module.Page('http://www.google.com')
    page.credentials = "test"
    ps = page_set.PageSet()
    page = page_module.Page('http://www.google.com', ps)
    page.credentials = "test"
    ps.pages.append(page)

    did_run = [False]
......@@ -112,10 +112,11 @@ class PageRunnerTests(unittest.TestCase):
    return did_run[0]
  def testUserAgent(self):
    ps = page_set.PageSet()
    page = page_module.Page(
        'file:///' + os.path.join('..', 'unittest_data', 'blank.html'),
        ps,
        base_dir=os.path.dirname(__file__))
    ps = page_set.PageSet()
    ps.pages.append(page)
    ps.user_agent_type = 'tablet'
......
......@@ -7,12 +7,13 @@ import os
import urlparse
from telemetry import page as page_module
from telemetry import page_set_archive_info
class PageSet(object):
  def __init__(self, base_dir='', attributes=None):
  def __init__(self, file_path='', attributes=None):
    self.description = ''
    self.archive_path = ''
    self.base_dir = base_dir
    self.archive_data_file = ''
    self.file_path = file_path
    self.credentials_path = None
    self.user_agent_type = None
......@@ -22,20 +23,27 @@ class PageSet(object):
    self.pages = []

    if self.archive_data_file:
      base_dir = os.path.dirname(file_path)
      self.wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
          os.path.join(base_dir, self.archive_data_file), file_path)
    else:
      self.wpr_archive_info = None

  @classmethod
  def FromFile(cls, file_path):
    with open(file_path, 'r') as f:
      contents = f.read()
      data = json.loads(contents)
      return cls.FromDict(data, os.path.dirname(file_path))
      return cls.FromDict(data, file_path)

  @classmethod
  def FromDict(cls, data, file_path=''):
    page_set = cls(file_path, data)

    for page_attributes in data['pages']:
      url = page_attributes.pop('url')
      page = page_module.Page(url, attributes=page_attributes,
                              base_dir=file_path)
      page = page_module.Page(url, page_set, attributes=page_attributes,
                              base_dir=os.path.dirname(file_path))
      page_set.pages.append(page)

    return page_set
......@@ -70,6 +78,11 @@ class PageSet(object):
    return pages

  def WprFilePathForPage(self, page):
    if not self.wpr_archive_info:
      return None
    return self.wpr_archive_info.WprFilePathForPage(page)

  def __iter__(self):
    return self.pages.__iter__()
......
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import re
import shutil


class PageSetArchiveInfo(object):
  def __init__(self, archive_data_file_path, page_set_file_path, data):
    self._archive_data_file_path = archive_data_file_path
    self._archive_data_file_dir = os.path.dirname(archive_data_file_path)
    # Back pointer to the page set file.
    self._page_set_file_path = page_set_file_path

    # Map from the relative path (as it appears in the metadata file) of the
    # .wpr file to a list of urls it supports.
    self._wpr_file_to_urls = data['archives']

    # Map from the page url to a relative path (as it appears in the metadata
    # file) of the .wpr file.
    self._url_to_wpr_file = dict()
    # Find out the wpr file names for each page.
    for wpr_file in data['archives']:
      page_urls = data['archives'][wpr_file]
      for url in page_urls:
        self._url_to_wpr_file[url] = wpr_file

    self.temp_target_wpr_file_path = None

  @classmethod
  def FromFile(cls, file_path, page_set_file_path):
    with open(file_path, 'r') as f:
      data = json.load(f)
      return cls(file_path, page_set_file_path, data)

  def WprFilePathForPage(self, page):
    if self.temp_target_wpr_file_path:
      return self.temp_target_wpr_file_path
    wpr_file = self._url_to_wpr_file.get(page.url, None)
    if wpr_file:
      return self._WprFileNameToPath(wpr_file)
    return None

  def AddNewTemporaryRecording(self, temp_target_wpr_file_path):
    self.temp_target_wpr_file_path = temp_target_wpr_file_path

  def AddRecordedPages(self, pages):
    (target_wpr_file, target_wpr_file_path) = self._NextWprFileName()
    for page in pages:
      self._SetWprFileForPage(page, target_wpr_file)
    shutil.move(self.temp_target_wpr_file_path, target_wpr_file_path)
    self._WriteToFile()
    self._DeleteAbandonedWprFiles()

  def _DeleteAbandonedWprFiles(self):
    # Update the metadata so that the abandoned wpr files don't have empty url
    # arrays.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del self._wpr_file_to_urls[wpr_file]
      # Don't fail if we're unable to delete some of the files.
      wpr_file_path = self._WprFileNameToPath(wpr_file)
      try:
        os.remove(wpr_file_path)
      except Exception:
        logging.warning('Failed to delete file: %s' % wpr_file_path)

  def _AbandonedWprFiles(self):
    abandoned_wpr_files = []
    for wpr_file, urls in self._wpr_file_to_urls.iteritems():
      if not urls:
        abandoned_wpr_files.append(wpr_file)
    return abandoned_wpr_files

  def _WriteToFile(self):
    """Writes the metadata into the file passed as constructor parameter."""
    metadata = dict()
    metadata['description'] = (
        'Describes the Web Page Replay archives for a page set. Don\'t edit by '
        'hand! Use record_wpr for updating.')
    # Pointer from the metadata to the page set .json file.
    metadata['page_set'] = os.path.relpath(self._page_set_file_path,
                                           self._archive_data_file_dir)
    metadata['archives'] = self._wpr_file_to_urls.copy()
    # Don't write data for abandoned archives.
    abandoned_wpr_files = self._AbandonedWprFiles()
    for wpr_file in abandoned_wpr_files:
      del metadata['archives'][wpr_file]

    with open(self._archive_data_file_path, 'w') as f:
      json.dump(metadata, f, indent=4)
      f.flush()

  def _WprFileNameToPath(self, wpr_file):
    return os.path.abspath(os.path.join(self._archive_data_file_dir, wpr_file))

  def _NextWprFileName(self):
    """Creates a new file name for a wpr archive file."""
    # The names are of the format "some_thing_number.wpr". Read the numbers.
    highest_number = -1
    base = None
    for wpr_file in self._wpr_file_to_urls:
      match = re.match(r'(?P<BASE>.*)_(?P<NUMBER>[0-9]+)\.wpr', wpr_file)
      if not match:
        raise Exception('Illegal wpr file name ' + wpr_file)
      highest_number = max(int(match.groupdict()['NUMBER']), highest_number)
      if base and match.groupdict()['BASE'] != base:
        raise Exception('Illegal wpr file name ' + wpr_file +
                        ', doesn\'t begin with ' + base)
      base = match.groupdict()['BASE']
    new_filename = '%s_%03d.wpr' % (base, highest_number + 1)
    return new_filename, self._WprFileNameToPath(new_filename)

  def _SetWprFileForPage(self, page, wpr_file):
    """For modifying the metadata when we're going to record a new archive."""
    old_wpr_file = self._url_to_wpr_file.get(page.url, None)
    if old_wpr_file:
      self._wpr_file_to_urls[old_wpr_file].remove(page.url)
    self._url_to_wpr_file[page.url] = wpr_file
    if wpr_file not in self._wpr_file_to_urls:
      self._wpr_file_to_urls[wpr_file] = []
    self._wpr_file_to_urls[wpr_file].append(page.url)
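A minimal usage sketch of the class above with in-memory data; the paths and URL are illustrative (the unit test below exercises the same API against real temporary files):

from telemetry import page_set_archive_info

class StubPage(object):
  def __init__(self, url):
    self.url = url

# Metadata dict in the same shape FromFile would json.load from disk.
info = page_set_archive_info.PageSetArchiveInfo(
    '/data/archives/info.json', '/data/page_sets/example.json',
    {'archives': {'example_000.wpr': ['http://www.example.com/']}})

# A known URL resolves relative to the metadata file's directory.
assert info.WprFilePathForPage(StubPage('http://www.example.com/')) == (
    '/data/archives/example_000.wpr')
# An unknown URL has no archive.
assert info.WprFilePathForPage(StubPage('http://unknown.example/')) is None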
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import tempfile
import unittest
from telemetry import page_set_archive_info
class MockPage(object):
  def __init__(self, url):
    self.url = url


url1 = 'http://www.foo.com/'
url2 = 'http://www.bar.com/'
url3 = 'http://www.baz.com/'

recording1 = 'data_001.wpr'
recording2 = 'data_002.wpr'

archive_info_contents = ("""
{
  "archives": {
    "%s": ["%s", "%s"],
    "%s": ["%s"]
  }
}
""" % (recording1, url1, url2, recording2, url3))

page1 = MockPage(url1)
page2 = MockPage(url2)
page3 = MockPage(url3)


class TestPageSetArchiveInfo(unittest.TestCase):
  def setUp(self):
    self.tmp_dir = tempfile.mkdtemp()
    # Write the metadata.
    self.page_set_archive_info_file = os.path.join(self.tmp_dir, 'info.json')
    f = open(self.page_set_archive_info_file, 'w')
    f.write(archive_info_contents)
    f.close()
    # Write the existing .wpr files.
    for i in [1, 2]:
      f = open(os.path.join(self.tmp_dir, ('data_00%d.wpr' % i)), 'w')
      f.write(archive_info_contents)
      f.close()
    # Create the PageSetArchiveInfo object to be tested.
    self.archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
        self.page_set_archive_info_file, '/tmp/pageset.json')

  def tearDown(self):
    shutil.rmtree(self.tmp_dir)

  def testReadingArchiveInfo(self):
    self.assertEquals(recording1, os.path.basename(
        self.archive_info.WprFilePathForPage(page1)))
    self.assertEquals(recording1, os.path.basename(
        self.archive_info.WprFilePathForPage(page2)))
    self.assertEquals(recording2, os.path.basename(
        self.archive_info.WprFilePathForPage(page3)))

  def testModifications(self):
    new_recording1 = 'data_003.wpr'
    new_temp_recording = os.path.join(self.tmp_dir, 'recording.wpr')
    f = open(new_temp_recording, 'w')
    f.write('wpr data')
    f.close()

    self.archive_info.AddNewTemporaryRecording(new_temp_recording)

    self.assertEquals(new_temp_recording,
                      self.archive_info.WprFilePathForPage(page1))
    self.assertEquals(new_temp_recording,
                      self.archive_info.WprFilePathForPage(page2))
    self.assertEquals(new_temp_recording,
                      self.archive_info.WprFilePathForPage(page3))

    self.archive_info.AddRecordedPages([page2])

    self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, new_recording1)))
    self.assertFalse(os.path.exists(
        os.path.join(self.tmp_dir, new_temp_recording)))
    self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, recording1)))
    self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, recording2)))

    new_recording2 = 'data_004.wpr'
    f = open(new_temp_recording, 'w')
    f.write('wpr data')
    f.close()
    self.archive_info.AddNewTemporaryRecording(new_temp_recording)
    self.archive_info.AddRecordedPages([page3])

    self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, new_recording2)))
    self.assertFalse(os.path.exists(
        os.path.join(self.tmp_dir, new_temp_recording)))
    self.assertTrue(os.path.exists(os.path.join(self.tmp_dir, recording1)))
    # recording2 is no longer needed, so it was deleted.
    self.assertFalse(os.path.exists(os.path.join(self.tmp_dir, recording2)))
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import unittest
from telemetry import page_set
simple_archive_info = """
{
"archives": {
"data_01.wpr": ["http://www.foo.com/"],
"data_02.wpr": ["http://www.bar.com/"]
}
}
"""
simple_set = """
{"description": "hello",
"archive_path": "foo.wpr",
"archive_data_file": "%s",
"pages": [
{"url": "http://www.foo.com/"}
{"url": "http://www.foo.com/"},
{"url": "http://www.bar.com/"}
]
}
"""
......@@ -18,11 +29,19 @@ simple_set = """
class TestPageSet(unittest.TestCase):
  def testSimpleSet(self):
    with tempfile.NamedTemporaryFile() as f:
      f.write(simple_set)
      f.write(simple_archive_info)
      f.flush()
      ps = page_set.PageSet.FromFile(f.name)
      archive_data_file = f.name
      with tempfile.NamedTemporaryFile() as f2:
        f2.write(simple_set % archive_data_file)
        f2.flush()
        ps = page_set.PageSet.FromFile(f2.name)

        self.assertEquals('hello', ps.description)
        self.assertEquals('foo.wpr', ps.archive_path)
        self.assertEquals(1, len(ps.pages))
        self.assertEquals(archive_data_file, ps.archive_data_file)
        self.assertEquals(2, len(ps.pages))
        self.assertEquals('http://www.foo.com/', ps.pages[0].url)
        self.assertEquals('http://www.bar.com/', ps.pages[1].url)
        self.assertEquals('data_01.wpr', os.path.basename(ps.pages[0].archive_path))
        self.assertEquals('data_02.wpr', os.path.basename(ps.pages[1].archive_path))
......@@ -8,6 +8,7 @@ from telemetry import page
class TestPage(unittest.TestCase):
  def testGetUrlBaseDirAndFileForAbsolutePath(self):
    apage = page.Page('file:///somedir/otherdir/file.html',
                      None, # In this test, we don't need a page set.
                      base_dir='basedir')
    dirname, filename = apage.url_base_dir_and_file
    self.assertEqual(dirname, 'basedir/somedir/otherdir')
......@@ -15,6 +16,7 @@ class TestPage(unittest.TestCase):
  def testGetUrlBaseDirAndFileForRelativePath(self):
    apage = page.Page('file:///../../otherdir/file.html',
                      None, # In this test, we don't need a page set.
                      base_dir='basedir')
    dirname, filename = apage.url_base_dir_and_file
    self.assertEqual(dirname, 'basedir/../../otherdir')
......@@ -22,6 +24,7 @@ class TestPage(unittest.TestCase):
  def testGetUrlBaseDirAndFileForUrlBaseDir(self):
    apage = page.Page('file:///../../somedir/otherdir/file.html',
                      None, # In this test, we don't need a page set.
                      base_dir='basedir')
    setattr(apage, 'url_base_dir', 'file:///../../somedir/')
    dirname, filename = apage.url_base_dir_and_file
......@@ -29,9 +32,10 @@ class TestPage(unittest.TestCase):
    self.assertEqual(filename, 'otherdir/file.html')

  def testDisplayUrlForHttp(self):
    self.assertEquals(page.Page('http://www.foo.com/').display_url,
    self.assertEquals(page.Page('http://www.foo.com/', None).display_url,
                      'www.foo.com/')

  def testDisplayUrlForFile(self):
    self.assertEquals(page.Page('file:///../../otherdir/file.html').display_url,
                      'file.html')
    self.assertEquals(
        page.Page('file:///../../otherdir/file.html', None).display_url,
        'file.html')
......@@ -3,7 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
import tempfile
import time

from telemetry import all_page_actions # pylint: disable=W0611
......@@ -77,6 +79,11 @@ def Main(benchmark_dir):
  ps = page_set.PageSet.FromFile(args[0])

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.wpr_mode = wpr_modes.WPR_RECORD
  recorder.CustomizeBrowserOptions(options)
  possible_browser = browser_finder.FindBrowser(options)
......@@ -88,11 +95,23 @@ Use --browser=list to figure out which are available.\n"""
  with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, recorder, results)

  if len(results.page_failures):
  if results.page_failures:
    logging.warning('Some pages failed. The recording has not been updated for '
                    'these pages.')
    logging.warning('Failed pages: %s', '\n'.join(
        [failure['page'].url for failure in results.page_failures]))

  if len(results.skipped_pages):
  if results.skipped_pages:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning('Skipped pages: %s', '\n'.join(
        [skipped['page'].url for skipped in results.skipped_pages]))

  if results.page_successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(
        [page['page'] for page in results.page_successes])
  else:
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.page_failures))
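Condensed, the recording lifecycle this hunk adds (browser setup, the actual run, and error handling elided; the page set path is illustrative):

import os
import tempfile

from telemetry import page_set

ps = page_set.PageSet.FromFile('page_sets/example.json')  # illustrative path
temp_wpr = tempfile.mkstemp()[1]
# While a temporary recording is registered, WprFilePathForPage returns it
# for every page, so the whole run records into the temp file.
ps.wpr_archive_info.AddNewTemporaryRecording(temp_wpr)
# ... run the recorder over ps via page_runner.PageRunner ...
recorded = list(ps.pages)  # stand-in for the pages in results.page_successes
if recorded:
  # Moves temp_wpr to the next numbered archive (e.g. example_001.wpr),
  # rewrites the metadata file, and deletes archives no page references.
  ps.wpr_archive_info.AddRecordedPages(recorded)
else:
  os.remove(temp_wpr)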
......@@ -15,6 +15,7 @@ class ScrollingActionTest(tab_test_case.TabTestCase):
    self._browser.SetHTTPServerDirectory(unittest_data_dir)
    page = Page(
        self._browser.http_server.UrlOf(filename),
        None, # In this test, we don't need a page set.
        attributes=page_attributes)

    self._tab.Navigate(page.url)
......@@ -88,4 +89,3 @@ class ScrollingActionTest(tab_test_case.TabTestCase):
    self.assertTrue(rect_bottom <= viewport_height)
    self.assertTrue(rect_right <= viewport_width)