Commit d7a7befe authored by dtu@chromium.org

[telemetry] Fix testPageSetSmoke. The archive_data_file should be relative to the page set.

Also:
- Make it fatal if the archive file is not on the filesystem (see the sketch after this list).
- Remove broken archive file links.
- Move the test from test_utils/ to unittest/.
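
A minimal sketch of the new check, assuming a page_set object that exposes base_dir and archive_data_file as in the diff below (CheckArchiveDataFile is a hypothetical helper name, not part of this change):

  import os

  def CheckArchiveDataFile(page_set):
    # The archive path is resolved against the page set's own directory
    # (page_set.base_dir) rather than a shared page_sets directory.
    archive_path = os.path.join(page_set.base_dir, page_set.archive_data_file)
    # A missing archive is treated as a hard failure instead of a logged warning.
    assert os.path.exists(archive_path), (
        'Archive data file not found: %s' % archive_path)
    return archive_path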


BUG=None.
TEST=content/test/gpu/run_unittests page_set && tools/perf/run_tests page_set

Review URL: https://codereview.chromium.org/286543004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@273890 0039d316-1c4b-4281-b951-d872f2087c98
parent db347a76
@@ -7,7 +7,7 @@ import os
 from telemetry.core import discover
 from telemetry.page import page_set as page_set_module
 from telemetry.page import page_set_archive_info
-from telemetry.test_util import page_set_smoke_test
+from telemetry.unittest import page_set_smoke_test
 
 
 class PageSetUnitTest(page_set_smoke_test.PageSetSmokeTest):
......
@@ -26,9 +26,7 @@ class Alexa1To10000PageSet(PageSet):
   def __init__(self):
     super(Alexa1To10000PageSet, self).__init__(
       make_javascript_deterministic=True,
-      user_agent_type='desktop',
-      # pylint: disable=C0301
-      archive_data_file='/home/default/storage/webpages_archive/10k/alexa1-10000.json')
+      user_agent_type='desktop')
 
     urls_list = [
       # Why: #1 in Alexa global
......
@@ -71,7 +71,6 @@ class GmailComposeDiscardPageSet(page_set_module.PageSet):
   def __init__(self):
     super(GmailComposeDiscardPageSet, self).__init__(
       credentials_path='data/credentials.json',
-      user_agent_type='desktop',
-      archive_data_file='data/gmail_compose_discard.json')
+      user_agent_type='desktop')
 
     self.AddPage(GmailComposeDiscardPage(self))
@@ -4,7 +4,7 @@
 
 import os
 
-from telemetry.test_util import page_set_smoke_test
+from telemetry.unittest import page_set_smoke_test
 
 
 class PageSetUnitTest(page_set_smoke_test.PageSetSmokeTest):
......
@@ -12,7 +12,6 @@ class ToughEnergyCasesPage(page_module.Page):
   def __init__(self, url, page_set):
     super(ToughEnergyCasesPage, self).__init__(url=url, page_set=page_set)
     self.credentials_path = 'data/credentials.json'
-    self.archive_data_file = 'data/tough_energy_cases.json'
 
 
 class GmailPage(ToughEnergyCasesPage):
@@ -42,8 +41,7 @@ class ToughEnergyCasesPageSet(page_set_module.PageSet):
 
   def __init__(self):
     super(ToughEnergyCasesPageSet, self).__init__(
-      credentials_path='data/credentials.json',
-      archive_data_file='data/tough_energy_cases.json')
+      credentials_path='data/credentials.json')
 
     # Why: Above the fold animated gif running in the background
     self.AddPage(ToughEnergyCasesPage(
......
@@ -25,7 +25,7 @@ try:
   from pylib import ports  # pylint: disable=F0401
 except Exception:
   ports = None
 
-from pylib.device import device_utils
+from pylib.device import device_utils  # pylint: disable=F0401
 from pylib.utils import apk_helper  # pylint: disable=F0401
......
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library for bootstrapping Telemetry performance unittesting."""
@@ -22,23 +22,23 @@ class PageSetSmokeTest(unittest.TestCase):
     # Instantiate all page sets and verify that all URLs have an associated
     # archive.
     page_sets = discover.GetAllPageSetFilenames(page_sets_dir)
-    for path in page_sets:
-      page_set = page_set_module.PageSet.FromFile(path)
+    for page_set_path in page_sets:
+      page_set = page_set_module.PageSet.FromFile(page_set_path)
 
       # TODO: Eventually these should be fatal.
       if not page_set.archive_data_file:
-        logging.warning('Skipping %s: missing archive data file', path)
-        continue
-      if not os.path.exists(os.path.join(page_sets_dir,
-                                         page_set.archive_data_file)):
-        logging.warning('Skipping %s: archive data file not found', path)
+        logging.warning('Skipping %s: no archive data file', page_set_path)
         continue
 
-      wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
-          os.path.join(page_sets_dir, page_set.archive_data_file),
-          ignore_archive=True)
-      logging.info('Testing %s', path)
+      logging.info('Testing %s', page_set_path)
+
+      archive_data_file_path = os.path.join(page_set.base_dir,
+                                            page_set.archive_data_file)
+      self.assertTrue(os.path.exists(archive_data_file_path),
+                      msg='Archive data file not found for %s' % page_set_path)
+      wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(
+          archive_data_file_path, ignore_archive=True)
 
       for page in page_set.pages:
         if not page.url.startswith('http'):
           continue
......