Commit 090eb630 authored by jam@chromium.org

Convert find_in_page.py pyauto test to a browser_test. I skipped test cases that were duplicated in existing tests (i.e. PDF searching, which is already covered in pdf_browsertest.cc, and basic/case-sensitive searching).

I converted the looping and timing to instead hop to the history thread and back, which should make things less flaky.
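
Concretely, the patch replaces polling with a barrier that round-trips through the history service: issue any cheap history query and spin the message loop until its callback fires. The diff below adds this as FlushHistoryService(); the condensed sketch here uses the same APIs, with illustrative function names:

// Illustrative sketch of the pattern added below as FlushHistoryService().
// Quits the nested message loop once the history service has replied.
void OnHistoryServiceReplied(int /* next download id, unused */) {
  MessageLoop::current()->Quit();
}

// Issues a trivial query to the history service and blocks until it answers.
// The history backend handles requests on a single thread in order, so by the
// time this reply arrives, earlier requests (e.g. the one made by the
// chrome://history page) have been processed as well.
void WaitForHistoryService(Profile* profile) {
  CancelableRequestConsumer consumer;
  HistoryServiceFactory::GetForProfile(profile, Profile::IMPLICIT_ACCESS)->
      GetNextDownloadId(&consumer, base::Bind(&OnHistoryServiceReplied));
  content::RunMessageLoop();  // Returns when OnHistoryServiceReplied runs.
}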

BUG=143637
Review URL: https://chromiumcodereview.appspot.com/10827395

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@152329 0039d316-1c4b-4281-b951-d872f2087c98
parent e7ea3f91
@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/file_util.h"
#include "base/message_loop.h"
#include "base/string16.h"
#include "base/string_util.h"
#include "base/utf_string_conversions.h"
#include "chrome/browser/cancelable_request.h"
#include "chrome/browser/history/history.h"
#include "chrome/browser/history/history_service_factory.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/browser.h"
#include "chrome/browser/ui/browser_commands.h"
@@ -22,12 +26,15 @@
#include "chrome/common/url_constants.h"
#include "chrome/test/base/in_process_browser_test.h"
#include "chrome/test/base/ui_test_utils.h"
#include "content/public/browser/download_manager.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/notification_types.h"
#include "content/public/browser/render_view_host.h"
#include "content/public/browser/web_contents.h"
#include "content/public/browser/web_contents_view.h"
#include "content/public/test/browser_test_utils.h"
#include "content/public/test/download_test_observer.h"
#include "net/base/net_util.h"
#include "ui/base/accelerators/accelerator.h"
#include "ui/base/keycodes/keyboard_codes.h"
@@ -68,6 +75,14 @@ const bool kCaseSensitive = true;
const int kMoveIterations = 30;
namespace {
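// Callback for the query issued by FlushHistoryService() below; quitting the
// message loop unblocks the test once the history service has replied.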
void HistoryServiceQueried(int) {
MessageLoop::current()->Quit();
}
} // namespace
class FindInPageControllerTest : public InProcessBrowserTest {
public:
FindInPageControllerTest() {
@@ -174,6 +189,15 @@ class FindInPageControllerTest : public InProcessBrowserTest {
FilePath().AppendASCII("find_in_page"),
FilePath().AppendASCII(filename));
}
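// Blocks until the history service has processed everything queued so far:
// issues a trivial query and spins the message loop until its reply
// (HistoryServiceQueried) quits it.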
void FlushHistoryService() {
CancelableRequestConsumer history_consumer;
HistoryServiceFactory::GetForProfile(
browser()->profile(), Profile::IMPLICIT_ACCESS)->
GetNextDownloadId(&history_consumer,
base::Bind(&HistoryServiceQueried));
content::RunMessageLoop();
}
};
// This test loads a page with frames and starts FindInPage requests.
@@ -191,19 +215,19 @@ IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindInPageFrames) {
EXPECT_EQ(11, FindInPageWchar(tab, L"go",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(1, ordinal);
EXPECT_EQ(04, FindInPageWchar(tab, L"goo",
EXPECT_EQ(4, FindInPageWchar(tab, L"goo",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(1, ordinal);
EXPECT_EQ(03, FindInPageWchar(tab, L"goog",
EXPECT_EQ(3, FindInPageWchar(tab, L"goog",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(1, ordinal);
EXPECT_EQ(02, FindInPageWchar(tab, L"googl",
EXPECT_EQ(2, FindInPageWchar(tab, L"googl",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(1, ordinal);
EXPECT_EQ(01, FindInPageWchar(tab, L"google",
EXPECT_EQ(1, FindInPageWchar(tab, L"google",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(1, ordinal);
EXPECT_EQ(00, FindInPageWchar(tab, L"google!",
EXPECT_EQ(0, FindInPageWchar(tab, L"google!",
kFwd, kIgnoreCase, &ordinal));
EXPECT_EQ(0, ordinal);
@@ -254,6 +278,197 @@ IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindInPageFrames) {
EXPECT_EQ(0, ordinal);
}
// Verify search for text within various forms and text areas.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindInPageFormsTextAreas) {
std::vector<GURL> urls;
urls.push_back(GetURL("textintextarea.html"));
urls.push_back(GetURL("smalltextarea.html"));
urls.push_back(GetURL("populatedform.html"));
TabContents* tab = chrome::GetActiveTabContents(browser());
for (size_t i = 0; i < urls.size(); ++i) {
ui_test_utils::NavigateToURL(browser(), urls[i]);
EXPECT_EQ(1, FindInPageWchar(tab, L"cat", kFwd, kIgnoreCase, NULL));
EXPECT_EQ(0, FindInPageWchar(tab, L"bat", kFwd, kIgnoreCase, NULL));
}
}
// Verify search for text within special URLs such as chrome://history,
// chrome://downloads, and the test data directory.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindNextAndPrevious) {
TabContents* tab = chrome::GetActiveTabContents(browser());
FilePath data_dir = ui_test_utils::GetTestFilePath(FilePath(), FilePath());
ui_test_utils::NavigateToURL(browser(), net::FilePathToFileURL(data_dir));
EXPECT_EQ(1, FindInPageWchar(tab, L"downloads", kFwd, kIgnoreCase, NULL));
ui_test_utils::NavigateToURL(browser(), GURL(chrome::kChromeUIHistoryURL));
// The history page does an async request to the history service and then
// updates the renderer. So we make a query as well, and by the time it comes
// back we know the data is on its way to the renderer.
FlushHistoryService();
string16 query(data_dir.LossyDisplayName());
EXPECT_EQ(1,
ui_test_utils::FindInPage(tab, query, kFwd, kIgnoreCase, NULL,
NULL));
// Start a download.
content::DownloadManager* download_manager =
content::BrowserContext::GetDownloadManager(browser()->profile());
scoped_ptr<content::DownloadTestObserver> observer(
new content::DownloadTestObserverTerminal(
download_manager, 1,
content::DownloadTestObserver::ON_DANGEROUS_DOWNLOAD_ACCEPT));
GURL download_url = ui_test_utils::GetTestUrl(
FilePath().AppendASCII("downloads"),
FilePath().AppendASCII("a_zip_file.zip"));
ui_test_utils::NavigateToURL(browser(), download_url);
observer->WaitForFinished();
ui_test_utils::NavigateToURL(browser(), GURL(chrome::kChromeUIDownloadsURL));
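// Like the history page, the downloads page populates asynchronously, so
// flush the history service before searching for the downloaded file name.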
FlushHistoryService();
EXPECT_EQ(1,
FindInPageWchar(tab, L"a_zip_file.zip", kFwd, kIgnoreCase, NULL));
}
// Verify search selection coordinates. The data file is set up such that the
// matches occur on the same line, and we verify their positions by checking
// their relative positions.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindInPageSpecialURLs) {
std::wstring search_string(L"\u5728\u897f\u660c\u536b\u661f\u53d1");
gfx::Rect first, second, first_reverse;
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("specialchar.html"));
ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kFwd, kIgnoreCase, NULL, &first);
ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kFwd, kIgnoreCase, NULL, &second);
// The search occurrences are in the same row, so the top and bottom
// coordinates should be the same even for the second search.
ASSERT_EQ(first.y(), second.y());
ASSERT_EQ(first.bottom(), second.bottom());
ASSERT_LT(first.x(), second.x());
ASSERT_LT(first.right(), second.right());
ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kBack, kIgnoreCase, NULL,
&first_reverse);
// We found the next match and then went back, so the find coordinates should
// be the same as the previous ones.
ASSERT_EQ(first, first_reverse);
}
// Verifies that comments and meta data are not searchable.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest,
CommentsAndMetaDataNotSearchable) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("specialchar.html"));
std::wstring search_string =
L"\u4e2d\u65b0\u793e\u8bb0\u8005\u5b8b\u5409\u6cb3\u6444\u4e2d\u65b0\u7f51";
EXPECT_EQ(0, ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kFwd, kIgnoreCase, NULL, NULL));
}
// Verifies that <span> text and list items are searchable.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, SpanAndListsSearchable) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("FindRandomTests.html"));
std::wstring search_string = L"has light blue eyes and my father has dark";
EXPECT_EQ(1, ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kFwd, kIgnoreCase, NULL, NULL));
search_string = L"Google\nApple\nandroid";
EXPECT_EQ(1, ui_test_utils::FindInPage(
tab, WideToUTF16(search_string), kFwd, kIgnoreCase, NULL, NULL));
}
// Find in a very large page
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, LargePage) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("largepage.html"));
std::wstring search_string = L"daughter of Prince";
EXPECT_EQ(373,
FindInPageWchar(tab, search_string.c_str(), kFwd, kIgnoreCase,
NULL));
}
// Find a very long string in a large page.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindLongString) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("largepage.html"));
FilePath path = ui_test_utils::GetTestFilePath(
FilePath().AppendASCII("find_in_page"),
FilePath().AppendASCII("LongFind.txt"));
std::string query;
file_util::ReadFileToString(path, &query);
std::wstring search_string = UTF8ToWide(query);
EXPECT_EQ(1,
FindInPageWchar(tab, search_string.c_str(), kFwd, kIgnoreCase,
NULL));
}
// Find a big font string in a page.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, BigString) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("BigText.html"));
EXPECT_EQ(1,
FindInPageWchar(tab, L"SomeLargeString", kFwd, kIgnoreCase, NULL));
}
// Search Back and Forward on a single occurrence.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, SingleOccurrence) {
TabContents* tab = chrome::GetActiveTabContents(browser());
ui_test_utils::NavigateToURL(browser(), GetURL("FindRandomTests.html"));
gfx::Rect first_rect;
EXPECT_EQ(1,
ui_test_utils::FindInPage(tab, ASCIIToUTF16("2010 Pro Bowl"), kFwd,
kIgnoreCase, NULL, &first_rect));
gfx::Rect second_rect;
EXPECT_EQ(1,
ui_test_utils::FindInPage(tab, ASCIIToUTF16("2010 Pro Bowl"), kFwd,
kIgnoreCase, NULL, &second_rect));
// Doing a fake find so we have no previous search.
ui_test_utils::FindInPage(tab, ASCIIToUTF16("ghgfjgfh201232rere"), kFwd,
kIgnoreCase, NULL, NULL);
ASSERT_EQ(first_rect, second_rect);
EXPECT_EQ(1,
ui_test_utils::FindInPage(tab, ASCIIToUTF16("2010 Pro Bowl"), kFwd,
kIgnoreCase, NULL, &first_rect));
EXPECT_EQ(1,
ui_test_utils::FindInPage(tab, ASCIIToUTF16("2010 Pro Bowl"), kBack,
kIgnoreCase, NULL, &second_rect));
ASSERT_EQ(first_rect, second_rect);
}
// Search for the entire contents of a text file page; the match count should be 1.
IN_PROC_BROWSER_TEST_F(FindInPageControllerTest, FindWholeFileContent) {
TabContents* tab = chrome::GetActiveTabContents(browser());
FilePath path = ui_test_utils::GetTestFilePath(
FilePath().AppendASCII("find_in_page"),
FilePath().AppendASCII("find_test.txt"));
ui_test_utils::NavigateToURL(browser(), net::FilePathToFileURL(path));
std::string query;
file_util::ReadFileToString(path, &query);
std::wstring search_string = UTF8ToWide(query);
EXPECT_EQ(1,
FindInPageWchar(tab, search_string.c_str(), false, false, NULL));
}
// Specifying a prototype so that we can add the WARN_UNUSED_RESULT attribute.
bool FocusedOnPage(WebContents* web_contents, std::string* result)
WARN_UNUSED_RESULT;
@@ -49,7 +49,6 @@
'downloads',
'execute_javascript',
'extensions',
'find_in_page',
'flash',
'fullscreen_mouselock',
'history',
@@ -360,8 +359,6 @@
'-extensions.ExtensionsTest.testAllowAccessFileURLs',
'-extensions.ExtensionsTest.testAllowIncognitoExtension',
'-extensions.ExtensionsTest.testDisableEnableExtension',
# Need internal data dirs in autotest package. crosbug.com/6855
'-find_in_page.FindMatchTests.testSearchInPDF',
# crbug.com/134593
'-gtalk.test_basic.BasicTest.testCurrentVersion',
'-gtalk.test_basic.BasicTest.testRCVersion',
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import codecs
import os
import unittest
import pyauto_functional
import pyauto
import pyauto_errors
import test_utils
class FindMatchTests(pyauto.PyUITest):
# Data dir where all find test data files are kept
find_test_data_dir = 'find_in_page'
def testCanFindMatchCount(self):
"""Verify Find match count for valid search"""
url = self.GetFileURLForDataPath('title1.html')
self.NavigateToURL(url)
self.assertEqual(1, self.FindInPage('title')['match_count'])
def testCanFindMatchCountFail(self):
"""Verify Find match count for invalid search"""
url = self.GetFileURLForDataPath('title1.html')
self.NavigateToURL(url)
self.assertEqual(0, self.FindInPage('blah')['match_count'])
def testFindIsNotCaseSensitive(self):
"""Verify that find is not case sensitive.
Manually Find is case insensitive. But since FindInPage is
case-sensitive by default we are confirming that we get a
different result when we turn off case matching.
"""
url = self.GetFileURLForDataPath('find_in_page', 'largepage.html')
self.NavigateToURL(url)
case_sensitive_result = self.FindInPage('The')['match_count']
case_insensitive_result = (self.FindInPage('The', match_case=False)
['match_count'])
self.assertTrue(case_insensitive_result >= case_sensitive_result)
def testSearchInTextAreas(self):
"""Verify search for text within various forms and text areas."""
urls = []
urls.append(self.GetFileURLForDataPath(
'find_in_page', 'textintextarea.html'))
urls.append(self.GetFileURLForDataPath(
'find_in_page', 'smalltextarea.html'))
urls.append(self.GetFileURLForDataPath(
'find_in_page', 'populatedform.html'))
for url in urls:
self.NavigateToURL(url)
self.assertEqual(1, self.FindInPage('cat')['match_count'])
self.assertEqual(0, self.FindInPage('bat')['match_count'])
def testSearchWithinSpecialURL(self):
"""Verify search for text within special URLs such as chrome:history.
chrome://history, chrome://downloads, pyAuto Data directory
"""
zip_file = 'a_zip_file.zip'
self.NavigateToURL(self.GetFileURLForPath(self.DataDir()))
# search in Data directory
self.assertEqual(1,
self.FindInPage('downloads', tab_index=0)['match_count'])
# search in History page
self.AppendTab(pyauto.GURL('chrome://history'))
# The contents of the history page load asynchronously after the tab loads.
search_query = os.path.join('chrome', 'test', 'data')
self.WaitUntil(
lambda: self.FindInPage(search_query, tab_index=1)['match_count'],
expect_retval=1)
self.assertEqual(
1, self.FindInPage(search_query, tab_index=1)['match_count'])
# search in Downloads page
test_utils.DownloadFileFromDownloadsDataDir(self, zip_file)
self.AppendTab(pyauto.GURL('chrome://downloads'))
# The contents of the downloads page load asynchronously after the tab loads.
self.WaitUntil(
lambda: self.FindInPage(zip_file, tab_index=2)['match_count'],
expect_retval=2)
self.assertEqual(2,
self.FindInPage(zip_file, tab_index=2)['match_count'])
test_utils.RemoveDownloadedTestFile(self, zip_file)
def testFindNextAndPrevious(self):
"""Verify search selection coordinates.
The data file used is set-up such that the text occurs on the same line,
and we verify their positions by verifying their relative positions.
"""
search_string = u'\u5728\u897f\u660c\u536b\u661f\u53d1'
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'specialchar.html')
self.NavigateToURL(url)
first_find = self.FindInPage(search_string)
second_find = self.FindInPage(search_string, find_next=True)
# The search occurrences are in the same row, so the top and bottom
# coordinates should be the same even for the second search.
self.assertEqual(first_find['match_top'], second_find['match_top'],
'Words\' top coordinates should be same')
self.assertEqual(first_find['match_bottom'], second_find['match_bottom'],
'Words\' bottom coordinates should be same')
# And left-right coordinates should be in order.
self.assertTrue(first_find['match_left'] < second_find['match_left'],
'Second find left coordinate should be greater than '
'the first find left coordinate')
self.assertTrue(first_find['match_right'] < second_find['match_right'],
'Second find right coordinate should be greater than '
'the first find right coordinate')
first_find_reverse = self.FindInPage(
search_string, find_next=True, forward=False)
# We found the next match and then went back, so the find coordinates
# should be the same as the previous ones.
self.assertEqual(first_find, first_find_reverse,
'First occurrence must be selected, since we went back')
def testSpecialChars(self):
"""Test find in page with unicode and special characters.
Finds from page content, comments and meta data and verifies that comments
and meta data are not searchable.
"""
search_string = u'\u5728\u897f\u660c\u536b\u661f\u53d1'
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'specialchar.html')
self.NavigateToURL(url)
self.assertEqual(4, self.FindInPage(search_string)['match_count'])
search_string = u'240^*&%!#~!*&\u518d\u5c31\u8077\u624b\u5f53'
self.assertEqual(2, self.FindInPage(search_string)['match_count'])
# Find for the special chars in the comment and in the meta tag
search_string = u'\u4e2d\u65b0\u793e\u8bb0\u8005\u5b8b\u5409'\
u'\u6cb3\u6444\u4e2d\u65b0\u7f51'
self.assertEqual(0, self.FindInPage(search_string)['match_count'],
'Chrome should not find chars from comment or meta tags')
def testFindInLargePage(self):
"""Find in a very large page"""
url = self.GetFileURLForDataPath(self.find_test_data_dir, 'largepage.html')
self.NavigateToURL(url)
self.assertEqual(373, self.FindInPage('daughter of Prince')['match_count'])
def testFindLongString(self):
"""Find a very long string in a large page"""
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'largepage.html')
self.NavigateToURL(url)
file = codecs.open(os.path.join(self.DataDir(), self.find_test_data_dir,
'LongFind.txt'), 'r', 'utf-8')
search = file.read()
self.assertEqual(1, self.FindInPage(search)['match_count'])
def testFindBigString(self):
"""Find a big font string in a page"""
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'BigText.html')
self.NavigateToURL(url)
self.assertEqual(1, self.FindInPage('SomeLargeString')['match_count'])
def testVariousFindTests(self):
"""Test find in page for <span> style text, lists, html comments, etc."""
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'FindRandomTests.html')
self.NavigateToURL(url)
search = 'has light blue eyes and my father has dark'
self.assertEqual(1, self.FindInPage(search)['match_count'],
'Failed to find text with <span> tag')
# Find for list items
search = 'Google\nApple\nandroid'
self.assertEqual(1, self.FindInPage(search)['match_count'],
'Failed to find the list items')
# Find HTML comments
self.assertEqual(0, self.FindInPage('example comment')['match_count'],
'We should not find HTML comments')
def testFindWholeFileContent(self):
"""Find the whole text file page and find count should be 1"""
find_test_file = os.path.join(self.DataDir(), self.find_test_data_dir,
'find_test.txt')
url = self.GetFileURLForPath(find_test_file)
self.NavigateToURL(url)
file = open(find_test_file)
search = file.read()
self.assertEqual(1, self.FindInPage(search)['match_count'],
'Failed to find the whole page')
def testSingleOccurrence(self):
"""Search Back and Forward on a single occurrence"""
url = self.GetFileURLForDataPath(
self.find_test_data_dir, 'FindRandomTests.html')
self.NavigateToURL(url)
self.assertEqual(1, self.FindInPage('2010 Pro Bowl')['match_count'])
# First occurrence find
first_occurrence_dict = self.FindInPage('2010 Pro Bowl')
# Finding next occurrence
next_occurrence_dict = self.FindInPage('2010 Pro Bowl', find_next=True)
self.assertEqual(first_occurrence_dict, next_occurrence_dict,
'We have only one occurrence in this page, so '
'first and next coordinates must be the same')
# Doing a fake find so we have no previous search
self.FindInPage('ghgfjgfh201232rere')
first_occurrence_dict = self.FindInPage('2010 Pro Bowl')
# Finding previous occurrence
back_occurrence_dict = self.FindInPage('2010 Pro Bowl',
find_next=True, forward=False)
self.assertEqual(first_occurrence_dict, back_occurrence_dict,
'We have only one occurrence in this page, so '
'first and back search coordinates must be the same')
def _VerifySearchInPDFURL(self, url, word, expected_count):
"""Verify that we can find in a pdf file."""
self.NavigateToURL(url)
# FindInPage may throw a JSONInterfaceError if it is called before the page
# has loaded (crbug.com/107448), so retry a few times.
num_loops = 10
for loop in range(num_loops):
try:
search_count = self.FindInPage(word, timeout=1000)['match_count']
break
except pyauto_errors.JSONInterfaceError:
if loop == num_loops - 1:
raise
self.assertEqual(expected_count, search_count,
'Failed to find in the %s pdf file' % url)
def testSearchInPDF(self):
"""Verify that we can find in a pdf file.
Only for Google Chrome builds (Chromium builds do not have internal pdf).
"""
# bail out if not a branded build
properties = self.GetBrowserInfo()['properties']
if properties['branding'] != 'Google Chrome':
return
# Search in pdf file over file://.
file_url = self.GetFileURLForContentDataPath('plugin', 'Embed.pdf')
self._VerifySearchInPDFURL(file_url, 'adobe', 8)
# Search in pdf file over http://.
http_url = 'http://www.irs.gov/pub/irs-pdf/fw4.pdf'
self._VerifySearchInPDFURL(http_url, 'Allowances', 16)
if __name__ == '__main__':
pyauto_functional.Main()