Commit ae759364 authored by atwilson@chromium.org's avatar atwilson@chromium.org

Changed CreateWorker to coalesce any matching queued shared workers when a
shared worker is started.

Added tests for worker close and queued shared worker cases.
BUG=29998
TEST=new UI tests added.

Review URL: http://codereview.chromium.org/580007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@38549 0039d316-1c4b-4281-b951-d872f2087c98
parent 0f780b3f
......@@ -170,8 +170,15 @@ void WorkerProcessHost::CreateWorker(const WorkerInstance& instance) {
instance.worker_route_id()));
UpdateTitle();
WorkerInstance::SenderInfo info = instances_.back().GetSender();
info.first->Send(new ViewMsg_WorkerCreated(info.second));
// Walk all pending senders and let them know the worker has been created
// (could be more than one in the case where we had to queue up worker
// creation because the worker process limit was reached).
for (WorkerInstance::SenderList::const_iterator i =
instance.senders().begin();
i != instance.senders().end(); ++i) {
i->first->Send(new ViewMsg_WorkerCreated(i->second));
}
}
bool WorkerProcessHost::FilterMessage(const IPC::Message& message,
......
......@@ -42,6 +42,9 @@ class WorkerProcessHost : public ChildProcessHost {
// Returns the single sender (must only be one).
SenderInfo GetSender() const;
typedef std::list<SenderInfo> SenderList;
const SenderList& senders() const { return senders_; }
// Checks if this WorkerInstance matches the passed url/name params
// (per the comparison algorithm in the WebWorkers spec). This API only
// applies to shared workers.
......@@ -69,7 +72,6 @@ class WorkerProcessHost : public ChildProcessHost {
private:
// Set of all senders (clients) associated with this worker.
typedef std::list<SenderInfo> SenderList;
GURL url_;
bool shared_;
bool off_the_record_;
......
......@@ -67,13 +67,19 @@ bool WorkerService::CreateWorker(const GURL &url,
instance.worker_document_set()->Add(
sender, document_id, renderer_id, render_view_route_id);
return CreateWorkerFromInstance(instance);
}
bool WorkerService::CreateWorkerFromInstance(
WorkerProcessHost::WorkerInstance instance) {
WorkerProcessHost* worker = NULL;
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kWebWorkerProcessPerCore)) {
worker = GetProcessToFillUpCores();
} else if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kWebWorkerShareProcesses)) {
worker = GetProcessForDomain(url);
worker = GetProcessForDomain(instance.url());
} else { // One process per worker.
if (!CanCreateWorkerProcess(instance)) {
queued_workers_.push_back(instance);
......@@ -83,22 +89,25 @@ bool WorkerService::CreateWorker(const GURL &url,
// Check to see if this shared worker is already running (two pages may have
// tried to start up the worker simultaneously).
if (is_shared) {
if (instance.shared()) {
// See if a worker with this name already exists.
WorkerProcessHost::WorkerInstance* existing_instance =
FindSharedWorkerInstance(url, name, off_the_record);
FindSharedWorkerInstance(
instance.url(), instance.name(), instance.off_the_record());
// If this worker is already running, no need to create a new copy. Just
// inform the caller that the worker has been created.
if (existing_instance) {
// TODO(atwilson): Change this to scan the sender list (crbug.com/29243).
existing_instance->AddSender(sender, sender_route_id);
sender->Send(new ViewMsg_WorkerCreated(sender_route_id));
WorkerProcessHost::WorkerInstance::SenderInfo sender_info =
instance.GetSender();
existing_instance->AddSender(sender_info.first, sender_info.second);
sender_info.first->Send(new ViewMsg_WorkerCreated(sender_info.second));
return true;
}
// Look to see if there's a pending instance.
WorkerProcessHost::WorkerInstance* pending = FindPendingInstance(
url, name, off_the_record);
instance.url(), instance.name(), instance.off_the_record());
// If there's no instance *and* no pending instance, then it means the
// worker started up and exited already. Log a warning because this should
// be a very rare occurrence and is probably a bug, but it *can* happen so
......@@ -112,7 +121,24 @@ bool WorkerService::CreateWorker(const GURL &url,
// worker to the new instance.
DCHECK(!pending->worker_document_set()->IsEmpty());
instance.ShareDocumentSet(*pending);
RemovePendingInstance(url, name, off_the_record);
RemovePendingInstances(
instance.url(), instance.name(), instance.off_the_record());
// Remove any queued instances of this worker and copy over the sender to
// this instance.
for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
iter != queued_workers_.end();) {
if (iter->Matches(instance.url(), instance.name(),
instance.off_the_record())) {
DCHECK(iter->NumSenders() == 1);
WorkerProcessHost::WorkerInstance::SenderInfo sender_info =
iter->GetSender();
instance.AddSender(sender_info.first, sender_info.second);
iter = queued_workers_.erase(iter);
} else {
++iter;
}
}
}
if (!worker) {
......@@ -403,15 +429,16 @@ void WorkerService::WorkerProcessDestroyed(WorkerProcessHost* process) {
for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
i != queued_workers_.end();) {
if (CanCreateWorkerProcess(*i)) {
WorkerProcessHost* worker =
new WorkerProcessHost(resource_dispatcher_host_);
if (!worker->Init()) {
delete worker;
return;
}
worker->CreateWorker(*i);
i = queued_workers_.erase(i);
WorkerProcessHost::WorkerInstance instance = *i;
queued_workers_.erase(i);
CreateWorkerFromInstance(instance);
// CreateWorkerFromInstance can modify the queued_workers_ list when it
// coalesces queued instances after starting a shared worker, so we
// have to rescan the list from the beginning (our iterator is now
// invalid). This is not a big deal as having any queued workers will be
// rare in practice so the list will be small.
i = queued_workers_.begin();
} else {
++i;
}
......@@ -466,17 +493,20 @@ WorkerService::FindPendingInstance(const GURL& url, const string16& name,
}
void WorkerService::RemovePendingInstance(const GURL& url,
const string16& name,
bool off_the_record) {
void WorkerService::RemovePendingInstances(const GURL& url,
const string16& name,
bool off_the_record) {
// Walk the pending instances looking for a matching pending worker.
for (WorkerProcessHost::Instances::iterator iter =
pending_shared_workers_.begin();
iter != pending_shared_workers_.end();
++iter) {
iter != pending_shared_workers_.end(); ) {
// Pending workers should have no senders - only actively running worker
// instances have senders.
DCHECK(iter->NumSenders() == 0);
if (iter->Matches(url, name, off_the_record)) {
pending_shared_workers_.erase(iter);
break;
iter = pending_shared_workers_.erase(iter);
} else {
++iter;
}
}
}
......
......@@ -87,6 +87,9 @@ class WorkerService : public NotificationObserver {
WorkerService();
~WorkerService();
// Given a WorkerInstance, create an associated worker process.
bool CreateWorkerFromInstance(WorkerProcessHost::WorkerInstance instance);
// Returns a WorkerProcessHost object if one exists for the given domain, or
// NULL if there are no such workers yet.
WorkerProcessHost* GetProcessForDomain(const GURL& url);
......@@ -126,7 +129,7 @@ class WorkerService : public NotificationObserver {
const GURL& url, const string16& name, bool off_the_record);
WorkerProcessHost::WorkerInstance* FindPendingInstance(
const GURL& url, const string16& name, bool off_the_record);
void RemovePendingInstance(
void RemovePendingInstances(
const GURL& url, const string16& name, bool off_the_record);
NotificationRegistrar registrar_;
......
<html>
<body>
<div id=result></div>
<script>
// Writes a progress line into the result div so the UI test can watch it.
function log(message)
{
  var target = document.getElementById("result");
  target.innerHTML += message + "<br>";
}

// Number of shared workers to spin up, taken from the "count=" URL param.
var url = document.location.toString();
var num_workers = parseInt(url.substr(url.search("count=") + 6));
if (!num_workers) {
  log("No count= parameter provided - test aborted");
}

// Bumped each time a worker replies with its client count.
var workers_created = 0;

for (var i = 0; i < num_workers ; ++i) {
  createWorker(i);
}

// Launches shared worker |index| and logs once it answers the eval request.
function createWorker(index) {
  var worker = new SharedWorker("worker_common.js?id=" + index);
  worker.port.postMessage("eval num_clients");
  worker.port.onmessage = function(event) {
    ++workers_created;
    log("worker " + index + " started - num_clients = " + event.data);
    if (workers_created != num_workers)
      return;
    // created the last worker
    log("SUCCESS: all workers created");
    document.cookie = "status=OK";
  };
}
</script>
</body>
</html>
<html>
<body>
<!-- UI test page: fills the per-tab shared-worker quota (from the "count="
     URL parameter), queues two extra instances of one more worker, then
     closes a running worker and waits for the queued instances to start as
     a single coalesced worker.  Success is signalled via the status cookie,
     which the UI test polls. -->
<div id=result></div>
<script>
// Appends a progress message to the page so the UI test can observe it.
function log(message)
{
document.getElementById("result").innerHTML += message + "<br>";
}
// Read the worker count from the "count=" URL parameter.
var url = document.location.toString();
var num_workers = parseInt(url.substr(url.search("count=") + 6));
if (!num_workers) {
log("No count= parameter provided - test aborted");
} else {
for (var i = 0; i < num_workers ; ++i) {
createWorker(i);
}
// Create two extra workers - these should both be queued, and launched as a
// single instance when we shutdown a worker.
createWorker(num_workers);
createWorker(num_workers);
}
// Counts "pong" replies; each reply means a worker instance is running and
// connected to this page.
var workers_created = 0;
// Starts shared worker |i| and reacts to its first reply.
function createWorker(i) {
var worker = new SharedWorker("worker_common.js?id=" + i);
worker.port.postMessage("ping");
worker.port.onmessage = function() {
workers_created++;
log("worker " + i + " started");
if (workers_created == num_workers) {
// We've created all of our workers. Let's shut down the most recent one
// and see if we startup the queued worker.
log("Shutting down worker " + i);
worker.port.postMessage("close");
} else if (workers_created == (num_workers+2)) {
// created the last worker
// Both queued clients connect to the single coalesced instance, so the
// count advances by two once that instance starts.
log("SUCCESS: queued worker created");
document.cookie = "status=OK";
}
}
}
</script>
</body>
</html>
<html>
<body>
<div id=result></div>
<script>
// Writes a progress message into the result div for the UI test harness.
function log(message)
{
  var target = document.getElementById("result");
  target.innerHTML += message + "<br>";
}

// The worker to shut down is selected by the "id=" URL parameter.
var pageUrl = document.location.toString();
var id = parseInt(pageUrl.substr(pageUrl.search("id=") + 3));
log("Shutting down worker #" + id);

// Connect to the (already running) shared worker and ask it to close itself.
var worker = new SharedWorker("worker_common.js?id=" + id);
worker.port.postMessage("close");
</script>
</body>
</html>
<html>
<body>
<div id=result></div>
<script>
// Writes a progress message into the result div for the UI test harness.
function log(message)
{
  var target = document.getElementById("result");
  target.innerHTML += message + "<br>";
}

// The worker to create is selected by the "id=" URL parameter.
var pageUrl = document.location.toString();
var id = parseInt(pageUrl.substr(pageUrl.search("id=") + 3));
log("Creating worker #" + id);

// Just instantiate the shared worker; connecting is the whole test.
var worker = new SharedWorker("worker_common.js?id=" + id);
</script>
</body>
</html>
<html>
<body>
<div id=result></div>
<script>
// Writes a progress message into the result div for the UI test harness.
function log(message)
{
  var target = document.getElementById("result");
  target.innerHTML += message + "<br>";
}

// Phase 1: start a dedicated worker; once it answers, close it and move on
// to the shared-worker phase.
var worker = new Worker("worker_common.js");
var sharedWorker;
worker.onmessage = function(event) {
  log ("worker created");
  worker.postMessage("close");
  // Phase 2: run the same create/close sequence with a shared worker.
  sharedWorker = new SharedWorker("worker_common.js");
  sharedWorker.port.onmessage = function(event) {
    log ("shared worker created");
    sharedWorker.port.postMessage("close");
    // Signal test completion to the UI test harness.
    document.cookie = "status=OK";
  };
  sharedWorker.port.postMessage("ping");
};
worker.postMessage("ping");
</script>
</body>
</html>
// Track the number of clients for this worker - tests can use this to ensure
// that shared workers are actually shared, not distinct.
var num_clients = 0;
if (!self.postMessage) {
// This is a shared worker - mimic dedicated worker APIs
// A shared worker receives one "connect" event per client page.
onconnect = function(event) {
num_clients++;
// Before dispatching each incoming message, rebind self.postMessage to this
// connection's port so the common onmessage handler below replies to the
// client that most recently messaged us.
event.ports[0].onmessage = function(e) {
self.postMessage = function(msg) {
event.ports[0].postMessage(msg);
};
self.onmessage(e);
};
// Default reply target until a message arrives: this connection's port.
self.postMessage = function(msg, ports) {
event.ports[0].postMessage(msg, ports);
};
};
} else {
// Dedicated worker - exactly one client, so count it directly.
num_clients++;
}
onmessage = function(evt) {
if (evt.data == "ping")
postMessage("pong");
else if (evt.data == "auth")
importScripts("/auth-basic");
else if (evt.data == "close")
close();
else if (/eval.+/.test(evt.data)) {
try {
postMessage(eval(evt.data.substr(5)));
......
......@@ -471,3 +471,84 @@ TEST_F(WorkerTest, LimitTotal) {
ASSERT_TRUE(WaitForProcessCountToBe(tab_count, total_workers));
#endif
}
// Verifies that a worker which closes itself tears its process down: after
// the page reports completion, the browser should be left with one tab
// process and zero worker processes.
TEST_F(WorkerTest, WorkerClose) {
scoped_refptr<TabProxy> tab(GetActiveTab());
ASSERT_TRUE(tab.get());
GURL url = GetTestUrl(L"workers", L"worker_close.html");
ASSERT_TRUE(tab->NavigateToURL(url));
// The page sets the completion cookie once both the dedicated and shared
// workers have been created and closed.
std::string value = WaitUntilCookieNonEmpty(tab.get(), url,
kTestCompleteCookie, kTestIntervalMs, kTestWaitTimeoutMs);
ASSERT_STREQ(kTestCompleteSuccess, value.c_str());
// 1 tab process, 0 worker processes - the closed workers must be gone.
ASSERT_TRUE(WaitForProcessCountToBe(1, 0));
}
TEST_F(WorkerTest, QueuedSharedWorkerShutdown) {
// Tests to make sure that queued shared workers are started up when
// shared workers shut down.
// Fill the per-tab quota exactly; the page then queues two extra instances
// of one additional worker (see queued_shared_worker_shutdown.html).
int max_workers_per_tab = WorkerService::kMaxWorkersPerTabWhenSeparate;
GURL url = GetTestUrl(L"workers", L"queued_shared_worker_shutdown.html");
url = GURL(url.spec() + StringPrintf("?count=%d", max_workers_per_tab));
scoped_refptr<TabProxy> tab(GetActiveTab());
ASSERT_TRUE(tab.get());
ASSERT_TRUE(tab->NavigateToURL(url));
// The page sets the cookie only after the queued (coalesced) worker starts.
std::string value = WaitUntilCookieNonEmpty(tab.get(), url,
kTestCompleteCookie, kTestIntervalMs, kTestWaitTimeoutMs);
ASSERT_STREQ(kTestCompleteSuccess, value.c_str());
// Still at the quota: one worker was closed and one queued worker launched.
ASSERT_TRUE(WaitForProcessCountToBe(1, max_workers_per_tab));
}
TEST_F(WorkerTest, MultipleTabsQueuedSharedWorker) {
// Tests to make sure that only one instance of queued shared workers are
// started up even when those instances are on multiple tabs.
int max_workers_per_tab = WorkerService::kMaxWorkersPerTabWhenSeparate;
GURL url = GetTestUrl(L"workers", L"many_shared_workers.html");
// Request one worker more than the quota so the last one is queued.
url = GURL(url.spec() + StringPrintf("?count=%d", max_workers_per_tab+1));
scoped_refptr<TabProxy> tab(GetActiveTab());
ASSERT_TRUE(tab.get());
ASSERT_TRUE(tab->NavigateToURL(url));
ASSERT_TRUE(WaitForProcessCountToBe(1, max_workers_per_tab));
// Create same set of workers in new tab (leaves one worker queued from this
// tab).
scoped_refptr<BrowserProxy> window(automation()->GetBrowserWindow(0));
ASSERT_TRUE(window->AppendTab(url));
ASSERT_TRUE(WaitForProcessCountToBe(2, max_workers_per_tab));
// Now shutdown one of the shared workers - this will fire both queued
// workers, but only one instance should be started
GURL url2 = GetTestUrl(L"workers", L"shutdown_shared_worker.html?id=0");
ASSERT_TRUE(window->AppendTab(url2));
std::string value = WaitUntilCookieNonEmpty(tab.get(), url,
kTestCompleteCookie, kTestIntervalMs, kTestWaitTimeoutMs);
ASSERT_STREQ(kTestCompleteSuccess, value.c_str());
// 3 tabs; the two queued workers must have coalesced into a single new
// instance, keeping the worker process count at the per-tab quota.
ASSERT_TRUE(WaitForProcessCountToBe(3, max_workers_per_tab));
}
// Tests to make sure that queued shared workers are started up when an
// instance is launched from another tab: the second tab's worker should be
// coalesced with (connected to) the instance queued by the first tab.
TEST_F(WorkerTest, QueuedSharedWorkerStartedFromOtherTab) {
  // Request one worker more than the per-tab quota so the last one is queued.
  int max_workers_per_tab = WorkerService::kMaxWorkersPerTabWhenSeparate;
  GURL url = GetTestUrl(L"workers", L"many_shared_workers.html");
  url = GURL(url.spec() + StringPrintf("?count=%d", max_workers_per_tab+1));
  scoped_refptr<TabProxy> tab(GetActiveTab());
  ASSERT_TRUE(tab.get());
  ASSERT_TRUE(tab->NavigateToURL(url));
  ASSERT_TRUE(WaitForProcessCountToBe(1, max_workers_per_tab));
  // First window has hit its limit. Now launch second window which creates
  // the same worker that was queued in the first window, to ensure it gets
  // connected to the first window too.
  scoped_refptr<BrowserProxy> window(automation()->GetBrowserWindow(0));
  GURL url2 = GetTestUrl(L"workers", L"single_shared_worker.html");
  url2 = GURL(url2.spec() + StringPrintf("?id=%d", max_workers_per_tab));
  // Check AppendTab's result (as the sibling tests do) so a failed navigation
  // surfaces here rather than as a cookie-wait timeout below.
  ASSERT_TRUE(window->AppendTab(url2));
  // The first page's cookie fires once its queued worker finally starts.
  std::string value = WaitUntilCookieNonEmpty(tab.get(), url,
      kTestCompleteCookie, kTestIntervalMs, kTestWaitTimeoutMs);
  ASSERT_STREQ(kTestCompleteSuccess, value.c_str());
  // Two tabs, and the quota+1 workers now all run (the queued one started).
  ASSERT_TRUE(WaitForProcessCountToBe(2, max_workers_per_tab+1));
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment