Commit 362c9d06 authored by reveman@chromium.org's avatar reveman@chromium.org

Re-land: cc: Cancel and re-prioritize worker pool tasks.

This adds a task graph interface to the worker pool and
implements a simple queue instance of this interface for
use by the tile manager.

The task graph interface can be used to describe more
complicated task dependencies in the future and
provides the immediate benefit of seamlessly being
able to cancel and re-prioritize tasks.

BUG=178974,244642
TEST=cc_unittests --gtest_filter=WorkerPoolTest.Dependencies

Review URL: https://chromiumcodereview.appspot.com/14689004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@203041 0039d316-1c4b-4281-b951-d872f2087c98
parent 3d82df66
...@@ -31,7 +31,7 @@ class ScopedPtrHashMap { ...@@ -31,7 +31,7 @@ class ScopedPtrHashMap {
~ScopedPtrHashMap() { clear(); } ~ScopedPtrHashMap() { clear(); }
void swap(ScopedPtrHashMap<Key, Value*>& other) { void swap(ScopedPtrHashMap<Key, Value>& other) {
data_.swap(other.data_); data_.swap(other.data_);
} }
......
...@@ -4,46 +4,93 @@ ...@@ -4,46 +4,93 @@
#include "cc/base/worker_pool.h" #include "cc/base/worker_pool.h"
#include <algorithm> #if defined(OS_ANDROID)
// TODO(epenner): Move thread priorities to base. (crbug.com/170549)
#include <sys/resource.h>
#endif
#include <map>
#include "base/bind.h" #include "base/bind.h"
#include "base/debug/trace_event.h" #include "base/debug/trace_event.h"
#include "base/hash_tables.h"
#include "base/stringprintf.h" #include "base/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/simple_thread.h" #include "base/threading/simple_thread.h"
#include "base/threading/thread_restrictions.h" #include "base/threading/thread_restrictions.h"
#include "cc/base/scoped_ptr_deque.h"
#include "cc/base/scoped_ptr_hash_map.h"
#if defined(COMPILER_GCC)
namespace BASE_HASH_NAMESPACE {
// Hash functor so WorkerPoolTask* can be used as a key in the
// BASE_HASH_NAMESPACE-based hash map when building with GCC, which
// needs this explicit specialization for pointer keys here.
template <> struct hash<cc::internal::WorkerPoolTask*> {
// Identity-based hashing of the pointer value is sufficient since
// tasks are keyed by object identity, not contents.
size_t operator()(cc::internal::WorkerPoolTask* ptr) const {
return hash<size_t>()(reinterpret_cast<size_t>(ptr));
}
};
} // namespace BASE_HASH_NAMESPACE
#endif  // COMPILER_GCC
namespace cc { namespace cc {
namespace { namespace internal {
class WorkerPoolTaskImpl : public internal::WorkerPoolTask {
public:
WorkerPoolTaskImpl(const WorkerPool::Callback& task,
const base::Closure& reply)
: internal::WorkerPoolTask(reply),
task_(task) {}
virtual void RunOnThread(unsigned thread_index) OVERRIDE { WorkerPoolTask::WorkerPoolTask()
task_.Run(); : did_schedule_(false),
} did_run_(false),
did_complete_(false) {
}
private: WorkerPoolTask::WorkerPoolTask(TaskVector* dependencies)
WorkerPool::Callback task_; : did_schedule_(false),
}; did_run_(false),
did_complete_(false) {
dependencies_.swap(*dependencies);
}
} // namespace WorkerPoolTask::~WorkerPoolTask() {
DCHECK_EQ(did_schedule_, did_complete_);
DCHECK(!did_run_ || did_schedule_);
DCHECK(!did_run_ || did_complete_);
}
namespace internal { void WorkerPoolTask::DidSchedule() {
DCHECK(!did_complete_);
did_schedule_ = true;
}
WorkerPoolTask::WorkerPoolTask(const base::Closure& reply) : reply_(reply) { void WorkerPoolTask::WillRun() {
DCHECK(did_schedule_);
DCHECK(!did_complete_);
DCHECK(!did_run_);
} }
WorkerPoolTask::~WorkerPoolTask() { void WorkerPoolTask::DidRun() {
did_run_ = true;
} }
void WorkerPoolTask::DidComplete() { void WorkerPoolTask::DidComplete() {
reply_.Run(); DCHECK(did_schedule_);
DCHECK(!did_complete_);
did_complete_ = true;
}
// Returns true once every dependency reports HasFinishedRunning().
// A task with no dependencies is always ready to run.
bool WorkerPoolTask::IsReadyToRun() const {
// TODO(reveman): Use counter to improve performance.
// O(number of dependencies) scan on every call; reverse iteration
// presumably checks the most recently added dependencies first —
// TODO confirm that is the intent of rbegin().
for (TaskVector::const_reverse_iterator it = dependencies_.rbegin();
it != dependencies_.rend(); ++it) {
WorkerPoolTask* dependency = *it;
if (!dependency->HasFinishedRunning())
return false;
}
return true;
}
// Returns true after DidRun() has been called, i.e. the task body has
// executed. Note this is distinct from HasCompleted(): the completion
// callback may not have been dispatched yet.
bool WorkerPoolTask::HasFinishedRunning() const {
return did_run_;
}
bool WorkerPoolTask::HasCompleted() const {
return did_complete_;
} }
} // namespace internal } // namespace internal
...@@ -60,17 +107,51 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { ...@@ -60,17 +107,51 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
void Shutdown(); void Shutdown();
void PostTask(scoped_ptr<internal::WorkerPoolTask> task); // Schedule running of |root| task and all its dependencies. Tasks
// previously scheduled but no longer needed to run |root| will be
// canceled unless already running. Canceled tasks are moved to
// |completed_tasks_| without being run. The result is that once
// scheduled, a task is guaranteed to end up in the |completed_tasks_|
// queue even if they later get canceled by another call to
// ScheduleTasks().
void ScheduleTasks(internal::WorkerPoolTask* root);
// Appends all completed tasks to worker pool's completed tasks queue // Collect all completed tasks in |completed_tasks|. Returns true if idle.
// and returns true if idle. bool CollectCompletedTasks(TaskDeque* completed_tasks);
bool CollectCompletedTasks();
private: private:
// Appends all completed tasks to |completed_tasks|. Lock must class ScheduledTask {
// already be acquired before calling this function. public:
bool AppendCompletedTasksWithLockAcquired( ScheduledTask(internal::WorkerPoolTask* dependent, unsigned priority)
ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks); : priority_(priority) {
if (dependent)
dependents_.push_back(dependent);
}
internal::WorkerPoolTask::TaskVector& dependents() { return dependents_; }
unsigned priority() const { return priority_; }
private:
internal::WorkerPoolTask::TaskVector dependents_;
unsigned priority_;
};
typedef internal::WorkerPoolTask* ScheduledTaskMapKey;
typedef ScopedPtrHashMap<ScheduledTaskMapKey, ScheduledTask>
ScheduledTaskMap;
// This builds a ScheduledTaskMap from a root task.
static unsigned BuildScheduledTaskMapRecursive(
internal::WorkerPoolTask* task,
internal::WorkerPoolTask* dependent,
unsigned priority,
ScheduledTaskMap* scheduled_tasks);
static void BuildScheduledTaskMap(
internal::WorkerPoolTask* root, ScheduledTaskMap* scheduled_tasks);
// Collect all completed tasks by swapping the contents of
// |completed_tasks| and |completed_tasks_|. Lock must be acquired
// before calling this function. Returns true if idle.
bool CollectCompletedTasksWithLockAcquired(TaskDeque* completed_tasks);
// Schedule an OnIdleOnOriginThread callback if not already pending. // Schedule an OnIdleOnOriginThread callback if not already pending.
// Lock must already be acquired before calling this function. // Lock must already be acquired before calling this function.
...@@ -90,8 +171,8 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { ...@@ -90,8 +171,8 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
mutable base::Lock lock_; mutable base::Lock lock_;
// Condition variable that is waited on by worker threads until new // Condition variable that is waited on by worker threads until new
// tasks are posted or shutdown starts. // tasks are ready to run or shutdown starts.
base::ConditionVariable has_pending_tasks_cv_; base::ConditionVariable has_ready_to_run_tasks_cv_;
// Target message loop used for posting callbacks. // Target message loop used for posting callbacks.
scoped_refptr<base::MessageLoopProxy> origin_loop_; scoped_refptr<base::MessageLoopProxy> origin_loop_;
...@@ -106,15 +187,25 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate { ...@@ -106,15 +187,25 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
// loop index is 0. // loop index is 0.
unsigned next_thread_index_; unsigned next_thread_index_;
// Number of tasks currently running.
unsigned running_task_count_;
// Set during shutdown. Tells workers to exit when no more tasks // Set during shutdown. Tells workers to exit when no more tasks
// are pending. // are pending.
bool shutdown_; bool shutdown_;
typedef ScopedPtrDeque<internal::WorkerPoolTask> TaskDeque; // The root task that is a dependent of all other tasks.
TaskDeque pending_tasks_; scoped_refptr<internal::WorkerPoolTask> root_;
// This set contains all pending tasks.
ScheduledTaskMap pending_tasks_;
// Ordered set of tasks that are ready to run.
// TODO(reveman): priority_queue might be more efficient.
typedef std::map<unsigned, internal::WorkerPoolTask*> TaskMap;
TaskMap ready_to_run_tasks_;
// This set contains all currently running tasks.
ScheduledTaskMap running_tasks_;
// Completed tasks not yet collected by origin thread.
TaskDeque completed_tasks_; TaskDeque completed_tasks_;
ScopedPtrDeque<base::DelegateSimpleThread> workers_; ScopedPtrDeque<base::DelegateSimpleThread> workers_;
...@@ -127,25 +218,24 @@ WorkerPool::Inner::Inner(WorkerPool* worker_pool, ...@@ -127,25 +218,24 @@ WorkerPool::Inner::Inner(WorkerPool* worker_pool,
const std::string& thread_name_prefix) const std::string& thread_name_prefix)
: worker_pool_on_origin_thread_(worker_pool), : worker_pool_on_origin_thread_(worker_pool),
lock_(), lock_(),
has_pending_tasks_cv_(&lock_), has_ready_to_run_tasks_cv_(&lock_),
origin_loop_(base::MessageLoopProxy::current()), origin_loop_(base::MessageLoopProxy::current()),
weak_ptr_factory_(this), weak_ptr_factory_(this),
on_idle_callback_(base::Bind(&WorkerPool::Inner::OnIdleOnOriginThread, on_idle_callback_(base::Bind(&WorkerPool::Inner::OnIdleOnOriginThread,
weak_ptr_factory_.GetWeakPtr())), weak_ptr_factory_.GetWeakPtr())),
on_idle_pending_(false), on_idle_pending_(false),
next_thread_index_(0), next_thread_index_(0),
running_task_count_(0),
shutdown_(false) { shutdown_(false) {
base::AutoLock lock(lock_); base::AutoLock lock(lock_);
while (workers_.size() < num_threads) { while (workers_.size() < num_threads) {
scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr( scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
new base::DelegateSimpleThread( new base::DelegateSimpleThread(
this, this,
thread_name_prefix + thread_name_prefix +
base::StringPrintf( base::StringPrintf(
"Worker%u", "Worker%u",
static_cast<unsigned>(workers_.size() + 1)).c_str())); static_cast<unsigned>(workers_.size() + 1)).c_str()));
worker->Start(); worker->Start();
workers_.push_back(worker.Pass()); workers_.push_back(worker.Pass());
} }
...@@ -156,12 +246,10 @@ WorkerPool::Inner::~Inner() { ...@@ -156,12 +246,10 @@ WorkerPool::Inner::~Inner() {
DCHECK(shutdown_); DCHECK(shutdown_);
// Cancel all pending callbacks.
weak_ptr_factory_.InvalidateWeakPtrs();
DCHECK_EQ(0u, pending_tasks_.size()); DCHECK_EQ(0u, pending_tasks_.size());
DCHECK_EQ(0u, ready_to_run_tasks_.size());
DCHECK_EQ(0u, running_tasks_.size());
DCHECK_EQ(0u, completed_tasks_.size()); DCHECK_EQ(0u, completed_tasks_.size());
DCHECK_EQ(0u, running_task_count_);
} }
void WorkerPool::Inner::Shutdown() { void WorkerPool::Inner::Shutdown() {
...@@ -173,7 +261,7 @@ void WorkerPool::Inner::Shutdown() { ...@@ -173,7 +261,7 @@ void WorkerPool::Inner::Shutdown() {
// Wake up a worker so it knows it should exit. This will cause all workers // Wake up a worker so it knows it should exit. This will cause all workers
// to exit as each will wake up another worker before exiting. // to exit as each will wake up another worker before exiting.
has_pending_tasks_cv_.Signal(); has_ready_to_run_tasks_cv_.Signal();
} }
while (workers_.size()) { while (workers_.size()) {
...@@ -183,32 +271,100 @@ void WorkerPool::Inner::Shutdown() { ...@@ -183,32 +271,100 @@ void WorkerPool::Inner::Shutdown() {
base::ThreadRestrictions::ScopedAllowIO allow_io; base::ThreadRestrictions::ScopedAllowIO allow_io;
worker->Join(); worker->Join();
} }
// Cancel any pending OnIdle callback.
weak_ptr_factory_.InvalidateWeakPtrs();
} }
void WorkerPool::Inner::PostTask(scoped_ptr<internal::WorkerPoolTask> task) { void WorkerPool::Inner::ScheduleTasks(internal::WorkerPoolTask* root) {
base::AutoLock lock(lock_); // It is OK to call ScheduleTasks() after shutdown if |root| is NULL.
DCHECK(!root || !shutdown_);
scoped_refptr<internal::WorkerPoolTask> new_root(root);
ScheduledTaskMap new_pending_tasks;
ScheduledTaskMap new_running_tasks;
TaskMap new_ready_to_run_tasks;
// Build scheduled task map before acquiring |lock_|.
if (root)
BuildScheduledTaskMap(root, &new_pending_tasks);
{
base::AutoLock lock(lock_);
// First remove all completed tasks from |new_pending_tasks|.
for (TaskDeque::iterator it = completed_tasks_.begin();
it != completed_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = *it;
new_pending_tasks.take_and_erase(task);
}
pending_tasks_.push_back(task.Pass()); // Move tasks not present in |new_pending_tasks| to |completed_tasks_|.
for (ScheduledTaskMap::iterator it = pending_tasks_.begin();
it != pending_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = it->first;
// There is more work available, so wake up worker thread. // Task has completed if not present in |new_pending_tasks|.
has_pending_tasks_cv_.Signal(); if (!new_pending_tasks.contains(task))
completed_tasks_.push_back(task);
}
// Build new running task set.
for (ScheduledTaskMap::iterator it = running_tasks_.begin();
it != running_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = it->first;
// Transfer scheduled task value from |new_pending_tasks| to
// |new_running_tasks| if currently running. Value must be set to
// NULL if |new_pending_tasks| doesn't contain task. This does
// the right thing in both cases.
new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
}
// Build new "ready to run" tasks queue.
for (ScheduledTaskMap::iterator it = new_pending_tasks.begin();
it != new_pending_tasks.end(); ++it) {
internal::WorkerPoolTask* task = it->first;
// Completed tasks should not exist in |new_pending_tasks|.
DCHECK(!task->HasFinishedRunning());
// Call DidSchedule() to indicate that this task has been scheduled.
// Note: This is only for debugging purposes.
task->DidSchedule();
DCHECK_EQ(0u, new_ready_to_run_tasks.count(it->second->priority()));
if (task->IsReadyToRun())
new_ready_to_run_tasks[it->second->priority()] = task;
}
// Swap root task and task sets.
// Note: old tasks are intentionally destroyed after releasing |lock_|.
root_.swap(new_root);
pending_tasks_.swap(new_pending_tasks);
running_tasks_.swap(new_running_tasks);
ready_to_run_tasks_.swap(new_ready_to_run_tasks);
// If there is more work available, wake up worker thread.
if (!ready_to_run_tasks_.empty())
has_ready_to_run_tasks_cv_.Signal();
}
} }
bool WorkerPool::Inner::CollectCompletedTasks() { bool WorkerPool::Inner::CollectCompletedTasks(TaskDeque* completed_tasks) {
base::AutoLock lock(lock_); base::AutoLock lock(lock_);
return AppendCompletedTasksWithLockAcquired( return CollectCompletedTasksWithLockAcquired(completed_tasks);
&worker_pool_on_origin_thread_->completed_tasks_);
} }
bool WorkerPool::Inner::AppendCompletedTasksWithLockAcquired( bool WorkerPool::Inner::CollectCompletedTasksWithLockAcquired(
ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks) { TaskDeque* completed_tasks) {
lock_.AssertAcquired(); lock_.AssertAcquired();
while (completed_tasks_.size()) DCHECK_EQ(0u, completed_tasks->size());
completed_tasks->push_back(completed_tasks_.take_front().Pass()); completed_tasks->swap(completed_tasks_);
return !running_task_count_ && pending_tasks_.empty(); return running_tasks_.empty() && pending_tasks_.empty();
} }
void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() { void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() {
...@@ -221,6 +377,8 @@ void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() { ...@@ -221,6 +377,8 @@ void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() {
} }
void WorkerPool::Inner::OnIdleOnOriginThread() { void WorkerPool::Inner::OnIdleOnOriginThread() {
TaskDeque completed_tasks;
{ {
base::AutoLock lock(lock_); base::AutoLock lock(lock_);
...@@ -228,14 +386,13 @@ void WorkerPool::Inner::OnIdleOnOriginThread() { ...@@ -228,14 +386,13 @@ void WorkerPool::Inner::OnIdleOnOriginThread() {
on_idle_pending_ = false; on_idle_pending_ = false;
// Early out if no longer idle. // Early out if no longer idle.
if (running_task_count_ || !pending_tasks_.empty()) if (!running_tasks_.empty() || !pending_tasks_.empty())
return; return;
AppendCompletedTasksWithLockAcquired( CollectCompletedTasksWithLockAcquired(&completed_tasks);
&worker_pool_on_origin_thread_->completed_tasks_);
} }
worker_pool_on_origin_thread_->OnIdle(); worker_pool_on_origin_thread_->OnIdle(&completed_tasks);
} }
void WorkerPool::Inner::Run() { void WorkerPool::Inner::Run() {
...@@ -251,29 +408,37 @@ void WorkerPool::Inner::Run() { ...@@ -251,29 +408,37 @@ void WorkerPool::Inner::Run() {
int thread_index = next_thread_index_++; int thread_index = next_thread_index_++;
while (true) { while (true) {
if (pending_tasks_.empty()) { if (ready_to_run_tasks_.empty()) {
// Exit when shutdown is set and no more tasks are pending. if (pending_tasks_.empty()) {
if (shutdown_) // Exit when shutdown is set and no more tasks are pending.
break; if (shutdown_)
break;
// Schedule an idle callback if requested and not pending.
if (!running_task_count_) // Schedule an idle callback if no tasks are running.
ScheduleOnIdleWithLockAcquired(); if (running_tasks_.empty())
ScheduleOnIdleWithLockAcquired();
// Wait for new pending tasks. }
has_pending_tasks_cv_.Wait();
// Wait for more tasks.
has_ready_to_run_tasks_cv_.Wait();
continue; continue;
} }
// Get next task. // Take top priority task from |ready_to_run_tasks_|.
scoped_ptr<internal::WorkerPoolTask> task = pending_tasks_.take_front(); scoped_refptr<internal::WorkerPoolTask> task(
ready_to_run_tasks_.begin()->second);
ready_to_run_tasks_.erase(ready_to_run_tasks_.begin());
// Move task from |pending_tasks_| to |running_tasks_|.
DCHECK(pending_tasks_.contains(task));
DCHECK(!running_tasks_.contains(task));
running_tasks_.set(task, pending_tasks_.take_and_erase(task));
// Increment |running_task_count_| before starting to run task. // There may be more work available, so wake up another worker thread.
running_task_count_++; has_ready_to_run_tasks_cv_.Signal();
// There may be more work available, so wake up another // Call WillRun() before releasing |lock_| and running task.
// worker thread. task->WillRun();
has_pending_tasks_cv_.Signal();
{ {
base::AutoUnlock unlock(lock_); base::AutoUnlock unlock(lock_);
...@@ -281,15 +446,95 @@ void WorkerPool::Inner::Run() { ...@@ -281,15 +446,95 @@ void WorkerPool::Inner::Run() {
task->RunOnThread(thread_index); task->RunOnThread(thread_index);
} }
completed_tasks_.push_back(task.Pass()); // This will mark task as finished running.
task->DidRun();
// Now iterate over all dependents to check if they are ready to run.
scoped_ptr<ScheduledTask> scheduled_task = running_tasks_.take_and_erase(
task);
if (scheduled_task) {
typedef internal::WorkerPoolTask::TaskVector TaskVector;
for (TaskVector::iterator it = scheduled_task->dependents().begin();
it != scheduled_task->dependents().end(); ++it) {
internal::WorkerPoolTask* dependent = *it;
if (!dependent->IsReadyToRun())
continue;
// Task is ready. Add it to |ready_to_run_tasks_|.
DCHECK(pending_tasks_.contains(dependent));
unsigned priority = pending_tasks_.get(dependent)->priority();
DCHECK(!ready_to_run_tasks_.count(priority) ||
ready_to_run_tasks_[priority] == dependent);
ready_to_run_tasks_[priority] = dependent;
}
}
// Decrement |running_task_count_| now that we are done running task. // Finally add task to |completed_tasks_|.
running_task_count_--; completed_tasks_.push_back(task);
} }
// We noticed we should exit. Wake up the next worker so it knows it should // We noticed we should exit. Wake up the next worker so it knows it should
// exit as well (because the Shutdown() code only signals once). // exit as well (because the Shutdown() code only signals once).
has_pending_tasks_cv_.Signal(); has_ready_to_run_tasks_cv_.Signal();
}
// BuildScheduledTaskMap() takes a task tree as input and constructs
// a unique set of tasks with edges between dependencies pointing in
// the direction of the dependents. Each task is given a unique priority
// which is currently the same as the DFS traversal order.
//
// Input: Output:
//
// root task4 Task | Priority (lower is better)
// / \ / \ -------+---------------------------
// task1 task2 task3 task2 root | 4
// | | | | task1 | 2
// task3 | task1 | task2 | 3
// | | \ / task3 | 1
// task4 task4 root task4 | 0
//
// The output can be used to efficiently maintain a queue of
// "ready to run" tasks.
// static
unsigned WorkerPool::Inner::BuildScheduledTaskMapRecursive(
internal::WorkerPoolTask* task,
internal::WorkerPoolTask* dependent,
unsigned priority,
ScheduledTaskMap* scheduled_tasks) {
// Skip sub-tree if task has already completed: a completed task and
// its dependencies never need scheduling again, so |priority| is
// returned unchanged.
if (task->HasCompleted())
return priority;
// If |task| was already visited through another dependent, only
// record the extra dependent edge; its priority is already assigned.
ScheduledTaskMap::iterator scheduled_it = scheduled_tasks->find(task);
if (scheduled_it != scheduled_tasks->end()) {
DCHECK(dependent);
scheduled_it->second->dependents().push_back(dependent);
return priority;
}
// Post-order DFS: assign priorities to all dependencies first, so
// every dependency ends up with a lower (better) priority than
// |task| itself (see the diagram above this function).
typedef internal::WorkerPoolTask::TaskVector TaskVector;
for (TaskVector::iterator it = task->dependencies().begin();
it != task->dependencies().end(); ++it) {
internal::WorkerPoolTask* dependency = *it;
priority = BuildScheduledTaskMapRecursive(
dependency, task, priority, scheduled_tasks);
}
// Finally insert |task| with the next priority. |dependent| may be
// NULL for the root task.
scheduled_tasks->set(task,
make_scoped_ptr(new ScheduledTask(dependent,
priority)));
return priority + 1;
}
// static
void WorkerPool::Inner::BuildScheduledTaskMap(
internal::WorkerPoolTask* root,
ScheduledTaskMap* scheduled_tasks) {
const unsigned kBasePriority = 0u;
DCHECK(root);
BuildScheduledTaskMapRecursive(root, NULL, kBasePriority, scheduled_tasks);
} }
WorkerPool::WorkerPool(size_t num_threads, WorkerPool::WorkerPool(size_t num_threads,
...@@ -297,83 +542,105 @@ WorkerPool::WorkerPool(size_t num_threads, ...@@ -297,83 +542,105 @@ WorkerPool::WorkerPool(size_t num_threads,
const std::string& thread_name_prefix) const std::string& thread_name_prefix)
: client_(NULL), : client_(NULL),
origin_loop_(base::MessageLoopProxy::current()), origin_loop_(base::MessageLoopProxy::current()),
weak_ptr_factory_(this),
check_for_completed_tasks_delay_(check_for_completed_tasks_delay), check_for_completed_tasks_delay_(check_for_completed_tasks_delay),
check_for_completed_tasks_pending_(false), check_for_completed_tasks_pending_(false),
in_dispatch_completion_callbacks_(false),
inner_(make_scoped_ptr(new Inner(this, inner_(make_scoped_ptr(new Inner(this,
num_threads, num_threads,
thread_name_prefix))) { thread_name_prefix))) {
} }
WorkerPool::~WorkerPool() { WorkerPool::~WorkerPool() {
// Cancel all pending callbacks.
weak_ptr_factory_.InvalidateWeakPtrs();
DCHECK_EQ(0u, completed_tasks_.size());
} }
void WorkerPool::Shutdown() { void WorkerPool::Shutdown() {
TRACE_EVENT0("cc", "WorkerPool::Shutdown");
DCHECK(!in_dispatch_completion_callbacks_);
inner_->Shutdown(); inner_->Shutdown();
inner_->CollectCompletedTasks();
DispatchCompletionCallbacks();
}
void WorkerPool::PostTaskAndReply( TaskDeque completed_tasks;
const Callback& task, const base::Closure& reply) { inner_->CollectCompletedTasks(&completed_tasks);
PostTask(make_scoped_ptr(new WorkerPoolTaskImpl( DispatchCompletionCallbacks(&completed_tasks);
task,
reply)).PassAs<internal::WorkerPoolTask>());
} }
void WorkerPool::OnIdle() { void WorkerPool::OnIdle(TaskDeque* completed_tasks) {
TRACE_EVENT0("cc", "WorkerPool::OnIdle"); TRACE_EVENT0("cc", "WorkerPool::OnIdle");
DispatchCompletionCallbacks(); DCHECK(!in_dispatch_completion_callbacks_);
DispatchCompletionCallbacks(completed_tasks);
// Cancel any pending check for completed tasks.
check_for_completed_tasks_callback_.Cancel();
check_for_completed_tasks_pending_ = false;
} }
void WorkerPool::ScheduleCheckForCompletedTasks() { void WorkerPool::ScheduleCheckForCompletedTasks() {
if (check_for_completed_tasks_pending_) if (check_for_completed_tasks_pending_)
return; return;
check_for_completed_tasks_callback_.Reset(
base::Bind(&WorkerPool::CheckForCompletedTasks,
base::Unretained(this)));
origin_loop_->PostDelayedTask( origin_loop_->PostDelayedTask(
FROM_HERE, FROM_HERE,
base::Bind(&WorkerPool::CheckForCompletedTasks, check_for_completed_tasks_callback_.callback(),
weak_ptr_factory_.GetWeakPtr()),
check_for_completed_tasks_delay_); check_for_completed_tasks_delay_);
check_for_completed_tasks_pending_ = true; check_for_completed_tasks_pending_ = true;
} }
void WorkerPool::CheckForCompletedTasks() { void WorkerPool::CheckForCompletedTasks() {
TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks"); TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
DCHECK(check_for_completed_tasks_pending_);
DCHECK(!in_dispatch_completion_callbacks_);
check_for_completed_tasks_callback_.Cancel();
check_for_completed_tasks_pending_ = false; check_for_completed_tasks_pending_ = false;
TaskDeque completed_tasks;
// Schedule another check for completed tasks if not idle. // Schedule another check for completed tasks if not idle.
if (!inner_->CollectCompletedTasks()) if (!inner_->CollectCompletedTasks(&completed_tasks))
ScheduleCheckForCompletedTasks(); ScheduleCheckForCompletedTasks();
DispatchCompletionCallbacks(); DispatchCompletionCallbacks(&completed_tasks);
} }
void WorkerPool::DispatchCompletionCallbacks() { void WorkerPool::DispatchCompletionCallbacks(TaskDeque* completed_tasks) {
TRACE_EVENT0("cc", "WorkerPool::DispatchCompletionCallbacks"); TRACE_EVENT0("cc", "WorkerPool::DispatchCompletionCallbacks");
if (completed_tasks_.empty()) // Early out when |completed_tasks| is empty to prevent unnecessary
// call to DidFinishDispatchingWorkerPoolCompletionCallbacks().
if (completed_tasks->empty())
return; return;
while (completed_tasks_.size()) { // Worker pool instance is not reentrant while processing completed tasks.
scoped_ptr<internal::WorkerPoolTask> task = completed_tasks_.take_front(); in_dispatch_completion_callbacks_ = true;
while (!completed_tasks->empty()) {
scoped_refptr<internal::WorkerPoolTask> task = completed_tasks->front();
completed_tasks->pop_front();
task->DidComplete(); task->DidComplete();
task->DispatchCompletionCallback();
} }
in_dispatch_completion_callbacks_ = false;
DCHECK(client_); DCHECK(client_);
client_->DidFinishDispatchingWorkerPoolCompletionCallbacks(); client_->DidFinishDispatchingWorkerPoolCompletionCallbacks();
} }
void WorkerPool::PostTask(scoped_ptr<internal::WorkerPoolTask> task) { void WorkerPool::ScheduleTasks(internal::WorkerPoolTask* root) {
// Schedule check for completed tasks if not pending. TRACE_EVENT0("cc", "WorkerPool::ScheduleTasks");
ScheduleCheckForCompletedTasks();
DCHECK(!in_dispatch_completion_callbacks_);
// Schedule check for completed tasks.
if (root)
ScheduleCheckForCompletedTasks();
inner_->PostTask(task.Pass()); inner_->ScheduleTasks(root);
} }
} // namespace cc } // namespace cc
...@@ -5,31 +5,52 @@ ...@@ -5,31 +5,52 @@
#ifndef CC_BASE_WORKER_POOL_H_ #ifndef CC_BASE_WORKER_POOL_H_
#define CC_BASE_WORKER_POOL_H_ #define CC_BASE_WORKER_POOL_H_
#include <deque>
#include <string> #include <string>
#include <vector>
#include "base/cancelable_callback.h" #include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h" #include "base/memory/weak_ptr.h"
#include "base/message_loop.h" #include "base/message_loop.h"
#include "cc/base/cc_export.h" #include "cc/base/cc_export.h"
#include "cc/base/scoped_ptr_deque.h"
namespace cc { namespace cc {
namespace internal { namespace internal {
class WorkerPoolTask { class CC_EXPORT WorkerPoolTask
: public base::RefCountedThreadSafe<WorkerPoolTask> {
public: public:
virtual ~WorkerPoolTask(); typedef std::vector<scoped_refptr<WorkerPoolTask> > TaskVector;
virtual void RunOnThread(unsigned thread_index) = 0; virtual void RunOnThread(unsigned thread_index) = 0;
virtual void DispatchCompletionCallback() = 0;
void DidSchedule();
void WillRun();
void DidRun();
void DidComplete(); void DidComplete();
bool IsReadyToRun() const;
bool HasFinishedRunning() const;
bool HasCompleted() const;
TaskVector& dependencies() { return dependencies_; }
protected: protected:
explicit WorkerPoolTask(const base::Closure& reply); friend class base::RefCountedThreadSafe<WorkerPoolTask>;
WorkerPoolTask();
explicit WorkerPoolTask(TaskVector* dependencies);
virtual ~WorkerPoolTask();
const base::Closure reply_; private:
bool did_schedule_;
bool did_run_;
bool did_complete_;
TaskVector dependencies_;
}; };
} // namespace internal } // namespace internal
...@@ -42,67 +63,50 @@ class CC_EXPORT WorkerPoolClient { ...@@ -42,67 +63,50 @@ class CC_EXPORT WorkerPoolClient {
virtual ~WorkerPoolClient() {} virtual ~WorkerPoolClient() {}
}; };
// A worker thread pool that runs rendering tasks and guarantees completion // A worker thread pool that runs tasks provided by task graph and
// of all pending tasks at shutdown. // guarantees completion of all pending tasks at shutdown.
class CC_EXPORT WorkerPool { class CC_EXPORT WorkerPool {
public: public:
typedef base::Callback<void()> Callback;
virtual ~WorkerPool(); virtual ~WorkerPool();
static scoped_ptr<WorkerPool> Create(
size_t num_threads,
base::TimeDelta check_for_completed_tasks_delay,
const std::string& thread_name_prefix) {
return make_scoped_ptr(new WorkerPool(num_threads,
check_for_completed_tasks_delay,
thread_name_prefix));
}
// Tells the worker pool to shutdown and returns once all pending tasks have // Tells the worker pool to shutdown and returns once all pending tasks have
// completed. // completed.
void Shutdown(); virtual void Shutdown();
// Posts |task| to worker pool. On completion, |reply|
// is posted to the thread that called PostTaskAndReply().
void PostTaskAndReply(const Callback& task, const base::Closure& reply);
// Set a new client. // Set a new client.
void SetClient(WorkerPoolClient* client) { void SetClient(WorkerPoolClient* client) {
client_ = client; client_ = client;
} }
// Force a check for completed tasks.
void CheckForCompletedTasks();
protected: protected:
WorkerPool(size_t num_threads, WorkerPool(size_t num_threads,
base::TimeDelta check_for_completed_tasks_delay, base::TimeDelta check_for_completed_tasks_delay,
const std::string& thread_name_prefix); const std::string& thread_name_prefix);
void PostTask(scoped_ptr<internal::WorkerPoolTask> task); void ScheduleTasks(internal::WorkerPoolTask* root);
private: private:
class Inner; class Inner;
friend class Inner; friend class Inner;
void OnTaskCompleted(); typedef std::deque<scoped_refptr<internal::WorkerPoolTask> > TaskDeque;
void OnIdle();
void OnIdle(TaskDeque* completed_tasks);
void ScheduleCheckForCompletedTasks(); void ScheduleCheckForCompletedTasks();
void CheckForCompletedTasks(); void DispatchCompletionCallbacks(TaskDeque* completed_tasks);
void DispatchCompletionCallbacks();
WorkerPoolClient* client_; WorkerPoolClient* client_;
scoped_refptr<base::MessageLoopProxy> origin_loop_; scoped_refptr<base::MessageLoopProxy> origin_loop_;
base::WeakPtrFactory<WorkerPool> weak_ptr_factory_; base::CancelableClosure check_for_completed_tasks_callback_;
base::TimeDelta check_for_completed_tasks_delay_; base::TimeDelta check_for_completed_tasks_delay_;
bool check_for_completed_tasks_pending_; bool check_for_completed_tasks_pending_;
bool in_dispatch_completion_callbacks_;
// Holds all completed tasks for which we have not yet dispatched
// reply callbacks.
ScopedPtrDeque<internal::WorkerPoolTask> completed_tasks_;
// Hide the gory details of the worker pool in |inner_|. // Hide the gory details of the worker pool in |inner_|.
const scoped_ptr<Inner> inner_; const scoped_ptr<Inner> inner_;
DISALLOW_COPY_AND_ASSIGN(WorkerPool);
}; };
} // namespace cc } // namespace cc
......
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "cc/base/worker_pool.h"
#include "base/time.h"
#include "cc/base/completion_event.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cc {
namespace {
static const int kTimeLimitMillis = 2000;
static const int kWarmupRuns = 5;
static const int kTimeCheckInterval = 10;
// No-op task used to measure the pure overhead of building, scheduling
// and running task graphs. |dependencies| become the task's graph
// dependencies.
class PerfTaskImpl : public internal::WorkerPoolTask {
 public:
  explicit PerfTaskImpl(internal::WorkerPoolTask::TaskVector* dependencies)
      : internal::WorkerPoolTask(dependencies) {}

  // Overridden from internal::WorkerPoolTask:
  virtual void RunOnThread(unsigned thread_index) OVERRIDE {}
  virtual void DispatchCompletionCallback() OVERRIDE {}

 private:
  // Tasks are held by scoped_refptr; destruction goes through Release().
  virtual ~PerfTaskImpl() {}
};
class PerfControlTaskImpl : public internal::WorkerPoolTask {
public:
explicit PerfControlTaskImpl(
internal::WorkerPoolTask::TaskVector* dependencies)
: internal::WorkerPoolTask(dependencies),
did_start_(new CompletionEvent),
can_finish_(new CompletionEvent) {}
// Overridden from internal::WorkerPoolTask:
virtual void RunOnThread(unsigned thread_index) OVERRIDE {
did_start_->Signal();
can_finish_->Wait();
}
virtual void DispatchCompletionCallback() OVERRIDE {}
void WaitForTaskToStartRunning() {
did_start_->Wait();
}
void AllowTaskToFinish() {
can_finish_->Signal();
}
private:
virtual ~PerfControlTaskImpl() {}
scoped_ptr<CompletionEvent> did_start_;
scoped_ptr<CompletionEvent> can_finish_;
};
// Minimal WorkerPool for perf measurements: one worker thread and a
// completed-task check delay (1024 days) long enough to never fire
// during a test run. Exposes the protected ScheduleTasks() publicly.
class PerfWorkerPool : public WorkerPool {
 public:
  PerfWorkerPool() : WorkerPool(1, base::TimeDelta::FromDays(1024), "test") {}
  virtual ~PerfWorkerPool() {}

  static scoped_ptr<PerfWorkerPool> Create() {
    return make_scoped_ptr(new PerfWorkerPool);
  }

  // Forwards to WorkerPool::ScheduleTasks(). |root| may be NULL to
  // cancel previously scheduled tasks.
  void ScheduleTasks(internal::WorkerPoolTask* root) {
    WorkerPool::ScheduleTasks(root);
  }
};
// Harness for the worker pool perf tests below. Builds synthetic task
// graphs of configurable depth and fan-out, optionally schedules and
// runs them on a single-threaded PerfWorkerPool, and reports throughput
// in runs per second.
class WorkerPoolPerfTest : public testing::Test,
                           public WorkerPoolClient {
 public:
  WorkerPoolPerfTest() : num_runs_(0) {}

  // Overridden from testing::Test:
  virtual void SetUp() OVERRIDE {
    worker_pool_ = PerfWorkerPool::Create();
    worker_pool_->SetClient(this);
  }
  virtual void TearDown() OVERRIDE {
    worker_pool_->Shutdown();
  }

  // Overridden from WorkerPoolClient:
  virtual void DidFinishDispatchingWorkerPoolCompletionCallbacks() OVERRIDE {}

  void EndTest() {
    elapsed_ = base::TimeTicks::HighResNow() - start_time_;
  }

  // Prints the measured throughput for |test_name|. Takes the name by
  // const reference to avoid copying the string on every call.
  void AfterTest(const std::string& test_name) {
    // Format matches chrome/test/perf/perf_test.h:PrintResult
    printf("*RESULT %s: %.2f runs/s\n",
           test_name.c_str(),
           num_runs_ / elapsed_.InSecondsF());
  }

  // Recursively builds a task graph |max_depth| levels deep with
  // |num_children_per_node| children per node and appends the root of
  // the subtree to |dependencies|. If |leaf_task_| is set it becomes
  // the single dependency of every leaf node.
  void BuildTaskGraph(internal::WorkerPoolTask::TaskVector* dependencies,
                      unsigned current_depth,
                      unsigned max_depth,
                      unsigned num_children_per_node) {
    internal::WorkerPoolTask::TaskVector children;
    if (current_depth < max_depth) {
      for (unsigned i = 0; i < num_children_per_node; ++i) {
        BuildTaskGraph(&children,
                       current_depth + 1,
                       max_depth,
                       num_children_per_node);
      }
    } else if (leaf_task_) {
      children.push_back(leaf_task_);
    }
    dependencies->push_back(make_scoped_refptr(new PerfTaskImpl(&children)));
  }

  // Bumps the run counter and decides whether the timed loop should
  // continue. Timing starts after |kWarmupRuns| runs; elapsed time is
  // checked every |kTimeCheckInterval| runs against |kTimeLimitMillis|.
  bool DidRun() {
    ++num_runs_;
    if (num_runs_ == kWarmupRuns)
      start_time_ = base::TimeTicks::HighResNow();

    if (!start_time_.is_null() && (num_runs_ % kTimeCheckInterval) == 0) {
      base::TimeDelta elapsed = base::TimeTicks::HighResNow() - start_time_;
      if (elapsed >= base::TimeDelta::FromMilliseconds(kTimeLimitMillis)) {
        elapsed_ = elapsed;
        return false;
      }
    }
    return true;
  }

  // Measures how fast task graphs can be constructed (no scheduling).
  void RunBuildTaskGraphTest(const std::string& test_name,
                             unsigned max_depth,
                             unsigned num_children_per_node) {
    start_time_ = base::TimeTicks();
    num_runs_ = 0;
    do {
      internal::WorkerPoolTask::TaskVector children;
      BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
    } while (DidRun());

    AfterTest(test_name);
  }

  // Measures scheduling throughput. A blocking leaf task keeps the
  // worker busy so each iteration exercises ScheduleTasks() with work
  // still pending; scheduling NULL then cancels the remaining tasks.
  void RunScheduleTasksTest(const std::string& test_name,
                            unsigned max_depth,
                            unsigned num_children_per_node) {
    start_time_ = base::TimeTicks();
    num_runs_ = 0;
    do {
      internal::WorkerPoolTask::TaskVector empty;
      leaf_task_ = make_scoped_refptr(new PerfControlTaskImpl(&empty));

      internal::WorkerPoolTask::TaskVector children;
      BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
      scoped_refptr<PerfTaskImpl> root_task(
          make_scoped_refptr(new PerfTaskImpl(&children)));

      worker_pool_->ScheduleTasks(root_task);
      leaf_task_->WaitForTaskToStartRunning();
      worker_pool_->ScheduleTasks(NULL);
      worker_pool_->CheckForCompletedTasks();
      leaf_task_->AllowTaskToFinish();
    } while (DidRun());

    AfterTest(test_name);
  }

  // Measures end-to-end execution of a task graph rooted in a control
  // task that is released immediately after it starts running.
  void RunExecuteTasksTest(const std::string& test_name,
                           unsigned max_depth,
                           unsigned num_children_per_node) {
    start_time_ = base::TimeTicks();
    num_runs_ = 0;
    do {
      internal::WorkerPoolTask::TaskVector children;
      BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
      scoped_refptr<PerfControlTaskImpl> root_task(
          make_scoped_refptr(new PerfControlTaskImpl(&children)));

      worker_pool_->ScheduleTasks(root_task);
      root_task->WaitForTaskToStartRunning();
      root_task->AllowTaskToFinish();
      worker_pool_->CheckForCompletedTasks();
    } while (DidRun());

    AfterTest(test_name);
  }

 protected:
  scoped_ptr<PerfWorkerPool> worker_pool_;
  scoped_refptr<PerfControlTaskImpl> leaf_task_;
  base::TimeTicks start_time_;
  base::TimeDelta elapsed_;
  int num_runs_;
};
TEST_F(WorkerPoolPerfTest, BuildTaskGraph) {
  // (name, depth, fan-out) configurations, run in order.
  static const struct {
    const char* name;
    unsigned max_depth;
    unsigned num_children_per_node;
  } kGraphs[] = {
    { "build_task_graph_1_10", 1, 10 },
    { "build_task_graph_1_1000", 1, 1000 },
    { "build_task_graph_2_10", 2, 10 },
    { "build_task_graph_5_5", 5, 5 },
    { "build_task_graph_10_2", 10, 2 },
    { "build_task_graph_1000_1", 1000, 1 },
    { "build_task_graph_10_1", 10, 1 }
  };
  for (size_t i = 0; i < sizeof(kGraphs) / sizeof(kGraphs[0]); ++i) {
    RunBuildTaskGraphTest(kGraphs[i].name,
                          kGraphs[i].max_depth,
                          kGraphs[i].num_children_per_node);
  }
}
TEST_F(WorkerPoolPerfTest, ScheduleTasks) {
  // (name, depth, fan-out) configurations, run in order.
  static const struct {
    const char* name;
    unsigned max_depth;
    unsigned num_children_per_node;
  } kGraphs[] = {
    { "schedule_tasks_1_10", 1, 10 },
    { "schedule_tasks_1_1000", 1, 1000 },
    { "schedule_tasks_2_10", 2, 10 },
    { "schedule_tasks_5_5", 5, 5 },
    { "schedule_tasks_10_2", 10, 2 },
    { "schedule_tasks_1000_1", 1000, 1 },
    { "schedule_tasks_10_1", 10, 1 }
  };
  for (size_t i = 0; i < sizeof(kGraphs) / sizeof(kGraphs[0]); ++i) {
    RunScheduleTasksTest(kGraphs[i].name,
                         kGraphs[i].max_depth,
                         kGraphs[i].num_children_per_node);
  }
}
TEST_F(WorkerPoolPerfTest, ExecuteTasks) {
  // (name, depth, fan-out) configurations, run in order.
  static const struct {
    const char* name;
    unsigned max_depth;
    unsigned num_children_per_node;
  } kGraphs[] = {
    { "execute_tasks_1_10", 1, 10 },
    { "execute_tasks_1_1000", 1, 1000 },
    { "execute_tasks_2_10", 2, 10 },
    { "execute_tasks_5_5", 5, 5 },
    { "execute_tasks_10_2", 10, 2 },
    { "execute_tasks_1000_1", 1000, 1 },
    { "execute_tasks_10_1", 10, 1 }
  };
  for (size_t i = 0; i < sizeof(kGraphs) / sizeof(kGraphs[0]); ++i) {
    RunExecuteTasksTest(kGraphs[i].name,
                        kGraphs[i].max_depth,
                        kGraphs[i].num_children_per_node);
  }
}
} // namespace
} // namespace cc
...@@ -4,27 +4,101 @@ ...@@ -4,27 +4,101 @@
#include "cc/base/worker_pool.h" #include "cc/base/worker_pool.h"
#include <vector>
#include "cc/base/completion_event.h"
#include "testing/gtest/include/gtest/gtest.h" #include "testing/gtest/include/gtest/gtest.h"
namespace cc { namespace cc {
namespace { namespace {
class WorkerPoolTest : public testing::Test, class FakeTaskImpl : public internal::WorkerPoolTask {
public WorkerPoolClient {
public: public:
WorkerPoolTest() FakeTaskImpl(const base::Closure& callback,
: run_task_count_(0), const base::Closure& reply,
on_task_completed_count_(0), internal::WorkerPoolTask::TaskVector* dependencies)
finish_dispatching_completion_callbacks_count_(0) { : internal::WorkerPoolTask(dependencies),
callback_(callback),
reply_(reply) {
}
FakeTaskImpl(const base::Closure& callback, const base::Closure& reply)
: callback_(callback),
reply_(reply) {
}
// Overridden from internal::WorkerPoolTask:
virtual void RunOnThread(unsigned thread_index) OVERRIDE {
if (!callback_.is_null())
callback_.Run();
}
virtual void DispatchCompletionCallback() OVERRIDE {
if (!reply_.is_null())
reply_.Run();
}
private:
virtual ~FakeTaskImpl() {}
const base::Closure callback_;
const base::Closure reply_;
};
class FakeWorkerPool : public WorkerPool {
public:
FakeWorkerPool() : WorkerPool(1, base::TimeDelta::FromDays(1024), "test") {}
virtual ~FakeWorkerPool() {}
static scoped_ptr<FakeWorkerPool> Create() {
return make_scoped_ptr(new FakeWorkerPool);
}
void ScheduleTasks(const base::Closure& callback,
const base::Closure& reply,
const base::Closure& dependency,
int count) {
scoped_refptr<FakeTaskImpl> dependency_task(
new FakeTaskImpl(dependency, base::Closure()));
internal::WorkerPoolTask::TaskVector tasks;
for (int i = 0; i < count; ++i) {
internal::WorkerPoolTask::TaskVector dependencies(1, dependency_task);
tasks.push_back(new FakeTaskImpl(callback, reply, &dependencies));
}
scoped_refptr<FakeTaskImpl> completion_task(
new FakeTaskImpl(base::Bind(&FakeWorkerPool::OnTasksCompleted,
base::Unretained(this)),
base::Closure(),
&tasks));
scheduled_tasks_completion_.reset(new CompletionEvent);
WorkerPool::ScheduleTasks(completion_task);
} }
virtual ~WorkerPoolTest() {
void WaitForTasksToComplete() {
DCHECK(scheduled_tasks_completion_);
scheduled_tasks_completion_->Wait();
} }
private:
void OnTasksCompleted() {
DCHECK(scheduled_tasks_completion_);
scheduled_tasks_completion_->Signal();
}
scoped_ptr<CompletionEvent> scheduled_tasks_completion_;
};
class WorkerPoolTest : public testing::Test,
public WorkerPoolClient {
public:
WorkerPoolTest() : finish_dispatching_completion_callbacks_count_(0) {}
virtual ~WorkerPoolTest() {}
// Overridden from testing::Test:
virtual void SetUp() OVERRIDE { virtual void SetUp() OVERRIDE {
Reset(); Reset();
} }
virtual void TearDown() OVERRIDE { virtual void TearDown() OVERRIDE {
worker_pool_->Shutdown(); worker_pool_->Shutdown();
} }
...@@ -35,35 +109,34 @@ class WorkerPoolTest : public testing::Test, ...@@ -35,35 +109,34 @@ class WorkerPoolTest : public testing::Test,
} }
void Reset() { void Reset() {
worker_pool_ = WorkerPool::Create(1, worker_pool_ = FakeWorkerPool::Create();
base::TimeDelta::FromDays(1024),
"test");
worker_pool_->SetClient(this); worker_pool_->SetClient(this);
} }
void RunAllTasksAndReset() { void RunAllTasksAndReset() {
worker_pool_->WaitForTasksToComplete();
worker_pool_->Shutdown(); worker_pool_->Shutdown();
Reset(); Reset();
} }
WorkerPool* worker_pool() { FakeWorkerPool* worker_pool() {
return worker_pool_.get(); return worker_pool_.get();
} }
void RunTask() { void RunTask(unsigned id) {
++run_task_count_; run_task_ids_.push_back(id);
} }
void OnTaskCompleted() { void OnTaskCompleted(unsigned id) {
++on_task_completed_count_; on_task_completed_ids_.push_back(id);
} }
unsigned run_task_count() { const std::vector<unsigned>& run_task_ids() {
return run_task_count_; return run_task_ids_;
} }
unsigned on_task_completed_count() { const std::vector<unsigned>& on_task_completed_ids() {
return on_task_completed_count_; return on_task_completed_ids_;
} }
unsigned finish_dispatching_completion_callbacks_count() { unsigned finish_dispatching_completion_callbacks_count() {
...@@ -71,39 +144,72 @@ class WorkerPoolTest : public testing::Test, ...@@ -71,39 +144,72 @@ class WorkerPoolTest : public testing::Test,
} }
private: private:
scoped_ptr<WorkerPool> worker_pool_; scoped_ptr<FakeWorkerPool> worker_pool_;
unsigned run_task_count_; std::vector<unsigned> run_task_ids_;
unsigned on_task_completed_count_; std::vector<unsigned> on_task_completed_ids_;
unsigned finish_dispatching_completion_callbacks_count_; unsigned finish_dispatching_completion_callbacks_count_;
}; };
TEST_F(WorkerPoolTest, Basic) { TEST_F(WorkerPoolTest, Basic) {
EXPECT_EQ(0u, run_task_count()); EXPECT_EQ(0u, run_task_ids().size());
EXPECT_EQ(0u, on_task_completed_count()); EXPECT_EQ(0u, on_task_completed_ids().size());
EXPECT_EQ(0u, finish_dispatching_completion_callbacks_count()); EXPECT_EQ(0u, finish_dispatching_completion_callbacks_count());
worker_pool()->PostTaskAndReply( worker_pool()->ScheduleTasks(
base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)), base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this))); base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 0u),
base::Closure(),
1);
RunAllTasksAndReset(); RunAllTasksAndReset();
EXPECT_EQ(1u, run_task_count()); EXPECT_EQ(1u, run_task_ids().size());
EXPECT_EQ(1u, on_task_completed_count()); EXPECT_EQ(1u, on_task_completed_ids().size());
EXPECT_EQ(1u, finish_dispatching_completion_callbacks_count()); EXPECT_EQ(1u, finish_dispatching_completion_callbacks_count());
worker_pool()->PostTaskAndReply( worker_pool()->ScheduleTasks(
base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)), base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this))); base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 0u),
worker_pool()->PostTaskAndReply( base::Closure(),
base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)), 2);
base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this)));
RunAllTasksAndReset(); RunAllTasksAndReset();
EXPECT_EQ(3u, run_task_count()); EXPECT_EQ(3u, run_task_ids().size());
EXPECT_EQ(3u, on_task_completed_count()); EXPECT_EQ(3u, on_task_completed_ids().size());
EXPECT_EQ(2u, finish_dispatching_completion_callbacks_count()); EXPECT_EQ(2u, finish_dispatching_completion_callbacks_count());
} }
// Verifies the task graph semantics introduced by this change: a
// dependency runs before its dependents, and a dependency shared by
// several tasks runs only once.
TEST_F(WorkerPoolTest, Dependencies) {
  // One task (id 1) that depends on a dependency task (id 0).
  worker_pool()->ScheduleTasks(
      base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 1u),
      base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 1u),
      base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
      1);
  RunAllTasksAndReset();

  // Check if dependency ran before task.
  ASSERT_EQ(2u, run_task_ids().size());
  EXPECT_EQ(0u, run_task_ids()[0]);
  EXPECT_EQ(1u, run_task_ids()[1]);
  // Only the task itself gets a completion callback, not the dependency.
  ASSERT_EQ(1u, on_task_completed_ids().size());
  EXPECT_EQ(1u, on_task_completed_ids()[0]);

  // Two tasks (id 1) sharing the same dependency task (id 0).
  worker_pool()->ScheduleTasks(
      base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 1u),
      base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 1u),
      base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
      2);
  RunAllTasksAndReset();

  // Dependency should only run once.
  // Ids accumulate across rounds: indices 0-1 are from the first round.
  ASSERT_EQ(5u, run_task_ids().size());
  EXPECT_EQ(0u, run_task_ids()[2]);
  EXPECT_EQ(1u, run_task_ids()[3]);
  EXPECT_EQ(1u, run_task_ids()[4]);
  ASSERT_EQ(3u, on_task_completed_ids().size());
  EXPECT_EQ(1u, on_task_completed_ids()[1]);
  EXPECT_EQ(1u, on_task_completed_ids()[2]);
}
} // namespace } // namespace
} // namespace cc } // namespace cc
...@@ -225,9 +225,10 @@ ...@@ -225,9 +225,10 @@
'cc_test_support', 'cc_test_support',
], ],
'sources': [ 'sources': [
'trees/layer_tree_host_perftest.cc', 'base/worker_pool_perftest.cc',
'test/run_all_unittests.cc',
'test/cc_test_suite.cc', 'test/cc_test_suite.cc',
'test/run_all_unittests.cc',
'trees/layer_tree_host_perftest.cc',
], ],
'include_dirs': [ 'include_dirs': [
'test', 'test',
......
...@@ -5,9 +5,9 @@ ...@@ -5,9 +5,9 @@
#ifndef CC_RESOURCES_MANAGED_TILE_STATE_H_ #ifndef CC_RESOURCES_MANAGED_TILE_STATE_H_
#define CC_RESOURCES_MANAGED_TILE_STATE_H_ #define CC_RESOURCES_MANAGED_TILE_STATE_H_
#include "base/hash_tables.h"
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
#include "cc/resources/platform_color.h" #include "cc/resources/platform_color.h"
#include "cc/resources/raster_worker_pool.h"
#include "cc/resources/resource_pool.h" #include "cc/resources/resource_pool.h"
#include "cc/resources/resource_provider.h" #include "cc/resources/resource_provider.h"
#include "cc/resources/tile_manager.h" #include "cc/resources/tile_manager.h"
...@@ -113,11 +113,10 @@ class CC_EXPORT ManagedTileState { ...@@ -113,11 +113,10 @@ class CC_EXPORT ManagedTileState {
scoped_ptr<base::Value> AsValue() const; scoped_ptr<base::Value> AsValue() const;
// Persisted state: valid all the time. // Persisted state: valid all the time.
typedef base::hash_set<uint32_t> PixelRefSet;
PixelRefSet decoded_pixel_refs;
TileVersion tile_version; TileVersion tile_version;
PicturePileImpl::Analysis picture_pile_analysis;
bool picture_pile_analyzed; bool picture_pile_analyzed;
PicturePileImpl::Analysis picture_pile_analysis;
RasterWorkerPool::Task raster_task;
// Ephemeral state, valid only during TileManager::ManageTiles. // Ephemeral state, valid only during TileManager::ManageTiles.
bool is_in_never_bin_on_both_trees() const { bool is_in_never_bin_on_both_trees() const {
......
...@@ -10,24 +10,72 @@ namespace cc { ...@@ -10,24 +10,72 @@ namespace cc {
namespace { namespace {
// Task with no work of its own, used purely as a container node that
// groups |dependencies| in the task graph.
class RasterWorkerPoolContainerTaskImpl : public internal::WorkerPoolTask {
 public:
  // Marked explicit per style: a single-argument constructor must not
  // act as an implicit conversion from TaskVector*.
  explicit RasterWorkerPoolContainerTaskImpl(
      internal::WorkerPoolTask::TaskVector* dependencies)
      : internal::WorkerPoolTask(dependencies) {
  }

  // Overridden from internal::WorkerPoolTask:
  virtual void RunOnThread(unsigned thread_index) OVERRIDE {}
  virtual void DispatchCompletionCallback() OVERRIDE {}

 private:
  virtual ~RasterWorkerPoolContainerTaskImpl() {}
};
class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask { class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask {
public: public:
RasterWorkerPoolTaskImpl(PicturePileImpl* picture_pile, RasterWorkerPoolTaskImpl(const base::Closure& callback,
const RasterWorkerPool::RasterCallback& task, const RasterWorkerPool::Task::Reply& reply)
const base::Closure& reply) : callback_(callback),
: internal::WorkerPoolTask(reply), reply_(reply) {
}
// Overridden from internal::WorkerPoolTask:
virtual void RunOnThread(unsigned thread_index) OVERRIDE {
callback_.Run();
}
virtual void DispatchCompletionCallback() OVERRIDE {
reply_.Run(!HasFinishedRunning());
}
private:
virtual ~RasterWorkerPoolTaskImpl() {}
const base::Closure callback_;
const RasterWorkerPool::Task::Reply reply_;
};
class RasterWorkerPoolPictureTaskImpl : public internal::WorkerPoolTask {
public:
RasterWorkerPoolPictureTaskImpl(
PicturePileImpl* picture_pile,
const RasterWorkerPool::PictureTask::Callback& callback,
const RasterWorkerPool::Task::Reply& reply,
internal::WorkerPoolTask::TaskVector* dependencies)
: internal::WorkerPoolTask(dependencies),
picture_pile_(picture_pile), picture_pile_(picture_pile),
task_(task) { callback_(callback),
reply_(reply) {
DCHECK(picture_pile_); DCHECK(picture_pile_);
} }
// Overridden from internal::WorkerPoolTask:
virtual void RunOnThread(unsigned thread_index) OVERRIDE { virtual void RunOnThread(unsigned thread_index) OVERRIDE {
task_.Run(picture_pile_->GetCloneForDrawingOnThread(thread_index)); callback_.Run(picture_pile_->GetCloneForDrawingOnThread(thread_index));
}
virtual void DispatchCompletionCallback() OVERRIDE {
reply_.Run(!HasFinishedRunning());
} }
private: private:
virtual ~RasterWorkerPoolPictureTaskImpl() {}
scoped_refptr<PicturePileImpl> picture_pile_; scoped_refptr<PicturePileImpl> picture_pile_;
RasterWorkerPool::RasterCallback task_; const RasterWorkerPool::PictureTask::Callback callback_;
const RasterWorkerPool::Task::Reply reply_;
}; };
const char* kWorkerThreadNamePrefix = "CompositorRaster"; const char* kWorkerThreadNamePrefix = "CompositorRaster";
...@@ -36,23 +84,69 @@ const int kCheckForCompletedTasksDelayMs = 6; ...@@ -36,23 +84,69 @@ const int kCheckForCompletedTasksDelayMs = 6;
} // namespace } // namespace
RasterWorkerPool::RasterWorkerPool(size_t num_threads) RasterWorkerPool::Task::Queue::Queue() {
: WorkerPool( }
num_threads,
base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs), RasterWorkerPool::Task::Queue::~Queue() {
kWorkerThreadNamePrefix) { }
// Adds |task| to the back of the queue. The queue keeps a reference to
// the underlying WorkerPoolTask, so the caller's Task handle may be
// destroyed afterwards.
void RasterWorkerPool::Task::Queue::Append(const Task& task) {
  DCHECK(!task.is_null());
  tasks_.push_back(task.internal_);
}
// Creates a null task handle; is_null() returns true for it.
RasterWorkerPool::Task::Task() {
}
// Creates a task that runs |callback| on a worker thread and invokes
// |reply| with !HasFinishedRunning() when its completion callback is
// dispatched (see RasterWorkerPoolTaskImpl).
RasterWorkerPool::Task::Task(const base::Closure& callback,
                             const Reply& reply)
    : internal_(new RasterWorkerPoolTaskImpl(callback, reply)) {
}
// Creates a container task whose only purpose is to hold the tasks in
// |dependencies| as its graph dependencies.
RasterWorkerPool::Task::Task(Queue* dependencies)
    : internal_(new RasterWorkerPoolContainerTaskImpl(&dependencies->tasks_)) {
}
// Wraps an already-constructed internal task; used by subclasses such
// as PictureTask.
RasterWorkerPool::Task::Task(scoped_refptr<internal::WorkerPoolTask> internal)
    : internal_(internal) {
}
// Releases this handle's reference to the internal task.
RasterWorkerPool::Task::~Task() {
}
// Drops the reference to the internal task, returning this handle to
// the null (uninitialized) state.
void RasterWorkerPool::Task::Reset() {
  internal_ = NULL;
}
// Creates a task that runs |callback| with a per-thread clone of
// |picture_pile| and invokes |reply| with !HasFinishedRunning() on
// completion. |dependencies| become the task's graph dependencies.
RasterWorkerPool::PictureTask::PictureTask(PicturePileImpl* picture_pile,
                                           const Callback& callback,
                                           const Reply& reply,
                                           Task::Queue* dependencies)
    : RasterWorkerPool::Task(
        new RasterWorkerPoolPictureTaskImpl(picture_pile,
                                            callback,
                                            reply,
                                            &dependencies->tasks_)) {
}
// Starts |num_threads| raster worker threads; completed tasks are
// checked for every kCheckForCompletedTasksDelayMs milliseconds.
RasterWorkerPool::RasterWorkerPool(size_t num_threads) : WorkerPool(
    num_threads,
    base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs),
    kWorkerThreadNamePrefix) {
}
RasterWorkerPool::~RasterWorkerPool() { RasterWorkerPool::~RasterWorkerPool() {
} }
void RasterWorkerPool::PostRasterTaskAndReply(PicturePileImpl* picture_pile, void RasterWorkerPool::Shutdown() {
const RasterCallback& task, // Cancel all previously scheduled tasks.
const base::Closure& reply) { WorkerPool::ScheduleTasks(NULL);
PostTask(make_scoped_ptr(new RasterWorkerPoolTaskImpl(
picture_pile, WorkerPool::Shutdown();
task, }
reply)).PassAs<internal::WorkerPoolTask>());
// Schedules |task| and its dependencies. Passing NULL cancels all
// previously scheduled tasks that have not started running; reply
// callbacks still run for canceled tasks (see header comment).
void RasterWorkerPool::ScheduleTasks(Task* task) {
  WorkerPool::ScheduleTasks(task ? task->internal_ : NULL);
}
} // namespace cc } // namespace cc
...@@ -5,8 +5,6 @@ ...@@ -5,8 +5,6 @@
#ifndef CC_RESOURCES_RASTER_WORKER_POOL_H_ #ifndef CC_RESOURCES_RASTER_WORKER_POOL_H_
#define CC_RESOURCES_RASTER_WORKER_POOL_H_ #define CC_RESOURCES_RASTER_WORKER_POOL_H_
#include <string>
#include "cc/base/worker_pool.h" #include "cc/base/worker_pool.h"
namespace cc { namespace cc {
...@@ -15,7 +13,55 @@ class PicturePileImpl; ...@@ -15,7 +13,55 @@ class PicturePileImpl;
// A worker thread pool that runs raster tasks. // A worker thread pool that runs raster tasks.
class CC_EXPORT RasterWorkerPool : public WorkerPool { class CC_EXPORT RasterWorkerPool : public WorkerPool {
public: public:
typedef base::Callback<void(PicturePileImpl* picture_pile)> RasterCallback; class Task {
public:
typedef base::Callback<void(bool)> Reply;
// Highest priority task first. Order of execution is not guaranteed.
class Queue {
public:
Queue();
~Queue();
bool empty() const { return tasks_.empty(); }
// Add task to the back of the queue.
void Append(const Task& task);
private:
friend class RasterWorkerPool;
internal::WorkerPoolTask::TaskVector tasks_;
};
Task();
Task(const base::Closure& callback, const Reply& reply);
explicit Task(Queue* dependencies);
~Task();
// Returns true if Task is null (doesn't refer to anything).
bool is_null() const { return !internal_; }
// Returns the Task into an uninitialized state.
void Reset();
protected:
friend class RasterWorkerPool;
explicit Task(scoped_refptr<internal::WorkerPoolTask> internal);
scoped_refptr<internal::WorkerPoolTask> internal_;
};
// Task that rasterizes from a PicturePileImpl. The callback receives a
// clone of the pile obtained via GetCloneForDrawingOnThread() for the
// worker thread the task runs on.
class PictureTask : public Task {
 public:
  typedef base::Callback<void(PicturePileImpl*)> Callback;

  PictureTask(PicturePileImpl* picture_pile,
              const Callback& callback,
              const Reply& reply,
              Queue* dependencies);
};
virtual ~RasterWorkerPool(); virtual ~RasterWorkerPool();
...@@ -23,9 +69,16 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool { ...@@ -23,9 +69,16 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool {
return make_scoped_ptr(new RasterWorkerPool(num_threads)); return make_scoped_ptr(new RasterWorkerPool(num_threads));
} }
void PostRasterTaskAndReply(PicturePileImpl* picture_pile, // Tells the worker pool to shutdown after canceling all previously
const RasterCallback& task, // scheduled tasks. Reply callbacks are still guaranteed to run.
const base::Closure& reply); virtual void Shutdown() OVERRIDE;
// Schedule running of |root| task and all its dependencies. Tasks
// previously scheduled but no longer needed to run |root| will be
// canceled unless already running. Once scheduled, reply callbacks
// are guaranteed to run for all tasks even if they later get
// canceled by another call to ScheduleTasks().
void ScheduleTasks(Task* root);
private: private:
explicit RasterWorkerPool(size_t num_threads); explicit RasterWorkerPool(size_t num_threads);
......
...@@ -981,6 +981,8 @@ uint8_t* ResourceProvider::MapPixelBuffer(ResourceId id) { ...@@ -981,6 +981,8 @@ uint8_t* ResourceProvider::MapPixelBuffer(ResourceId id) {
context3d->mapBufferCHROMIUM( context3d->mapBufferCHROMIUM(
GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY)); GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY));
context3d->bindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0); context3d->bindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
// Buffer is required to be 4-byte aligned.
CHECK(!(reinterpret_cast<intptr_t>(image) & 3));
return image; return image;
} }
......
...@@ -135,14 +135,12 @@ TileManager::TileManager( ...@@ -135,14 +135,12 @@ TileManager::TileManager(
resource_pool_(ResourcePool::Create(resource_provider)), resource_pool_(ResourcePool::Create(resource_provider)),
raster_worker_pool_(raster_worker_pool.Pass()), raster_worker_pool_(raster_worker_pool.Pass()),
manage_tiles_pending_(false), manage_tiles_pending_(false),
manage_tiles_call_count_(0),
bytes_pending_upload_(0), bytes_pending_upload_(0),
has_performed_uploads_since_last_flush_(false), has_performed_uploads_since_last_flush_(false),
ever_exceeded_memory_budget_(false), ever_exceeded_memory_budget_(false),
rendering_stats_instrumentation_(rendering_stats_instrumentation), rendering_stats_instrumentation_(rendering_stats_instrumentation),
use_color_estimator_(use_color_estimator), use_color_estimator_(use_color_estimator),
did_initialize_visible_tile_(false), did_initialize_visible_tile_(false),
pending_tasks_(0),
max_pending_tasks_(kMaxNumPendingTasksPerThread * num_raster_threads) { max_pending_tasks_(kMaxNumPendingTasksPerThread * num_raster_threads) {
raster_worker_pool_->SetClient(this); raster_worker_pool_->SetClient(this);
} }
...@@ -309,7 +307,6 @@ void TileManager::ManageTiles() { ...@@ -309,7 +307,6 @@ void TileManager::ManageTiles() {
TRACE_EVENT0("cc", "TileManager::ManageTiles"); TRACE_EVENT0("cc", "TileManager::ManageTiles");
manage_tiles_pending_ = false; manage_tiles_pending_ = false;
++manage_tiles_call_count_;
AssignBinsToTiles(); AssignBinsToTiles();
SortTiles(); SortTiles();
...@@ -319,8 +316,8 @@ void TileManager::ManageTiles() { ...@@ -319,8 +316,8 @@ void TileManager::ManageTiles() {
"cc", "DidManage", TRACE_EVENT_SCOPE_THREAD, "cc", "DidManage", TRACE_EVENT_SCOPE_THREAD,
"state", TracedValue::FromValue(BasicStateAsValue().release())); "state", TracedValue::FromValue(BasicStateAsValue().release()));
// Finally, kick the rasterizer. // Finally, schedule rasterizer tasks.
DispatchMoreTasks(); ScheduleTasks();
} }
void TileManager::CheckForCompletedTileUploads() { void TileManager::CheckForCompletedTileUploads() {
...@@ -349,7 +346,7 @@ void TileManager::CheckForCompletedTileUploads() { ...@@ -349,7 +346,7 @@ void TileManager::CheckForCompletedTileUploads() {
tiles_with_pending_upload_.pop(); tiles_with_pending_upload_.pop();
} }
DispatchMoreTasks(); ScheduleTasks();
} }
void TileManager::AbortPendingTileUploads() { void TileManager::AbortPendingTileUploads() {
...@@ -387,11 +384,6 @@ void TileManager::ForceTileUploadToComplete(Tile* tile) { ...@@ -387,11 +384,6 @@ void TileManager::ForceTileUploadToComplete(Tile* tile) {
DidFinishTileInitialization(tile); DidFinishTileInitialization(tile);
DCHECK(tile->tile_version().IsReadyToDraw()); DCHECK(tile->tile_version().IsReadyToDraw());
} }
if (did_initialize_visible_tile_) {
did_initialize_visible_tile_ = false;
client_->DidInitializeVisibleTile();
}
} }
void TileManager::GetMemoryStats( void TileManager::GetMemoryStats(
...@@ -469,7 +461,7 @@ void TileManager::DidFinishDispatchingWorkerPoolCompletionCallbacks() { ...@@ -469,7 +461,7 @@ void TileManager::DidFinishDispatchingWorkerPoolCompletionCallbacks() {
has_performed_uploads_since_last_flush_ = false; has_performed_uploads_since_last_flush_ = false;
} }
DispatchMoreTasks(); ScheduleTasks();
} }
void TileManager::AssignGpuMemoryToTiles() { void TileManager::AssignGpuMemoryToTiles() {
...@@ -484,7 +476,7 @@ void TileManager::AssignGpuMemoryToTiles() { ...@@ -484,7 +476,7 @@ void TileManager::AssignGpuMemoryToTiles() {
// By clearing the tiles_that_need_to_be_rasterized_ vector list // By clearing the tiles_that_need_to_be_rasterized_ vector list
// above we move all tiles currently waiting for raster to idle state. // above we move all tiles currently waiting for raster to idle state.
// Some memory cannot be released. We figure out how much in this // Some memory cannot be released. We figure out how much in this
// loop as well. // loop.
for (TileVector::const_iterator it = tiles_.begin(); for (TileVector::const_iterator it = tiles_.begin();
it != tiles_.end(); it != tiles_.end();
++it) { ++it) {
...@@ -503,6 +495,7 @@ void TileManager::AssignGpuMemoryToTiles() { ...@@ -503,6 +495,7 @@ void TileManager::AssignGpuMemoryToTiles() {
size_t bytes_left = bytes_allocatable; size_t bytes_left = bytes_allocatable;
size_t bytes_oom_in_now_bin_on_pending_tree = 0; size_t bytes_oom_in_now_bin_on_pending_tree = 0;
TileVector tiles_requiring_memory_but_oomed; TileVector tiles_requiring_memory_but_oomed;
bool higher_priority_tile_oomed = false;
for (TileVector::iterator it = tiles_.begin(); for (TileVector::iterator it = tiles_.begin();
it != tiles_.end(); it != tiles_.end();
++it) { ++it) {
...@@ -514,45 +507,61 @@ void TileManager::AssignGpuMemoryToTiles() { ...@@ -514,45 +507,61 @@ void TileManager::AssignGpuMemoryToTiles() {
if (!tile_version.requires_resource()) if (!tile_version.requires_resource())
continue; continue;
// If the memory is unreleasable, then we do not need to do anything.
if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY) {
if (tile->required_for_activation()) {
AddRequiredTileForActivation(tile);
// If after rasterizing, this tile has become required or the client has
// changed its mind about forcing tiles, do that now.
if (!tile->tile_version().forced_upload_ &&
client_->ShouldForceTileUploadsRequiredForActivationToComplete()) {
ForceTileUploadToComplete(tile);
}
}
continue;
}
size_t tile_bytes = tile->bytes_consumed_if_allocated(); size_t tile_bytes = tile->bytes_consumed_if_allocated();
// Memory is already reserved for tile with unreleasable memory
// so adding it to |tiles_that_need_to_be_rasterized_| doesn't
// affect bytes_allocatable.
if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY)
tile_bytes = 0;
// If the tile is not needed, free it up. // If the tile is not needed, free it up.
if (mts.is_in_never_bin_on_both_trees()) { if (mts.is_in_never_bin_on_both_trees()) {
FreeResourcesForTile(tile); if (tile_version.memory_state_ != USING_UNRELEASABLE_MEMORY) {
tile_version.memory_state_ = NOT_ALLOWED_TO_USE_MEMORY; FreeResourcesForTile(tile);
tile_version.memory_state_ = NOT_ALLOWED_TO_USE_MEMORY;
}
continue; continue;
} }
// Tile is OOM. // Tile is OOM.
if (tile_bytes > bytes_left) { if (tile_bytes > bytes_left) {
FreeResourcesForTile(tile);
tile->tile_version().set_rasterize_on_demand(); tile->tile_version().set_rasterize_on_demand();
if (mts.tree_bin[PENDING_TREE] == NOW_BIN) { if (mts.tree_bin[PENDING_TREE] == NOW_BIN) {
tiles_requiring_memory_but_oomed.push_back(tile); tiles_requiring_memory_but_oomed.push_back(tile);
bytes_oom_in_now_bin_on_pending_tree += tile_bytes; bytes_oom_in_now_bin_on_pending_tree += tile_bytes;
} }
FreeResourcesForTile(tile);
higher_priority_tile_oomed = true;
continue; continue;
} }
tile_version.set_use_resource(); tile_version.set_use_resource();
bytes_left -= tile_bytes; bytes_left -= tile_bytes;
if (!tile_version.resource_ &&
tile_version.memory_state_ == CAN_USE_MEMORY) { // Tile shouldn't be rasterized if we've failed to assign
// gpu memory to a higher priority tile. This is important for
// two reasons:
// 1. Tile size should not impact raster priority.
// 2. Tile with unreleasable memory could otherwise incorrectly
// be added as it's not affected by |bytes_allocatable|.
if (higher_priority_tile_oomed)
continue;
if (!tile_version.resource_)
tiles_that_need_to_be_rasterized_.push_back(tile); tiles_that_need_to_be_rasterized_.push_back(tile);
}
if (!tile_version.resource_ && tile->required_for_activation()) if (!tile_version.resource_ && tile->required_for_activation())
AddRequiredTileForActivation(tile); AddRequiredTileForActivation(tile);
if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY &&
tile->required_for_activation()) {
// If after rasterizing, this tile has become required or the client has
// changed its mind about forcing tiles, do that now.
if (!tile->tile_version().forced_upload_ &&
client_->ShouldForceTileUploadsRequiredForActivationToComplete()) {
ForceTileUploadToComplete(tile);
}
}
} }
// In OOM situation, we iterate tiles_, remove the memory for active tree // In OOM situation, we iterate tiles_, remove the memory for active tree
...@@ -611,16 +620,10 @@ void TileManager::AssignGpuMemoryToTiles() { ...@@ -611,16 +620,10 @@ void TileManager::AssignGpuMemoryToTiles() {
memory_stats_from_last_assign_.bytes_unreleasable = unreleasable_bytes; memory_stats_from_last_assign_.bytes_unreleasable = unreleasable_bytes;
memory_stats_from_last_assign_.bytes_over = memory_stats_from_last_assign_.bytes_over =
bytes_that_exceeded_memory_budget_in_now_bin; bytes_that_exceeded_memory_budget_in_now_bin;
// Reverse two tiles_that_need_* vectors such that pop_back gets
// the highest priority tile.
std::reverse(
tiles_that_need_to_be_rasterized_.begin(),
tiles_that_need_to_be_rasterized_.end());
} }
void TileManager::FreeResourcesForTile(Tile* tile) { void TileManager::FreeResourcesForTile(Tile* tile) {
DCHECK(tile->tile_version().memory_state_ != USING_UNRELEASABLE_MEMORY); DCHECK_NE(USING_UNRELEASABLE_MEMORY, tile->tile_version().memory_state_);
if (tile->tile_version().resource_) { if (tile->tile_version().resource_) {
resource_pool_->ReleaseResource( resource_pool_->ReleaseResource(
tile->tile_version().resource_.Pass()); tile->tile_version().resource_.Pass());
...@@ -628,146 +631,158 @@ void TileManager::FreeResourcesForTile(Tile* tile) { ...@@ -628,146 +631,158 @@ void TileManager::FreeResourcesForTile(Tile* tile) {
tile->tile_version().memory_state_ = NOT_ALLOWED_TO_USE_MEMORY; tile->tile_version().memory_state_ = NOT_ALLOWED_TO_USE_MEMORY;
} }
bool TileManager::CanDispatchRasterTask(Tile* tile) const { void TileManager::ScheduleTasks() {
if (pending_tasks_ >= max_pending_tasks_) TRACE_EVENT0("cc", "TileManager::ScheduleTasks");
return false; RasterWorkerPool::Task::Queue tasks;
size_t new_bytes_pending = bytes_pending_upload_;
new_bytes_pending += tile->bytes_consumed_if_allocated();
return new_bytes_pending <= kMaxPendingUploadBytes &&
tiles_with_pending_upload_.size() < kMaxPendingUploads;
}
void TileManager::DispatchMoreTasks() {
TileVector tiles_with_image_decoding_tasks;
// Process all tiles in the need_to_be_rasterized queue:
// 1. Dispatch image decode tasks.
// 2. If the image decode isn't done, save the tile for later processing.
// 3. Attempt to dispatch a raster task, or break out of the loop.
while (!tiles_that_need_to_be_rasterized_.empty()) {
Tile* tile = tiles_that_need_to_be_rasterized_.back();
DCHECK(tile->tile_version().requires_resource()); size_t bytes_pending_upload = bytes_pending_upload_;
unsigned pending_tasks = 0;
if (DispatchImageDecodeTasksForTile(tile)) { // Build a new task queue containing all task currently needed. Tasks
tiles_with_image_decoding_tasks.push_back(tile); // are added in order of priority, highest priority task first.
} else if (!CanDispatchRasterTask(tile)) { for (TileVector::iterator it = tiles_that_need_to_be_rasterized_.begin();
break; it != tiles_that_need_to_be_rasterized_.end();
} else { ++it) {
DispatchOneRasterTask(tile); Tile* tile = *it;
} ManagedTileState& mts = tile->managed_state();
tiles_that_need_to_be_rasterized_.pop_back();
}
// Put the saved tiles back into the queue. The order is reversed // Skip tile if determined to not require resource.
// to preserve original ordering. if (!tile->tile_version().requires_resource())
tiles_that_need_to_be_rasterized_.insert( continue;
tiles_that_need_to_be_rasterized_.end(),
tiles_with_image_decoding_tasks.rbegin(),
tiles_with_image_decoding_tasks.rend());
if (did_initialize_visible_tile_) { // Skip tile if already rasterized.
did_initialize_visible_tile_ = false; if (tile->tile_version().resource_)
client_->DidInitializeVisibleTile(); continue;
}
}
bool TileManager::DispatchImageDecodeTasksForTile(Tile* tile) { // TODO(reveman): Remove throttling based on max pending tasks.
TRACE_EVENT0("cc", "TileManager::DispatchImageDecodeTasksForTile"); if (pending_tasks >= max_pending_tasks_)
ManagedTileState& mts = tile->managed_state(); break;
bool pending_decode_tasks = false;
for (PicturePileImpl::PixelRefIterator iter(tile->content_rect(), // TODO(reveman): Remove throttling based on max pending uploads.
tile->contents_scale(), if (tiles_with_pending_upload_.size() >= kMaxPendingUploads)
tile->picture_pile()); break;
iter; ++iter) {
skia::LazyPixelRef* pixel_ref = *iter;
uint32_t id = pixel_ref->getGenerationID();
// Check if image has already been decoded. // TODO(reveman): Throttle based on shared memory usage rather
if (mts.decoded_pixel_refs.find(id) != mts.decoded_pixel_refs.end()) // than bytes pending upload.
continue; size_t new_bytes_pending = bytes_pending_upload;
new_bytes_pending += tile->bytes_consumed_if_allocated();
if (new_bytes_pending > kMaxPendingUploadBytes)
break;
bytes_pending_upload = new_bytes_pending;
// Check if decode task is already pending. // Create raster task for this tile if necessary.
if (pending_decode_tasks_.find(id) != pending_decode_tasks_.end()) { if (mts.raster_task.is_null())
pending_decode_tasks = true; mts.raster_task = CreateRasterTask(tile);
continue;
}
// TODO(qinmin): passing correct image size to PrepareToDecode(). // Finally append raster task.
if (pixel_ref->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) { tasks.Append(mts.raster_task);
rendering_stats_instrumentation_->IncrementDeferredImageCacheHitCount(); pending_tasks++;
mts.decoded_pixel_refs.insert(id); }
continue;
}
if (pending_tasks_ >= max_pending_tasks_) if (!tasks.empty()) {
break; RasterWorkerPool::Task root(&tasks);
DispatchOneImageDecodeTask(tile, pixel_ref); // Schedule running of |tasks|. This replaces any previously
pending_decode_tasks = true; // scheduled tasks and effectively cancels all tasks not present
// in |tasks|.
raster_worker_pool_->ScheduleTasks(&root);
} else {
raster_worker_pool_->ScheduleTasks(NULL);
} }
return pending_decode_tasks; if (did_initialize_visible_tile_) {
did_initialize_visible_tile_ = false;
client_->DidInitializeVisibleTile();
}
} }
void TileManager::DispatchOneImageDecodeTask( RasterWorkerPool::Task TileManager::CreateImageDecodeTask(
scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref) { Tile* tile, skia::LazyPixelRef* pixel_ref) {
TRACE_EVENT0("cc", "TileManager::DispatchOneImageDecodeTask"); TRACE_EVENT0("cc", "TileManager::CreateImageDecodeTask");
uint32_t pixel_ref_id = pixel_ref->getGenerationID();
DCHECK(pending_decode_tasks_.end() ==
pending_decode_tasks_.find(pixel_ref_id));
pending_decode_tasks_.insert(pixel_ref_id);
raster_worker_pool_->PostTaskAndReply( return RasterWorkerPool::Task(
base::Bind(&TileManager::RunImageDecodeTask, base::Bind(&TileManager::RunImageDecodeTask,
pixel_ref, pixel_ref,
tile->layer_id(), tile->layer_id(),
rendering_stats_instrumentation_), rendering_stats_instrumentation_),
base::Bind(&TileManager::OnImageDecodeTaskCompleted, base::Bind(&TileManager::OnImageDecodeTaskCompleted,
base::Unretained(this), base::Unretained(this),
tile, make_scoped_refptr(tile),
pixel_ref_id)); pixel_ref->getGenerationID()));
pending_tasks_++;
} }
void TileManager::OnImageDecodeTaskCompleted( void TileManager::OnImageDecodeTaskCompleted(scoped_refptr<Tile> tile,
scoped_refptr<Tile> tile, uint32_t pixel_ref_id) { uint32_t pixel_ref_id,
bool was_canceled) {
TRACE_EVENT0("cc", "TileManager::OnImageDecodeTaskCompleted"); TRACE_EVENT0("cc", "TileManager::OnImageDecodeTaskCompleted");
ManagedTileState& mts = tile->managed_state(); DCHECK(pending_decode_tasks_.find(pixel_ref_id) !=
mts.decoded_pixel_refs.insert(pixel_ref_id); pending_decode_tasks_.end());
pending_decode_tasks_.erase(pixel_ref_id); pending_decode_tasks_.erase(pixel_ref_id);
pending_tasks_--;
} }
scoped_ptr<ResourcePool::Resource> TileManager::PrepareTileForRaster( TileManager::RasterTaskMetadata TileManager::GetRasterTaskMetadata(
Tile* tile) { const Tile& tile) const {
scoped_ptr<ResourcePool::Resource> resource = resource_pool_->AcquireResource( RasterTaskMetadata metadata;
tile->tile_size_.size(), const ManagedTileState& mts = tile.managed_state();
tile->tile_version().resource_format_); metadata.is_tile_in_pending_tree_now_bin =
mts.tree_bin[PENDING_TREE] == NOW_BIN;
metadata.tile_resolution = mts.resolution;
metadata.layer_id = tile.layer_id();
metadata.tile_id = &tile;
metadata.source_frame_number = tile.source_frame_number();
return metadata;
}
RasterWorkerPool::Task TileManager::CreateRasterTask(Tile* tile) {
TRACE_EVENT0("cc", "TileManager::CreateRasterTask");
scoped_ptr<ResourcePool::Resource> resource =
resource_pool_->AcquireResource(
tile->tile_size_.size(),
tile->tile_version().resource_format_);
resource_pool_->resource_provider()->AcquirePixelBuffer(resource->id()); resource_pool_->resource_provider()->AcquirePixelBuffer(resource->id());
DCHECK_EQ(CAN_USE_MEMORY, tile->tile_version().memory_state_);
tile->tile_version().memory_state_ = USING_UNRELEASABLE_MEMORY; tile->tile_version().memory_state_ = USING_UNRELEASABLE_MEMORY;
return resource.Pass();
}
void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
TRACE_EVENT0("cc", "TileManager::DispatchOneRasterTask");
scoped_ptr<ResourcePool::Resource> resource = PrepareTileForRaster(tile);
ResourceProvider::ResourceId resource_id = resource->id();
PicturePileImpl::Analysis* analysis = new PicturePileImpl::Analysis; PicturePileImpl::Analysis* analysis = new PicturePileImpl::Analysis;
// MapPixelBuffer() returns NULL if context was lost at the time // MapPixelBuffer() returns NULL if context was lost at the time
// AcquirePixelBuffer() was called. For simplicity we still post // AcquirePixelBuffer() was called. For simplicity we still create
// a raster task that is essentially a noop in these situations. // a raster task that is essentially a noop in these situations.
uint8* buffer = resource_pool_->resource_provider()->MapPixelBuffer( uint8* buffer = resource_pool_->resource_provider()->MapPixelBuffer(
resource_id); resource->id());
// Create and queue all image decode tasks that this tile depends on.
RasterWorkerPool::Task::Queue decode_tasks;
for (PicturePileImpl::PixelRefIterator iter(tile->content_rect(),
tile->contents_scale(),
tile->picture_pile());
iter; ++iter) {
skia::LazyPixelRef* pixel_ref = *iter;
uint32_t id = pixel_ref->getGenerationID();
// skia requires that our buffer be 4-byte aligned // Append existing image decode task if available.
CHECK(!(reinterpret_cast<intptr_t>(buffer) & 3)); PixelRefMap::iterator decode_task_it = pending_decode_tasks_.find(id);
if (decode_task_it != pending_decode_tasks_.end()) {
decode_tasks.Append(decode_task_it->second);
continue;
}
raster_worker_pool_->PostRasterTaskAndReply( // TODO(qinmin): passing correct image size to PrepareToDecode().
if (pixel_ref->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
rendering_stats_instrumentation_->IncrementDeferredImageCacheHitCount();
continue;
}
// Create and append new image decode task for this pixel ref.
RasterWorkerPool::Task decode_task = CreateImageDecodeTask(
tile, pixel_ref);
decode_tasks.Append(decode_task);
pending_decode_tasks_[id] = decode_task;
}
return RasterWorkerPool::PictureTask(
tile->picture_pile(), tile->picture_pile(),
base::Bind(&TileManager::RunAnalyzeAndRasterTask, base::Bind(&TileManager::RunAnalyzeAndRasterTask,
base::Bind(&TileManager::RunAnalyzeTask, base::Bind(&TileManager::RunAnalyzeTask,
...@@ -786,43 +801,38 @@ void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) { ...@@ -786,43 +801,38 @@ void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
rendering_stats_instrumentation_)), rendering_stats_instrumentation_)),
base::Bind(&TileManager::OnRasterTaskCompleted, base::Bind(&TileManager::OnRasterTaskCompleted,
base::Unretained(this), base::Unretained(this),
tile, make_scoped_refptr(tile),
base::Passed(&resource), base::Passed(&resource),
base::Owned(analysis), base::Owned(analysis)),
manage_tiles_call_count_)); &decode_tasks);
pending_tasks_++;
}
TileManager::RasterTaskMetadata TileManager::GetRasterTaskMetadata(
const Tile& tile) const {
RasterTaskMetadata metadata;
const ManagedTileState& mts = tile.managed_state();
metadata.is_tile_in_pending_tree_now_bin =
mts.tree_bin[PENDING_TREE] == NOW_BIN;
metadata.tile_resolution = mts.resolution;
metadata.layer_id = tile.layer_id();
metadata.tile_id = &tile;
metadata.source_frame_number = tile.source_frame_number();
return metadata;
} }
void TileManager::OnRasterTaskCompleted( void TileManager::OnRasterTaskCompleted(
scoped_refptr<Tile> tile, scoped_refptr<Tile> tile,
scoped_ptr<ResourcePool::Resource> resource, scoped_ptr<ResourcePool::Resource> resource,
PicturePileImpl::Analysis* analysis, PicturePileImpl::Analysis* analysis,
int manage_tiles_call_count_when_dispatched) { bool was_canceled) {
TRACE_EVENT0("cc", "TileManager::OnRasterTaskCompleted"); TRACE_EVENT0("cc", "TileManager::OnRasterTaskCompleted");
pending_tasks_--; ManagedTileState& mts = tile->managed_state();
DCHECK(!mts.raster_task.is_null());
mts.raster_task.Reset();
// Tile resources can't be freed until upload has completed.
DCHECK_EQ(USING_UNRELEASABLE_MEMORY, tile->tile_version().memory_state_);
// Release raster resources. // Release raster resources.
resource_pool_->resource_provider()->UnmapPixelBuffer(resource->id()); resource_pool_->resource_provider()->UnmapPixelBuffer(resource->id());
tile->tile_version().memory_state_ = USING_RELEASABLE_MEMORY; if (was_canceled) {
tile->tile_version().memory_state_ = CAN_USE_MEMORY;
resource_pool_->resource_provider()->ReleasePixelBuffer(resource->id());
resource_pool_->ReleaseResource(resource.Pass());
return;
}
ManagedTileState& managed_tile_state = tile->managed_state(); mts.picture_pile_analysis = *analysis;
managed_tile_state.picture_pile_analysis = *analysis; mts.picture_pile_analyzed = true;
managed_tile_state.picture_pile_analyzed = true;
if (analysis->is_solid_color) { if (analysis->is_solid_color) {
tile->tile_version().set_solid_color(analysis->solid_color); tile->tile_version().set_solid_color(analysis->solid_color);
...@@ -832,35 +842,17 @@ void TileManager::OnRasterTaskCompleted( ...@@ -832,35 +842,17 @@ void TileManager::OnRasterTaskCompleted(
return; return;
} }
// Tile can be freed after the completion of the raster task. Call resource_pool_->resource_provider()->BeginSetPixels(resource->id());
// AssignGpuMemoryToTiles() to re-assign gpu memory to highest priority has_performed_uploads_since_last_flush_ = true;
// tiles if ManageTiles() was called since task was dispatched. The result
// of this could be that this tile is no longer allowed to use gpu
// memory and in that case we need to abort initialization and free all
// associated resources before calling DispatchMoreTasks().
if (manage_tiles_call_count_when_dispatched != manage_tiles_call_count_)
AssignGpuMemoryToTiles();
// Finish resource initialization we're still using memory.
if (tile->tile_version().memory_state_ == USING_RELEASABLE_MEMORY) {
// Tile resources can't be freed until upload has completed.
tile->tile_version().memory_state_ = USING_UNRELEASABLE_MEMORY;
resource_pool_->resource_provider()->BeginSetPixels(resource->id());
has_performed_uploads_since_last_flush_ = true;
tile->tile_version().resource_ = resource.Pass(); tile->tile_version().resource_ = resource.Pass();
bytes_pending_upload_ += tile->bytes_consumed_if_allocated(); bytes_pending_upload_ += tile->bytes_consumed_if_allocated();
tiles_with_pending_upload_.push(tile); tiles_with_pending_upload_.push(tile);
if (tile->required_for_activation() && if (tile->required_for_activation() &&
client_->ShouldForceTileUploadsRequiredForActivationToComplete()) client_->ShouldForceTileUploadsRequiredForActivationToComplete())
ForceTileUploadToComplete(tile); ForceTileUploadToComplete(tile);
} else {
resource_pool_->resource_provider()->ReleasePixelBuffer(resource->id());
resource_pool_->ReleaseResource(resource.Pass());
}
} }
void TileManager::DidFinishTileInitialization(Tile* tile) { void TileManager::DidFinishTileInitialization(Tile* tile) {
...@@ -881,10 +873,24 @@ void TileManager::DidTileTreeBinChange(Tile* tile, ...@@ -881,10 +873,24 @@ void TileManager::DidTileTreeBinChange(Tile* tile,
mts.tree_bin[tree] = new_tree_bin; mts.tree_bin[tree] = new_tree_bin;
} }
// static
void TileManager::RunImageDecodeTask(
skia::LazyPixelRef* pixel_ref,
int layer_id,
RenderingStatsInstrumentation* stats_instrumentation) {
TRACE_EVENT0("cc", "TileManager::RunImageDecodeTask");
devtools_instrumentation::ScopedLayerTask image_decode_task(
devtools_instrumentation::kImageDecodeTask, layer_id);
base::TimeTicks start_time = stats_instrumentation->StartRecording();
pixel_ref->Decode();
base::TimeDelta duration = stats_instrumentation->EndRecording(start_time);
stats_instrumentation->AddDeferredImageDecode(duration);
}
// static // static
void TileManager::RunAnalyzeAndRasterTask( void TileManager::RunAnalyzeAndRasterTask(
const RasterWorkerPool::RasterCallback& analyze_task, const RasterWorkerPool::PictureTask::Callback& analyze_task,
const RasterWorkerPool::RasterCallback& raster_task, const RasterWorkerPool::PictureTask::Callback& raster_task,
PicturePileImpl* picture_pile) { PicturePileImpl* picture_pile) {
analyze_task.Run(picture_pile); analyze_task.Run(picture_pile);
raster_task.Run(picture_pile); raster_task.Run(picture_pile);
...@@ -980,18 +986,4 @@ void TileManager::RunRasterTask( ...@@ -980,18 +986,4 @@ void TileManager::RunRasterTask(
} }
} }
// static
void TileManager::RunImageDecodeTask(
skia::LazyPixelRef* pixel_ref,
int layer_id,
RenderingStatsInstrumentation* stats_instrumentation) {
TRACE_EVENT0("cc", "TileManager::RunImageDecodeTask");
devtools_instrumentation::ScopedLayerTask image_decode_task(
devtools_instrumentation::kImageDecodeTask, layer_id);
base::TimeTicks start_time = stats_instrumentation->StartRecording();
pixel_ref->Decode();
base::TimeDelta duration = stats_instrumentation->EndRecording(start_time);
stats_instrumentation->AddDeferredImageDecode(duration);
}
} // namespace cc } // namespace cc
...@@ -62,6 +62,8 @@ scoped_ptr<base::Value> TileManagerBinPriorityAsValue( ...@@ -62,6 +62,8 @@ scoped_ptr<base::Value> TileManagerBinPriorityAsValue(
// created, and unregister from the manager when they are deleted. // created, and unregister from the manager when they are deleted.
class CC_EXPORT TileManager : public WorkerPoolClient { class CC_EXPORT TileManager : public WorkerPoolClient {
public: public:
typedef base::hash_set<uint32_t> PixelRefSet;
static scoped_ptr<TileManager> Create( static scoped_ptr<TileManager> Create(
TileManagerClient* client, TileManagerClient* client,
ResourceProvider* resource_provider, ResourceProvider* resource_provider,
...@@ -116,7 +118,7 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -116,7 +118,7 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
void UnregisterTile(Tile* tile); void UnregisterTile(Tile* tile);
// Virtual for test // Virtual for test
virtual void DispatchMoreTasks(); virtual void ScheduleTasks();
private: private:
// Data that is passed to raster tasks. // Data that is passed to raster tasks.
...@@ -129,8 +131,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -129,8 +131,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
int source_frame_number; int source_frame_number;
}; };
RasterTaskMetadata GetRasterTaskMetadata(const Tile& tile) const;
void AssignBinsToTiles(); void AssignBinsToTiles();
void SortTiles(); void SortTiles();
void AssignGpuMemoryToTiles(); void AssignGpuMemoryToTiles();
...@@ -142,20 +142,19 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -142,20 +142,19 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
client_->ScheduleManageTiles(); client_->ScheduleManageTiles();
manage_tiles_pending_ = true; manage_tiles_pending_ = true;
} }
bool DispatchImageDecodeTasksForTile(Tile* tile); RasterWorkerPool::Task CreateImageDecodeTask(
void DispatchOneImageDecodeTask( Tile* tile, skia::LazyPixelRef* pixel_ref);
scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref);
void OnImageDecodeTaskCompleted( void OnImageDecodeTaskCompleted(
scoped_refptr<Tile> tile, scoped_refptr<Tile> tile,
uint32_t pixel_ref_id); uint32_t pixel_ref_id,
bool CanDispatchRasterTask(Tile* tile) const; bool was_canceled);
scoped_ptr<ResourcePool::Resource> PrepareTileForRaster(Tile* tile); RasterTaskMetadata GetRasterTaskMetadata(const Tile& tile) const;
void DispatchOneRasterTask(scoped_refptr<Tile> tile); RasterWorkerPool::Task CreateRasterTask(Tile* tile);
void OnRasterTaskCompleted( void OnRasterTaskCompleted(
scoped_refptr<Tile> tile, scoped_refptr<Tile> tile,
scoped_ptr<ResourcePool::Resource> resource, scoped_ptr<ResourcePool::Resource> resource,
PicturePileImpl::Analysis* analysis, PicturePileImpl::Analysis* analysis,
int manage_tiles_call_count_when_dispatched); bool was_canceled);
void DidFinishTileInitialization(Tile* tile); void DidFinishTileInitialization(Tile* tile);
void DidTileTreeBinChange(Tile* tile, void DidTileTreeBinChange(Tile* tile,
TileManagerBin new_tree_bin, TileManagerBin new_tree_bin,
...@@ -163,9 +162,13 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -163,9 +162,13 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
scoped_ptr<Value> GetMemoryRequirementsAsValue() const; scoped_ptr<Value> GetMemoryRequirementsAsValue() const;
void AddRequiredTileForActivation(Tile* tile); void AddRequiredTileForActivation(Tile* tile);
static void RunImageDecodeTask(
skia::LazyPixelRef* pixel_ref,
int layer_id,
RenderingStatsInstrumentation* stats_instrumentation);
static void RunAnalyzeAndRasterTask( static void RunAnalyzeAndRasterTask(
const RasterWorkerPool::RasterCallback& analyze_task, const RasterWorkerPool::PictureTask::Callback& analyze_task,
const RasterWorkerPool::RasterCallback& raster_task, const RasterWorkerPool::PictureTask::Callback& raster_task,
PicturePileImpl* picture_pile); PicturePileImpl* picture_pile);
static void RunAnalyzeTask( static void RunAnalyzeTask(
PicturePileImpl::Analysis* analysis, PicturePileImpl::Analysis* analysis,
...@@ -183,16 +186,11 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -183,16 +186,11 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
const RasterTaskMetadata& metadata, const RasterTaskMetadata& metadata,
RenderingStatsInstrumentation* stats_instrumentation, RenderingStatsInstrumentation* stats_instrumentation,
PicturePileImpl* picture_pile); PicturePileImpl* picture_pile);
static void RunImageDecodeTask(
skia::LazyPixelRef* pixel_ref,
int layer_id,
RenderingStatsInstrumentation* stats_instrumentation);
TileManagerClient* client_; TileManagerClient* client_;
scoped_ptr<ResourcePool> resource_pool_; scoped_ptr<ResourcePool> resource_pool_;
scoped_ptr<RasterWorkerPool> raster_worker_pool_; scoped_ptr<RasterWorkerPool> raster_worker_pool_;
bool manage_tiles_pending_; bool manage_tiles_pending_;
int manage_tiles_call_count_;
GlobalStateThatImpactsTilePriority global_state_; GlobalStateThatImpactsTilePriority global_state_;
...@@ -202,8 +200,8 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -202,8 +200,8 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
typedef std::set<Tile*> TileSet; typedef std::set<Tile*> TileSet;
TileSet tiles_that_need_to_be_initialized_for_activation_; TileSet tiles_that_need_to_be_initialized_for_activation_;
typedef base::hash_set<uint32_t> PixelRefSet; typedef base::hash_map<uint32_t, RasterWorkerPool::Task> PixelRefMap;
PixelRefSet pending_decode_tasks_; PixelRefMap pending_decode_tasks_;
typedef std::queue<scoped_refptr<Tile> > TileQueue; typedef std::queue<scoped_refptr<Tile> > TileQueue;
TileQueue tiles_with_pending_upload_; TileQueue tiles_with_pending_upload_;
...@@ -217,7 +215,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient { ...@@ -217,7 +215,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
bool use_color_estimator_; bool use_color_estimator_;
bool did_initialize_visible_tile_; bool did_initialize_visible_tile_;
size_t pending_tasks_;
size_t max_pending_tasks_; size_t max_pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(TileManager); DISALLOW_COPY_AND_ASSIGN(TileManager);
......
...@@ -17,7 +17,7 @@ class FakeTileManager : public TileManager { ...@@ -17,7 +17,7 @@ class FakeTileManager : public TileManager {
protected: protected:
// Do nothing // Do nothing
virtual void DispatchMoreTasks() OVERRIDE { } virtual void ScheduleTasks() OVERRIDE { }
}; };
} // namespace cc } // namespace cc
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment