Commit 150c9707 authored by reveman@google.com's avatar reveman@google.com

cc: Check for completed raster tasks at interval.

This significantly reduces the context switching overhead for
impl-side painting.

Instead of posting a reply task to the impl thread after completing
each raster job, post a reply only when the worker pool becomes idle
and poll for completed tasks at a 6ms interval.

This not only makes rasterization more efficient but also
reduces the number of shallow flushes, which makes async uploads
more efficient.

BUG=173802

Review URL: https://codereview.chromium.org/12217105

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@182404 0039d316-1c4b-4281-b951-d872f2087c98
parent 6e2a612d
......@@ -23,6 +23,7 @@ class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask {
virtual void Run(RenderingStats* rendering_stats) OVERRIDE {
task_.Run(picture_pile_.get(), rendering_stats);
base::subtle::Release_Store(&completed_, 1);
}
private:
......@@ -32,8 +33,9 @@ class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask {
} // namespace
RasterWorkerPool::RasterWorkerPool(size_t num_threads)
: WorkerPool(num_threads) {
// Constructs a raster worker pool with |num_threads| worker threads.
// |client| is notified when a batch of completion callbacks has been
// dispatched (see WorkerPoolClient).
RasterWorkerPool::RasterWorkerPool(
WorkerPoolClient* client, size_t num_threads)
: WorkerPool(client, num_threads) {
}
RasterWorkerPool::~RasterWorkerPool() {
......
......@@ -20,8 +20,9 @@ class RasterWorkerPool : public WorkerPool {
virtual ~RasterWorkerPool();
static scoped_ptr<RasterWorkerPool> Create(size_t num_threads) {
return make_scoped_ptr(new RasterWorkerPool(num_threads));
static scoped_ptr<RasterWorkerPool> Create(
WorkerPoolClient* client, size_t num_threads) {
return make_scoped_ptr(new RasterWorkerPool(client, num_threads));
}
void PostRasterTaskAndReply(PicturePileImpl* picture_pile,
......@@ -29,7 +30,7 @@ class RasterWorkerPool : public WorkerPool {
const base::Closure& reply);
private:
explicit RasterWorkerPool(size_t num_threads);
RasterWorkerPool(WorkerPoolClient* client, size_t num_threads);
DISALLOW_COPY_AND_ASSIGN(RasterWorkerPool);
};
......
......@@ -180,10 +180,11 @@ TileManager::TileManager(
bool use_cheapness_estimator)
: client_(client),
resource_pool_(ResourcePool::Create(resource_provider)),
raster_worker_pool_(RasterWorkerPool::Create(num_raster_threads)),
raster_worker_pool_(RasterWorkerPool::Create(this, num_raster_threads)),
manage_tiles_pending_(false),
manage_tiles_call_count_(0),
bytes_pending_set_pixels_(0),
has_performed_uploads_since_last_flush_(false),
ever_exceeded_memory_budget_(false),
record_rendering_stats_(false),
use_cheapness_estimator_(use_cheapness_estimator) {
......@@ -538,6 +539,16 @@ bool TileManager::HasPendingWorkScheduled(WhichTree tree) const {
return false;
}
// Overridden from WorkerPoolClient. Invoked on the origin thread after the
// worker pool has dispatched a batch of task completion callbacks.
void TileManager::DidFinishDispatchingWorkerPoolCompletionCallbacks() {
  // Uploads performed by the completion callbacks are flushed in a single
  // batch here, before any new tasks are dispatched.
  const bool flush_needed = has_performed_uploads_since_last_flush_;
  if (flush_needed) {
    has_performed_uploads_since_last_flush_ = false;
    resource_pool_->resource_provider()->shallowFlushIfSupported();
  }
  DispatchMoreTasks();
}
void TileManager::AssignGpuMemoryToTiles() {
TRACE_EVENT0("cc", "TileManager::AssignGpuMemoryToTiles");
size_t unreleasable_bytes = 0;
......@@ -749,8 +760,6 @@ void TileManager::OnImageDecodeTaskCompleted(
}
}
}
DispatchMoreTasks();
}
scoped_ptr<ResourcePool::Resource> TileManager::PrepareTileForRaster(
......@@ -838,7 +847,8 @@ void TileManager::OnRasterCompleted(
managed_tile_state.can_be_freed = false;
resource_pool_->resource_provider()->beginSetPixels(resource->id());
resource_pool_->resource_provider()->shallowFlushIfSupported();
has_performed_uploads_since_last_flush_ = true;
managed_tile_state.resource = resource.Pass();
bytes_pending_set_pixels_ += tile->bytes_consumed_if_allocated();
......@@ -858,7 +868,6 @@ void TileManager::OnRasterTaskCompleted(
int manage_tiles_call_count_when_dispatched) {
OnRasterCompleted(tile, resource.Pass(),
manage_tiles_call_count_when_dispatched);
DispatchMoreTasks();
}
void TileManager::DidFinishTileInitialization(Tile* tile) {
......
......@@ -17,6 +17,7 @@
#include "cc/rendering_stats.h"
#include "cc/resource_pool.h"
#include "cc/tile_priority.h"
#include "cc/worker_pool.h"
namespace cc {
class RasterWorkerPool;
......@@ -102,7 +103,7 @@ class CC_EXPORT ManagedTileState {
// should no longer have any memory assigned to them. Tile objects are "owned"
// by layers; they automatically register with the manager when they are
// created, and unregister from the manager when they are deleted.
class CC_EXPORT TileManager {
class CC_EXPORT TileManager : public WorkerPoolClient {
public:
TileManager(TileManagerClient* client,
ResourceProvider *resource_provider,
......@@ -128,7 +129,12 @@ class CC_EXPORT TileManager {
void GetRenderingStats(RenderingStats* stats);
bool HasPendingWorkScheduled(WhichTree tree) const;
const MemoryHistory::Entry& memory_stats_from_last_assign() const { return memory_stats_from_last_assign_; }
const MemoryHistory::Entry& memory_stats_from_last_assign() const {
return memory_stats_from_last_assign_;
}
// Overridden from WorkerPoolClient:
virtual void DidFinishDispatchingWorkerPoolCompletionCallbacks() OVERRIDE;
protected:
// Methods called by Tile
......@@ -158,7 +164,8 @@ class CC_EXPORT TileManager {
void DispatchOneImageDecodeTask(
scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref);
void OnImageDecodeTaskCompleted(
scoped_refptr<Tile> tile, uint32_t pixel_ref_id);
scoped_refptr<Tile> tile,
uint32_t pixel_ref_id);
bool CanDispatchRasterTask(Tile* tile);
scoped_ptr<ResourcePool::Resource> PrepareTileForRaster(Tile* tile);
void DispatchOneRasterTask(scoped_refptr<Tile> tile);
......@@ -216,6 +223,7 @@ class CC_EXPORT TileManager {
typedef std::queue<scoped_refptr<Tile> > TileQueue;
TileQueue tiles_with_pending_set_pixels_;
size_t bytes_pending_set_pixels_;
bool has_performed_uploads_since_last_flush_;
bool ever_exceeded_memory_budget_;
MemoryHistory::Entry memory_stats_from_last_assign_;
......
......@@ -28,6 +28,7 @@ class WorkerPoolTaskImpl : public internal::WorkerPoolTask {
virtual void Run(RenderingStats* rendering_stats) OVERRIDE {
task_.Run(rendering_stats);
base::subtle::Release_Store(&completed_, 1);
}
private:
......@@ -36,22 +37,31 @@ class WorkerPoolTaskImpl : public internal::WorkerPoolTask {
const char* kWorkerThreadNamePrefix = "Compositor";
// Allow two pending tasks per worker. This keeps resource usage
// low while making sure workers aren't unnecessarily idle.
const int kNumPendingTasksPerWorker = 2;
#if defined(OS_ANDROID)
const int kNumPendingTasksPerWorker = 8;
#else
const int kNumPendingTasksPerWorker = 40;
#endif
const int kCheckForCompletedTasksDelayMs = 6;
} // namespace
namespace internal {
WorkerPoolTask::WorkerPoolTask(const base::Closure& reply)
: reply_(reply) {
// Stores the reply callback to run on the origin thread and initializes the
// cross-thread completion flag to "not completed".
WorkerPoolTask::WorkerPoolTask(const base::Closure& reply) : reply_(reply) {
base::subtle::Acquire_Store(&completed_, 0);
}
// Intentionally empty; defined out-of-line in this translation unit.
WorkerPoolTask::~WorkerPoolTask() {
}
void WorkerPoolTask::Completed() {
bool WorkerPoolTask::HasCompleted() {
return base::subtle::Acquire_Load(&completed_) == 1;
}
// Runs the reply callback on the origin thread. Callers must ensure the
// task has completed (HasCompleted() is true) before invoking this.
void WorkerPoolTask::DidComplete() {
DCHECK_EQ(base::subtle::Acquire_Load(&completed_), 1);
reply_.Run();
}
......@@ -60,7 +70,6 @@ void WorkerPoolTask::Completed() {
WorkerPool::Worker::Worker(WorkerPool* worker_pool, const std::string name)
: base::Thread(name.c_str()),
worker_pool_(worker_pool),
weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
rendering_stats_(make_scoped_ptr(new RenderingStats)),
record_rendering_stats_(false) {
Start();
......@@ -80,27 +89,22 @@ void WorkerPool::Worker::StopAfterCompletingAllPendingTasks() {
// all tasks have finished running.
while (!pending_tasks_.empty())
OnTaskCompleted();
// Cancel all pending replies.
weak_ptr_factory_.InvalidateWeakPtrs();
}
void WorkerPool::Worker::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
DCHECK_LT(num_pending_tasks(), kNumPendingTasksPerWorker);
RenderingStats* stats =
record_rendering_stats_ ? rendering_stats_.get() : NULL;
message_loop_proxy()->PostTaskAndReply(
worker_pool_->WillPostTask();
message_loop_proxy()->PostTask(
FROM_HERE,
base::Bind(&Worker::RunTask,
base::Unretained(task.get()),
base::Unretained(stats)),
base::Bind(&Worker::OnTaskCompleted, weak_ptr_factory_.GetWeakPtr()));
base::Unretained(worker_pool_),
base::Unretained(stats)));
pending_tasks_.push_back(task.Pass());
worker_pool_->DidNumPendingTasksChange();
}
void WorkerPool::Worker::Init() {
......@@ -113,22 +117,43 @@ void WorkerPool::Worker::Init() {
// static
void WorkerPool::Worker::RunTask(
internal::WorkerPoolTask* task, RenderingStats* rendering_stats) {
internal::WorkerPoolTask* task,
WorkerPool* worker_pool,
RenderingStats* rendering_stats) {
task->Run(rendering_stats);
worker_pool->OnWorkCompletedOnWorkerThread();
}
void WorkerPool::Worker::OnTaskCompleted() {
CHECK(!pending_tasks_.empty());
scoped_ptr<internal::WorkerPoolTask> task = pending_tasks_.take_front();
task->Completed();
worker_pool_->DidNumPendingTasksChange();
// Notify worker pool of task completion.
worker_pool_->OnTaskCompleted();
task->DidComplete();
}
WorkerPool::WorkerPool(size_t num_threads)
: workers_need_sorting_(false),
shutdown_(false) {
// Dispatches completion handling for each finished task at the front of the
// pending queue. Stops at the first task still running, which preserves
// FIFO completion order for reply callbacks.
void WorkerPool::Worker::CheckForCompletedTasks() {
  while (!pending_tasks_.empty() && pending_tasks_.front()->HasCompleted())
    OnTaskCompleted();
}
WorkerPool::WorkerPool(WorkerPoolClient* client, size_t num_threads)
: client_(client),
origin_loop_(base::MessageLoopProxy::current()),
weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
workers_need_sorting_(false),
pending_task_count_(0),
shutdown_(false),
check_for_completed_tasks_pending_(false),
idle_callback_(
base::Bind(&WorkerPool::OnIdle, weak_ptr_factory_.GetWeakPtr())) {
const std::string thread_name_prefix = kWorkerThreadNamePrefix;
while (workers_.size() < num_threads) {
int thread_number = workers_.size() + 1;
......@@ -136,11 +161,15 @@ WorkerPool::WorkerPool(size_t num_threads)
this,
thread_name_prefix + StringPrintf("Worker%d", thread_number).c_str()));
}
base::subtle::Acquire_Store(&pending_task_count_, 0);
}
WorkerPool::~WorkerPool() {
// Stop all workers first; this blocks until pending tasks have finished.
Shutdown();
STLDeleteElements(&workers_);
// Cancel all pending callbacks.
weak_ptr_factory_.InvalidateWeakPtrs();
// After Shutdown() no work may remain outstanding.
DCHECK_EQ(base::subtle::Acquire_Load(&pending_task_count_), 0);
}
void WorkerPool::Shutdown() {
......@@ -204,7 +233,62 @@ WorkerPool::Worker* WorkerPool::GetWorkerForNextTask() {
return workers_.front();
}
void WorkerPool::DidNumPendingTasksChange() {
void WorkerPool::ScheduleCheckForCompletedTasks() {
if (check_for_completed_tasks_pending_)
return;
check_for_completed_tasks_callback_.Reset(
base::Bind(&WorkerPool::CheckForCompletedTasks,
weak_ptr_factory_.GetWeakPtr()));
origin_loop_->PostDelayedTask(
FROM_HERE,
check_for_completed_tasks_callback_.callback(),
base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs));
check_for_completed_tasks_pending_ = true;
}
// Called on origin thread before posting task to worker. Bumps the
// pool-wide pending task count and ensures a delayed completed-tasks check
// is scheduled so the task's reply eventually gets dispatched.
void WorkerPool::WillPostTask() {
// Pairs with the decrement in OnWorkCompletedOnWorkerThread().
base::subtle::Barrier_AtomicIncrement(&pending_task_count_, 1);
ScheduleCheckForCompletedTasks();
// Worker ordering by pending-task count is now stale.
workers_need_sorting_ = true;
}
// Called on worker thread after completing work. Decrements the pool-wide
// pending task count; the last task to finish notifies the origin thread so
// completion callbacks can be dispatched without waiting for the next poll.
void WorkerPool::OnWorkCompletedOnWorkerThread() {
// Post idle handler task when pool work count reaches 0.
if (base::subtle::Barrier_AtomicIncrement(&pending_task_count_, -1) == 0) {
origin_loop_->PostTask(FROM_HERE, idle_callback_);
}
}
// Called on origin thread after becoming idle. Re-checks the count because
// new tasks may have been posted since the idle notification was queued.
void WorkerPool::OnIdle() {
if (base::subtle::Acquire_Load(&pending_task_count_) == 0) {
// The delayed poll is redundant; cancel it and check right away.
check_for_completed_tasks_callback_.Cancel();
CheckForCompletedTasks();
}
}
void WorkerPool::CheckForCompletedTasks() {
check_for_completed_tasks_pending_ = false;
for (WorkerVector::iterator it = workers_.begin();
it != workers_.end(); it++) {
Worker* worker = *it;
worker->CheckForCompletedTasks();
}
client_->DidFinishDispatchingWorkerPoolCompletionCallbacks();
for (WorkerVector::iterator it = workers_.begin();
it != workers_.end(); it++) {
Worker* worker = *it;
if (worker->num_pending_tasks()) {
ScheduleCheckForCompletedTasks();
break;
}
}
}
// Called when processing task completion. A worker's pending-task count has
// changed, so worker ordering must be recomputed before the next dispatch.
void WorkerPool::OnTaskCompleted() {
workers_need_sorting_ = true;
}
......
......@@ -9,6 +9,7 @@
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/cancelable_callback.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread.h"
......@@ -24,16 +25,28 @@ class WorkerPoolTask {
virtual void Run(RenderingStats* rendering_stats) = 0;
void Completed();
bool HasCompleted();
void DidComplete();
protected:
WorkerPoolTask(const base::Closure& reply);
base::Closure reply_;
const base::Closure reply_;
// Accessed from multiple threads. Set to 1 when task has completed.
base::subtle::Atomic32 completed_;
};
} // namespace internal
// Interface implemented by WorkerPool consumers (e.g. TileManager) to learn
// when a batch of task completion callbacks has been dispatched.
class CC_EXPORT WorkerPoolClient {
public:
// Called on the origin thread after the pool has run the reply callbacks
// of all tasks found completed during a check.
virtual void DidFinishDispatchingWorkerPoolCompletionCallbacks() = 0;
protected:
// Clients are never destroyed through this interface.
virtual ~WorkerPoolClient() {}
};
// A worker thread pool that runs rendering tasks and guarantees completion
// of all pending tasks at shutdown.
class WorkerPool {
......@@ -42,8 +55,9 @@ class WorkerPool {
virtual ~WorkerPool();
static scoped_ptr<WorkerPool> Create(size_t num_threads) {
return make_scoped_ptr(new WorkerPool(num_threads));
static scoped_ptr<WorkerPool> Create(
WorkerPoolClient* client, size_t num_threads) {
return make_scoped_ptr(new WorkerPool(client, num_threads));
}
// Tells the worker pool to shutdown and returns once all pending tasks have
......@@ -76,6 +90,9 @@ class WorkerPool {
// Posts a task to the worker thread.
void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
// Check for completed tasks and run reply callbacks.
void CheckForCompletedTasks();
int num_pending_tasks() const { return pending_tasks_.size(); }
void set_record_rendering_stats(bool record_rendering_stats) {
record_rendering_stats_ = record_rendering_stats;
......@@ -89,18 +106,19 @@ class WorkerPool {
private:
static void RunTask(
internal::WorkerPoolTask* task, RenderingStats* rendering_stats);
internal::WorkerPoolTask* task,
WorkerPool* worker_pool,
RenderingStats* rendering_stats);
void OnTaskCompleted();
WorkerPool* worker_pool_;
base::WeakPtrFactory<Worker> weak_ptr_factory_;
ScopedPtrDeque<internal::WorkerPoolTask> pending_tasks_;
scoped_ptr<RenderingStats> rendering_stats_;
bool record_rendering_stats_;
};
explicit WorkerPool(size_t num_threads);
WorkerPool(WorkerPoolClient* client, size_t num_threads);
WorkerPool::Worker* GetWorkerForNextTask();
......@@ -112,13 +130,39 @@ class WorkerPool {
}
};
void DidNumPendingTasksChange();
// Schedule a completed tasks check if not already pending.
void ScheduleCheckForCompletedTasks();
// Called on origin thread before posting task to worker.
void WillPostTask();
// Called on worker thread after completing work.
void OnWorkCompletedOnWorkerThread();
// Called on origin thread after becoming idle.
void OnIdle();
// Check for completed tasks and run reply callbacks.
void CheckForCompletedTasks();
// Called when processing task completion.
void OnTaskCompleted();
// Ensure workers are sorted by number of pending tasks.
void SortWorkersIfNeeded();
typedef std::vector<Worker*> WorkerVector;
WorkerVector workers_;
WorkerPoolClient* client_;
scoped_refptr<base::MessageLoopProxy> origin_loop_;
base::WeakPtrFactory<WorkerPool> weak_ptr_factory_;
bool workers_need_sorting_;
bool shutdown_;
base::CancelableClosure check_for_completed_tasks_callback_;
bool check_for_completed_tasks_pending_;
base::Closure idle_callback_;
// Accessed from multiple threads. 0 when worker pool is idle.
base::subtle::Atomic32 pending_task_count_;
DISALLOW_COPY_AND_ASSIGN(WorkerPool);
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment