Commit d2f00426 authored by jdduke's avatar jdduke Committed by Commit bot

Ensure deterministic heap prioritization of raster task nodes

Previously, task nodes were all fed the same priority. While certain
heap implementations ensured a consistent prioritization given the same
task priority and input ordering, this is not guaranteed by the spec.
In particular, Android's libc++ implementation appears to differ in this
respect, resulting in tasks from different sets being processed in an
order different from node creation order.

Avoid this by including the task set type in the node priority. All
conforming heap implementations should now yield the same ordering
with respect to task set type.

BUG=427718

Review URL: https://codereview.chromium.org/875573006

Cr-Commit-Position: refs/heads/master@{#313972}
parent 1102b557
...@@ -128,7 +128,8 @@ void BitmapTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { ...@@ -128,7 +128,8 @@ void BitmapTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
kTaskSetFinishedTaskPriority, task_count[task_set]); kTaskSetFinishedTaskPriorityBase + task_set,
task_count[task_set]);
} }
ScheduleTasksOnOriginThread(this, &graph_); ScheduleTasksOnOriginThread(this, &graph_);
......
...@@ -124,7 +124,8 @@ void GpuTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { ...@@ -124,7 +124,8 @@ void GpuTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
kTaskSetFinishedTaskPriority, task_count[task_set]); kTaskSetFinishedTaskPriorityBase + task_set,
task_count[task_set]);
} }
ScheduleTasksOnOriginThread(this, &graph_); ScheduleTasksOnOriginThread(this, &graph_);
......
...@@ -214,7 +214,8 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { ...@@ -214,7 +214,8 @@ void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
kTaskSetFinishedTaskPriority, task_count[task_set]); kTaskSetFinishedTaskPriorityBase + task_set,
task_count[task_set]);
} }
ScheduleTasksOnOriginThread(this, &graph_); ScheduleTasksOnOriginThread(this, &graph_);
......
...@@ -588,7 +588,7 @@ void PixelBufferTileTaskWorkerPool::ScheduleMoreTasks() { ...@@ -588,7 +588,7 @@ void PixelBufferTileTaskWorkerPool::ScheduleMoreTasks() {
task_set)); task_set));
task_set_finished_tasks_pending_[task_set] = true; task_set_finished_tasks_pending_[task_set] = true;
InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
kTaskSetFinishedTaskPriority, kTaskSetFinishedTaskPriorityBase + task_set,
scheduled_task_counts[task_set]); scheduled_task_counts[task_set]);
for (RasterTaskVector::ContainerType::const_iterator it = for (RasterTaskVector::ContainerType::const_iterator it =
tasks[task_set].container().begin(); tasks[task_set].container().begin();
......
...@@ -93,9 +93,12 @@ class TaskSetFinishedTaskImpl : public TileTask { ...@@ -93,9 +93,12 @@ class TaskSetFinishedTaskImpl : public TileTask {
// since it should finish as quickly as possible. // since it should finish as quickly as possible.
unsigned TileTaskWorkerPool::kBenchmarkTaskPriority = 0u; unsigned TileTaskWorkerPool::kBenchmarkTaskPriority = 0u;
// Task priorities that make sure task set finished tasks run before any // Task priorities that make sure task set finished tasks run before any
// other remaining tasks. // other remaining tasks. This is combined with the task set type to ensure
unsigned TileTaskWorkerPool::kTaskSetFinishedTaskPriority = 1u; // proper prioritization ordering between task set types.
unsigned TileTaskWorkerPool::kTileTaskPriorityBase = 2u; unsigned TileTaskWorkerPool::kTaskSetFinishedTaskPriorityBase = 1u;
// For correctness, |kTileTaskPriorityBase| must be greater than
// |kTaskSetFinishedTaskPriorityBase + kNumberOfTaskSets|.
unsigned TileTaskWorkerPool::kTileTaskPriorityBase = 10u;
TileTaskWorkerPool::TileTaskWorkerPool() { TileTaskWorkerPool::TileTaskWorkerPool() {
} }
......
...@@ -21,7 +21,7 @@ class RenderingStatsInstrumentation; ...@@ -21,7 +21,7 @@ class RenderingStatsInstrumentation;
class CC_EXPORT TileTaskWorkerPool { class CC_EXPORT TileTaskWorkerPool {
public: public:
static unsigned kBenchmarkTaskPriority; static unsigned kBenchmarkTaskPriority;
static unsigned kTaskSetFinishedTaskPriority; static unsigned kTaskSetFinishedTaskPriorityBase;
static unsigned kTileTaskPriorityBase; static unsigned kTileTaskPriorityBase;
TileTaskWorkerPool(); TileTaskWorkerPool();
......
...@@ -133,7 +133,8 @@ void ZeroCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) { ...@@ -133,7 +133,8 @@ void ZeroCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) { for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(), InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
kTaskSetFinishedTaskPriority, task_count[task_set]); kTaskSetFinishedTaskPriorityBase + task_set,
task_count[task_set]);
} }
ScheduleTasksOnOriginThread(this, &graph_); ScheduleTasksOnOriginThread(this, &graph_);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment