Commit 482d82ff authored by Alex Clarke, committed by Commit Bot

Stop using LazilyDeallocatedDeque for now

Seems there were some hard-to-understand crashes associated with it.
I aim to re-land if possible.

Change-Id: Iaf6a179ef186c1b0d027c17fa9276fafef23f55e
Bug: 851381
Reviewed-on: https://chromium-review.googlesource.com/1097489
Reviewed-by: Alexander Timin <altimin@chromium.org>
Commit-Queue: Alex Clarke <alexclarke@chromium.org>
Cr-Commit-Position: refs/heads/master@{#566779}
parent 4176876d
......@@ -132,10 +132,12 @@ class LazilyDeallocatedDeque {
void pop_front() {
DCHECK(tail_);
DCHECK_GT(size_, 0u);
head_->pop_front();
// If the ring has become empty and we have more than one ring, remove the
// head one (which we expect to have lower capacity than the remaining one).
// If the ring has become empty and we have several rings, then remove the
// head one (which we expect to have lower capacity than the remaining
// ones).
if (head_->empty() && head_->next_) {
head_ = std::move(head_->next_);
}
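For context, a minimal standalone sketch (illustrative only, not the Chromium implementation) of the behaviour the comment above describes: the deque is a chain of fixed-capacity rings, and pop_front() discards an emptied head ring as long as another ring remains behind it. The inner std::deque here stands in for one Ring.

#include <cassert>
#include <cstddef>
#include <deque>
#include <utility>

template <typename T, size_t kRingCapacity = 4>
class ChainedDeque {
 public:
  void push_back(T value) {
    // Start a new ring when the current back ring is full (the real class
    // grows each new ring's capacity).
    if (rings_.empty() || rings_.back().size() == kRingCapacity)
      rings_.emplace_back();
    rings_.back().push_back(std::move(value));
  }

  void pop_front() {
    assert(!rings_.empty() && !rings_.front().empty());
    rings_.front().pop_front();
    // Drop the head ring once it is empty, provided other rings remain; the
    // head ring is expected to have the smallest capacity.
    if (rings_.front().empty() && rings_.size() > 1)
      rings_.pop_front();
  }

 private:
  std::deque<std::deque<T>> rings_;
};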
......@@ -190,6 +192,7 @@ class LazilyDeallocatedDeque {
size_t real_size = size_;
while (!empty()) {
DCHECK(new_ring->CanPush());
new_ring->push_back(std::move(head_->front()));
pop_front();
}
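The loop above drains every remaining element into a freshly allocated ring so that the old, oversized storage can be released. A rough standalone sketch of that drain-and-rebuild idea (hypothetical helper, not part of this CL):

#include <deque>
#include <utility>

template <typename T>
void DrainAndRebuild(std::deque<T>& tasks) {
  std::deque<T> rebuilt;  // stands in for the freshly sized ring
  while (!tasks.empty()) {
    rebuilt.push_back(std::move(tasks.front()));
    tasks.pop_front();
  }
  tasks = std::move(rebuilt);  // old, oversized storage is released here
}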
......@@ -213,18 +216,8 @@ class LazilyDeallocatedDeque {
front_index_(0),
back_index_(0),
data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
next_(nullptr) {}
Ring(Ring&& other) noexcept {
capacity_ = other.capacity_;
front_index_ = other.front_index_;
back_index_ = other.back_index_;
data_ = other.data_;
other.capacity_ = 0;
other.front_index_ = 0;
other.back_index_ = 0;
other.data_ = nullptr;
next_(nullptr) {
DCHECK_GE(capacity_, kMinimumRingSize);
}
~Ring() {
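The constructor above allocates raw, uninitialized bytes and only constructs elements when they are pushed. A simplified sketch of that storage strategy, ignoring the circular indexing (illustrative only; RawRing and its members are hypothetical names):

#include <cassert>
#include <cstddef>
#include <new>
#include <utility>

template <typename T>
class RawRing {
 public:
  explicit RawRing(size_t capacity)
      : capacity_(capacity),
        data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])) {}

  ~RawRing() {
    for (size_t i = 0; i < size_; ++i)
      data_[i].~T();  // destroy only the elements that were constructed
    delete[] reinterpret_cast<char*>(data_);
  }

  void push_back(T value) {
    assert(size_ < capacity_);
    new (&data_[size_++]) T(std::move(value));  // placement-new into raw bytes
  }

 private:
  size_t capacity_;
  size_t size_ = 0;
  T* data_;
};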
......@@ -286,10 +279,11 @@ class LazilyDeallocatedDeque {
size_t CircularDecrement(size_t index) const {
if (index == 0)
return capacity_ - 1;
return --index;
return index - 1;
}
size_t CircularIncrement(size_t index) const {
DCHECK_LT(index, capacity_);
++index;
if (index == capacity_)
return 0;
......
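Since index is passed by value, returning --index and returning index - 1 compute the same result; the new form just avoids the appearance of mutating state. A standalone sketch of the wrap-around helpers (hypothetical free functions mirroring the Ring members):

#include <cassert>
#include <cstddef>

size_t CircularDecrement(size_t index, size_t capacity) {
  assert(index < capacity);
  if (index == 0)
    return capacity - 1;
  return index - 1;
}

size_t CircularIncrement(size_t index, size_t capacity) {
  assert(index < capacity);
  ++index;
  if (index == capacity)
    return 0;
  return index;
}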
......@@ -347,10 +347,6 @@ void TaskQueueImpl::ReloadEmptyImmediateQueue(TaskDeque* queue) {
AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
queue->swap(immediate_incoming_queue());
// Since |immediate_incoming_queue| is empty, now is a good time to consider
// reducing its capacity if we're wasting memory.
immediate_incoming_queue().MaybeShrinkQueue();
// Activate delayed fence if necessary. This is ideologically similar to
// ActivateDelayedFenceIfNeeded, but due to immediate tasks being posted
// from any thread we can't generate an enqueue order for the fence there,
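The MaybeShrinkQueue() call goes away here because circular_deque manages its own capacity; the surrounding swap is the interesting part, draining the cross-thread incoming queue in O(1) while holding the lock only briefly. A rough sketch of that pattern (illustrative names, not the Chromium API):

#include <cassert>
#include <deque>
#include <mutex>

struct IncomingQueue {
  std::mutex lock;
  std::deque<int> tasks;  // stand-in for TaskDeque

  // |out| must be empty so that nothing is lost by the swap.
  void ReloadInto(std::deque<int>* out) {
    assert(out->empty());
    std::lock_guard<std::mutex> guard(lock);
    out->swap(tasks);  // O(1): the consumer now owns every queued task
  }
};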
......@@ -529,13 +525,6 @@ void TaskQueueImpl::AsValueInto(TimeTicks now,
state->SetInteger("delayed_work_queue_size",
main_thread_only().delayed_work_queue->Size());
state->SetInteger("immediate_incoming_queue_capacity",
immediate_incoming_queue().capacity());
state->SetInteger("immediate_work_queue_capacity",
immediate_work_queue()->Capacity());
state->SetInteger("delayed_work_queue_capacity",
delayed_work_queue()->Capacity());
if (!main_thread_only().delayed_incoming_queue.empty()) {
TimeDelta delay_to_next_task =
(main_thread_only().delayed_incoming_queue.top().delayed_run_time -
......@@ -904,9 +893,6 @@ void TaskQueueImpl::SweepCanceledDelayedTasks(TimeTicks now) {
main_thread_only().delayed_incoming_queue = std::move(remaining_tasks);
// Also consider shrinking the work queue if it's wasting memory.
main_thread_only().delayed_work_queue->MaybeShrinkQueue();
LazyNow lazy_now(now);
UpdateDelayedWakeUp(&lazy_now);
}
......
......@@ -11,6 +11,7 @@
#include <set>
#include "base/callback.h"
#include "base/containers/circular_deque.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
......@@ -22,7 +23,6 @@
#include "third_party/blink/renderer/platform/scheduler/base/enqueue_order.h"
#include "third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h"
#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
#include "third_party/blink/renderer/platform/scheduler/base/lazily_deallocated_deque.h"
#include "third_party/blink/renderer/platform/scheduler/base/task_queue_forward.h"
namespace base {
......@@ -389,9 +389,7 @@ class PLATFORM_EXPORT TaskQueueImpl {
// empty.
void PushOntoImmediateIncomingQueueLocked(Task task);
// We reserve an inline capacity of 8 tasks to try and reduce the load on
// PartitionAlloc.
using TaskDeque = sequence_manager::LazilyDeallocatedDeque<Task>;
using TaskDeque = circular_deque<Task>;
// Extracts all the tasks from the immediate incoming queue and swaps it with
// |queue| which must be empty.
......
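This alias change is the heart of the CL: TaskDeque now points at base::circular_deque instead of LazilyDeallocatedDeque, and callers are unaffected because they only ever name the alias. A minimal sketch of that usage, assuming a Chromium checkout for the circular_deque header (Task and Drain are illustrative):

#include "base/containers/circular_deque.h"

struct Task {
  int sequence_num = 0;
};

using TaskDeque = base::circular_deque<Task>;

// Callers never name the concrete container, so swapping the underlying type
// (as this CL does) is a one-line change in the alias above.
void Drain(TaskDeque* queue) {
  while (!queue->empty())
    queue->pop_front();
}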
......@@ -67,7 +67,7 @@ void WorkQueue::Push(TaskQueueImpl::Task task) {
#endif
// Make sure the |enqueue_order()| is monotonically increasing.
DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order());
DCHECK(was_empty || tasks_.rbegin()->enqueue_order() < task.enqueue_order());
// Amortized O(1).
tasks_.push_back(std::move(task));
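A small standalone sketch of the invariant the DCHECK above enforces: tasks must arrive with strictly increasing enqueue orders, so the back of the queue always holds the largest order seen so far (FakeTask and Push are illustrative):

#include <cassert>
#include <deque>
#include <utility>

struct FakeTask {
  unsigned enqueue_order = 0;
};

void Push(std::deque<FakeTask>* tasks, FakeTask task) {
  assert(tasks->empty() || tasks->back().enqueue_order < task.enqueue_order);
  tasks->push_back(std::move(task));  // amortized O(1)
}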
......@@ -132,16 +132,10 @@ TaskQueueImpl::Task WorkQueue::TakeTaskFromWorkQueue() {
TaskQueueImpl::Task pending_task = std::move(tasks_.front());
tasks_.pop_front();
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopQueue does the right
// thing.
task_queue_->ReloadEmptyImmediateQueue(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
// NB immediate tasks have a different pipeline to delayed ones.
if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
// Short-circuit the queue reload so that OnPopQueue does the right thing.
task_queue_->ReloadEmptyImmediateQueue(&tasks_);
}
// OnPopQueue calls GetFrontTaskEnqueueOrder which checks BlockedByFence() so
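A rough sketch of the short-circuit above (illustrative names, not the Chromium API): only an immediate queue refills itself in place when it runs dry, because delayed tasks arrive through Push and never need reloading here.

#include <cassert>
#include <deque>

enum class QueueType { kImmediate, kDelayed };

struct FakeWorkQueue {
  std::deque<int> tasks;
  QueueType queue_type = QueueType::kImmediate;

  void ReloadFromIncomingQueue() {
    // In the real code this swaps in the cross-thread incoming queue; left
    // empty in this sketch.
  }

  int TakeFront() {
    assert(!tasks.empty());
    int task = tasks.front();
    tasks.pop_front();
    if (queue_type == QueueType::kImmediate && tasks.empty())
      ReloadFromIncomingQueue();  // may leave |tasks| non-empty again
    return task;
  }
};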
......@@ -160,16 +154,10 @@ bool WorkQueue::RemoveAllCanceledTasksFromFront() {
task_removed = true;
}
if (task_removed) {
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopQueue does the right
// thing.
task_queue_->ReloadEmptyImmediateQueue(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
// NB immediate tasks have a different pipeline to delayed ones.
if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
// Short-circuit the queue reload so that OnPopQueue does the right thing.
task_queue_->ReloadEmptyImmediateQueue(&tasks_);
}
work_queue_sets_->OnPopQueue(this);
task_queue_->TraceQueueSize();
......@@ -243,10 +231,6 @@ void WorkQueue::PopTaskForTesting() {
tasks_.pop_front();
}
void WorkQueue::MaybeShrinkQueue() {
tasks_.MaybeShrinkQueue();
}
} // namespace internal
} // namespace sequence_manager
} // namespace base
......@@ -80,8 +80,6 @@ class PLATFORM_EXPORT WorkQueue {
size_t Size() const { return tasks_.size(); }
size_t Capacity() const { return tasks_.capacity(); }
// Pulls a task off the |tasks_| and informs the WorkQueueSets. If the
// task removed had an enqueue order >= the current fence then WorkQueue
// pretends to be empty as far as the WorkQueueSets is concerned.
......@@ -135,9 +133,6 @@ class PLATFORM_EXPORT WorkQueue {
// Test support function. This should not be used in production code.
void PopTaskForTesting();
// Shrinks |tasks_| if it's wasting memory.
void MaybeShrinkQueue();
private:
bool InsertFenceImpl(EnqueueOrder fence);
......