Commit 7041363f authored by Alexandre Courbot, committed by Commit Bot

media/gpu/v4l2vda: use the V4L2Queue class

Convert the V4L2VideoDecodeAccelerator class to use the V4L2Queue. This
considerably reduces its amount of code, while also making buffers
lifecycle safer.

BUG=792790
TEST=Made sure that VDA unittest was compiling and running on Hana

Change-Id: I145778421cfc6d5951249cb9c934ac5e3b1e230b
Reviewed-on: https://chromium-review.googlesource.com/1170707
Reviewed-by: Pawel Osciak <posciak@chromium.org>
Commit-Queue: Alexandre Courbot <acourbot@chromium.org>
Cr-Commit-Position: refs/heads/master@{#595372}
parent 14d350d4
...@@ -105,11 +105,6 @@ V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() { ...@@ -105,11 +105,6 @@ V4L2VideoDecodeAccelerator::BitstreamBufferRef::~BitstreamBufferRef() {
} }
} }
V4L2VideoDecodeAccelerator::InputRecord::InputRecord()
: at_device(false), address(NULL), length(0), bytes_used(0), input_id(-1) {}
V4L2VideoDecodeAccelerator::InputRecord::~InputRecord() {}
V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord() V4L2VideoDecodeAccelerator::OutputRecord::OutputRecord()
: state(kFree), : state(kFree),
egl_image(EGL_NO_IMAGE_KHR), egl_image(EGL_NO_IMAGE_KHR),
...@@ -139,7 +134,6 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( ...@@ -139,7 +134,6 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
output_mode_(Config::OutputMode::ALLOCATE), output_mode_(Config::OutputMode::ALLOCATE),
device_(device), device_(device),
decoder_delay_bitstream_buffer_id_(-1), decoder_delay_bitstream_buffer_id_(-1),
decoder_current_input_buffer_(-1),
decoder_decode_buffer_tasks_scheduled_(0), decoder_decode_buffer_tasks_scheduled_(0),
decoder_frames_at_client_(0), decoder_frames_at_client_(0),
decoder_flushing_(false), decoder_flushing_(false),
...@@ -147,10 +141,6 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator( ...@@ -147,10 +141,6 @@ V4L2VideoDecodeAccelerator::V4L2VideoDecodeAccelerator(
flush_awaiting_last_output_buffer_(false), flush_awaiting_last_output_buffer_(false),
reset_pending_(false), reset_pending_(false),
decoder_partial_frame_pending_(false), decoder_partial_frame_pending_(false),
input_streamon_(false),
input_buffer_queued_count_(0),
output_streamon_(false),
output_buffer_queued_count_(0),
output_dpb_size_(0), output_dpb_size_(0),
output_planes_count_(0), output_planes_count_(0),
picture_clearing_count_(0), picture_clearing_count_(0),
...@@ -174,7 +164,6 @@ V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() { ...@@ -174,7 +164,6 @@ V4L2VideoDecodeAccelerator::~V4L2VideoDecodeAccelerator() {
// These maps have members that should be manually destroyed, e.g. file // These maps have members that should be manually destroyed, e.g. file
// descriptors, mmap() segments, etc. // descriptors, mmap() segments, etc.
DCHECK(input_buffer_map_.empty());
DCHECK(output_buffer_map_.empty()); DCHECK(output_buffer_map_.empty());
} }
...@@ -252,6 +241,15 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config, ...@@ -252,6 +241,15 @@ bool V4L2VideoDecodeAccelerator::Initialize(const Config& config,
} }
output_mode_ = config.output_mode; output_mode_ = config.output_mode;
input_queue_ = device_->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!input_queue_)
return false;
output_queue_ = device_->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (!output_queue_)
return false;
if (!SetupFormats()) if (!SetupFormats())
return false; return false;
...@@ -354,33 +352,40 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask( ...@@ -354,33 +352,40 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
return; return;
} }
// Allocate the output buffers. enum v4l2_memory memory;
struct v4l2_requestbuffers reqbufs; if (!image_processor_device_ && output_mode_ == Config::OutputMode::IMPORT)
memset(&reqbufs, 0, sizeof(reqbufs)); memory = V4L2_MEMORY_DMABUF;
reqbufs.count = buffers.size(); else
reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; memory = V4L2_MEMORY_MMAP;
if (!image_processor_device_ && output_mode_ == Config::OutputMode::IMPORT) {
reqbufs.memory = V4L2_MEMORY_DMABUF; if (output_queue_->AllocateBuffers(buffers.size(), memory) == 0) {
} else { VLOGF(1) << "Failed to request buffers!";
reqbufs.memory = V4L2_MEMORY_MMAP; NOTIFY_ERROR(PLATFORM_FAILURE);
return;
} }
IOCTL_OR_ERROR_RETURN(VIDIOC_REQBUFS, &reqbufs);
if (reqbufs.count != buffers.size()) { if (output_queue_->AllocatedBuffersCount() != buffers.size()) {
VLOGF(1) << "Could not allocate enough output buffers"; VLOGF(1) << "Could not allocate requested number of output buffers";
NOTIFY_ERROR(PLATFORM_FAILURE); NOTIFY_ERROR(PLATFORM_FAILURE);
return; return;
} }
DCHECK(free_output_buffers_.empty());
DCHECK(output_buffer_map_.empty()); DCHECK(output_buffer_map_.empty());
DCHECK(output_wait_map_.empty());
output_buffer_map_.resize(buffers.size()); output_buffer_map_.resize(buffers.size());
if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) { if (image_processor_device_ && output_mode_ == Config::OutputMode::ALLOCATE) {
if (!CreateImageProcessor()) if (!CreateImageProcessor())
return; return;
} }
for (size_t i = 0; i < output_buffer_map_.size(); ++i) { while (output_queue_->FreeBuffersCount() > 0) {
V4L2WritableBufferRef buffer(output_queue_->GetFreeBuffer());
DCHECK(buffer.IsValid());
int i = buffer.BufferId();
// Keep the buffer on our side until ImportBufferForPictureTask() is called
output_wait_map_.emplace(buffers[i].id(), std::move(buffer));
DCHECK(buffers[i].size() == egl_image_size_); DCHECK(buffers[i].size() == egl_image_size_);
OutputRecord& output_record = output_buffer_map_[i]; OutputRecord& output_record = output_buffer_map_[i];
...@@ -431,6 +436,8 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask( ...@@ -431,6 +436,8 @@ void V4L2VideoDecodeAccelerator::AssignPictureBuffersTask(
DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id; DVLOGF(3) << "buffer[" << i << "]: picture_id=" << output_record.picture_id;
} }
DCHECK_EQ(output_queue_->FreeBuffersCount(), 0u);
if (output_mode_ == Config::OutputMode::ALLOCATE) { if (output_mode_ == Config::OutputMode::ALLOCATE) {
ScheduleDecodeBufferTaskIfNeeded(); ScheduleDecodeBufferTaskIfNeeded();
} }
...@@ -511,15 +518,15 @@ void V4L2VideoDecodeAccelerator::AssignEGLImage( ...@@ -511,15 +518,15 @@ void V4L2VideoDecodeAccelerator::AssignEGLImage(
DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR); DCHECK_EQ(output_record.egl_image, EGL_NO_IMAGE_KHR);
DCHECK(!output_record.egl_fence); DCHECK(!output_record.egl_fence);
DCHECK_EQ(output_record.state, kFree); DCHECK_EQ(output_record.state, kFree);
DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
buffer_index),
0);
output_record.egl_image = egl_image; output_record.egl_image = egl_image;
if (output_mode_ == Config::OutputMode::IMPORT) { if (output_mode_ == Config::OutputMode::IMPORT) {
DCHECK(output_record.output_fds.empty()); DCHECK(output_record.output_fds.empty());
output_record.output_fds.swap(dmabuf_fds); output_record.output_fds.swap(dmabuf_fds);
} }
free_output_buffers_.push_back(buffer_index); // Drop our reference so the buffer returns to the queue and can be reused.
output_wait_map_.erase(picture_buffer_id);
if (decoder_state_ != kChangingResolution) { if (decoder_state_ != kChangingResolution) {
Enqueue(); Enqueue();
ScheduleDecodeBufferTaskIfNeeded(); ScheduleDecodeBufferTaskIfNeeded();
...@@ -638,9 +645,6 @@ void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask( ...@@ -638,9 +645,6 @@ void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
} }
size_t index = iter - output_buffer_map_.begin(); size_t index = iter - output_buffer_map_.begin();
DCHECK_EQ(std::count(free_output_buffers_.begin(), free_output_buffers_.end(),
index),
0);
iter->state = kFree; iter->state = kFree;
if (iter->texture_id != 0) { if (iter->texture_id != 0) {
...@@ -661,7 +665,9 @@ void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask( ...@@ -661,7 +665,9 @@ void V4L2VideoDecodeAccelerator::ImportBufferForPictureTask(
// No need for an EGLImage, start using this buffer now. // No need for an EGLImage, start using this buffer now.
DCHECK_EQ(egl_image_planes_count_, dmabuf_fds.size()); DCHECK_EQ(egl_image_planes_count_, dmabuf_fds.size());
iter->output_fds.swap(dmabuf_fds); iter->output_fds.swap(dmabuf_fds);
free_output_buffers_.push_back(index); // If this was the first import, release the reference to the buffer
// so it can be used.
output_wait_map_.erase(picture_buffer_id);
if (decoder_state_ != kChangingResolution) { if (decoder_state_ != kChangingResolution) {
Enqueue(); Enqueue();
ScheduleDecodeBufferTaskIfNeeded(); ScheduleDecodeBufferTaskIfNeeded();
...@@ -850,9 +856,8 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() { ...@@ -850,9 +856,8 @@ void V4L2VideoDecodeAccelerator::DecodeBufferTask() {
// Enqueue a buffer guaranteed to be empty. To do that, we flush the // Enqueue a buffer guaranteed to be empty. To do that, we flush the
// current input, enqueue no data to the next frame, then flush that down. // current input, enqueue no data to the next frame, then flush that down.
schedule_task = true; schedule_task = true;
if (decoder_current_input_buffer_ != -1 && if (current_input_buffer_.IsValid() &&
input_buffer_map_[decoder_current_input_buffer_].input_id != current_input_buffer_.GetTimeStamp().tv_sec != kFlushBufferId)
kFlushBufferId)
schedule_task = FlushInputFrame(); schedule_task = FlushInputFrame();
if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) { if (schedule_task && AppendToInputFrame(NULL, 0) && FlushInputFrame()) {
...@@ -1080,35 +1085,32 @@ bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data, ...@@ -1080,35 +1085,32 @@ bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data,
// we queue an empty buffer for the purposes of flushing the pipe. // we queue an empty buffer for the purposes of flushing the pipe.
// Flush if we're too big // Flush if we're too big
if (decoder_current_input_buffer_ != -1) { if (current_input_buffer_.IsValid()) {
InputRecord& input_record = size_t plane_size = current_input_buffer_.GetPlaneSize(0);
input_buffer_map_[decoder_current_input_buffer_]; size_t bytes_used = current_input_buffer_.GetPlaneBytesUsed(0);
if (input_record.bytes_used + size > input_record.length) { if (bytes_used + size > plane_size) {
if (!FlushInputFrame()) if (!FlushInputFrame())
return false; return false;
decoder_current_input_buffer_ = -1;
} }
} }
// Try to get an available input buffer // Try to get an available input buffer.
if (decoder_current_input_buffer_ == -1) { if (!current_input_buffer_.IsValid()) {
if (free_input_buffers_.empty()) { DCHECK(decoder_current_bitstream_buffer_ != NULL);
// See if we can get more free buffers from HW
// See if we can get more free buffers from HW.
if (input_queue_->FreeBuffersCount() == 0)
Dequeue(); Dequeue();
if (free_input_buffers_.empty()) {
// Nope! current_input_buffer_ = input_queue_->GetFreeBuffer();
if (!current_input_buffer_.IsValid()) {
// No buffer available yet.
DVLOGF(4) << "stalled for input buffers"; DVLOGF(4) << "stalled for input buffers";
return false; return false;
} }
} struct timeval timestamp = {
decoder_current_input_buffer_ = free_input_buffers_.back(); .tv_sec = decoder_current_bitstream_buffer_->input_id};
free_input_buffers_.pop_back(); current_input_buffer_.SetTimeStamp(timestamp);
InputRecord& input_record =
input_buffer_map_[decoder_current_input_buffer_];
DCHECK_EQ(input_record.bytes_used, 0);
DCHECK_EQ(input_record.input_id, -1);
DCHECK(decoder_current_bitstream_buffer_ != NULL);
input_record.input_id = decoder_current_bitstream_buffer_->input_id;
} }
DCHECK(data != NULL || size == 0); DCHECK(data != NULL || size == 0);
...@@ -1120,16 +1122,17 @@ bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data, ...@@ -1120,16 +1122,17 @@ bool V4L2VideoDecodeAccelerator::AppendToInputFrame(const void* data,
} }
// Copy in to the buffer. // Copy in to the buffer.
InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; size_t plane_size = current_input_buffer_.GetPlaneSize(0);
if (size > input_record.length - input_record.bytes_used) { size_t bytes_used = current_input_buffer_.GetPlaneBytesUsed(0);
if (size > plane_size - bytes_used) {
VLOGF(1) << "over-size frame, erroring"; VLOGF(1) << "over-size frame, erroring";
NOTIFY_ERROR(UNREADABLE_INPUT); NOTIFY_ERROR(UNREADABLE_INPUT);
return false; return false;
} }
memcpy(reinterpret_cast<uint8_t*>(input_record.address) + void* mapping = current_input_buffer_.GetPlaneMapping(0);
input_record.bytes_used, memcpy(reinterpret_cast<uint8_t*>(mapping) + bytes_used, data, size);
data, size); current_input_buffer_.SetPlaneBytesUsed(0, bytes_used + size);
input_record.bytes_used += size;
return true; return true;
} }
...@@ -1141,29 +1144,26 @@ bool V4L2VideoDecodeAccelerator::FlushInputFrame() { ...@@ -1141,29 +1144,26 @@ bool V4L2VideoDecodeAccelerator::FlushInputFrame() {
DCHECK_NE(decoder_state_, kResetting); DCHECK_NE(decoder_state_, kResetting);
DCHECK_NE(decoder_state_, kError); DCHECK_NE(decoder_state_, kError);
if (decoder_current_input_buffer_ == -1) if (!current_input_buffer_.IsValid())
return true; return true;
InputRecord& input_record = input_buffer_map_[decoder_current_input_buffer_]; const int32_t input_buffer_id = current_input_buffer_.GetTimeStamp().tv_sec;
DCHECK_NE(input_record.input_id, -1);
DCHECK(input_record.input_id != kFlushBufferId || DCHECK(input_buffer_id != kFlushBufferId ||
input_record.bytes_used == 0); current_input_buffer_.GetPlaneBytesUsed(0) == 0);
// * if input_id >= 0, this input buffer was prompted by a bitstream buffer we // * if input_id >= 0, this input buffer was prompted by a bitstream buffer we
// got from the client. We can skip it if it is empty. // got from the client. We can skip it if it is empty.
// * if input_id < 0 (should be kFlushBufferId in this case), this input // * if input_id < 0 (should be kFlushBufferId in this case), this input
// buffer was prompted by a flush buffer, and should be queued even when // buffer was prompted by a flush buffer, and should be queued even when
// empty. // empty.
if (input_record.input_id >= 0 && input_record.bytes_used == 0) { if (input_buffer_id >= 0 && current_input_buffer_.GetPlaneBytesUsed(0) == 0) {
input_record.input_id = -1; current_input_buffer_ = V4L2WritableBufferRef();
free_input_buffers_.push_back(decoder_current_input_buffer_);
decoder_current_input_buffer_ = -1;
return true; return true;
} }
// Queue it. // Queue it.
input_ready_queue_.push(decoder_current_input_buffer_); DVLOGF(4) << "submitting input_id=" << input_buffer_id;
decoder_current_input_buffer_ = -1; input_ready_queue_.push(std::move(current_input_buffer_));
DVLOGF(4) << "submitting input_id=" << input_record.input_id;
// Enqueue once since there's new available input for it. // Enqueue once since there's new available input for it.
Enqueue(); Enqueue();
...@@ -1224,7 +1224,8 @@ void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) { ...@@ -1224,7 +1224,8 @@ void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
bool poll_device = false; bool poll_device = false;
// Add fd, if we should poll on it. // Add fd, if we should poll on it.
// Can be polled as soon as either input or output buffers are queued. // Can be polled as soon as either input or output buffers are queued.
if (input_buffer_queued_count_ + output_buffer_queued_count_ > 0) if (input_queue_->QueuedBuffersCount() + output_queue_->QueuedBuffersCount() >
0)
poll_device = true; poll_device = true;
// ServiceDeviceTask() should only ever be scheduled from DevicePollTask(), // ServiceDeviceTask() should only ever be scheduled from DevicePollTask(),
...@@ -1240,13 +1241,12 @@ void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) { ...@@ -1240,13 +1241,12 @@ void V4L2VideoDecodeAccelerator::ServiceDeviceTask(bool event_pending) {
base::Unretained(this), poll_device)); base::Unretained(this), poll_device));
DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC[" DVLOGF(3) << "ServiceDeviceTask(): buffer counts: DEC["
<< decoder_input_queue_.size() << "->" << decoder_input_queue_.size() << "->" << input_ready_queue_.size()
<< input_ready_queue_.size() << "] => DEVICE[" << "] => DEVICE[" << input_queue_->FreeBuffersCount() << "+"
<< free_input_buffers_.size() << "+" << input_queue_->QueuedBuffersCount() << "/"
<< input_buffer_queued_count_ << "/" << input_queue_->AllocatedBuffersCount() << "->"
<< input_buffer_map_.size() << "->" << output_queue_->FreeBuffersCount() << "+"
<< free_output_buffers_.size() << "+" << output_queue_->QueuedBuffersCount() << "/"
<< output_buffer_queued_count_ << "/"
<< output_buffer_map_.size() << "] => PROCESSOR[" << output_buffer_map_.size() << "] => PROCESSOR["
<< image_processor_bitstream_buffer_ids_.size() << "] => CLIENT[" << image_processor_bitstream_buffer_ids_.size() << "] => CLIENT["
<< decoder_frames_at_client_ << "]"; << decoder_frames_at_client_ << "]";
...@@ -1263,13 +1263,11 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1263,13 +1263,11 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
TRACE_EVENT0("media,gpu", "V4L2VDA::Enqueue"); TRACE_EVENT0("media,gpu", "V4L2VDA::Enqueue");
// Drain the pipe of completed decode buffers. // Drain the pipe of completed decode buffers.
const int old_inputs_queued = input_buffer_queued_count_; const int old_inputs_queued = input_queue_->QueuedBuffersCount();
while (!input_ready_queue_.empty()) { while (!input_ready_queue_.empty()) {
const int buffer = input_ready_queue_.front();
InputRecord& input_record = input_buffer_map_[buffer];
bool flush_handled = false; bool flush_handled = false;
if (input_record.input_id == kFlushBufferId) { int32_t input_id = input_ready_queue_.front().GetTimeStamp().tv_sec;
if (input_id == kFlushBufferId) {
// Send the flush command after all input buffers are dequeued. This makes // Send the flush command after all input buffers are dequeued. This makes
// sure all previous resolution changes have been handled because the // sure all previous resolution changes have been handled because the
// driver must hold the input buffer that triggers resolution change. The // driver must hold the input buffer that triggers resolution change. The
...@@ -1280,10 +1278,10 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1280,10 +1278,10 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
// yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
// change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
// to the decoder. // to the decoder.
if (input_buffer_queued_count_ > 0) if (input_queue_->QueuedBuffersCount() > 0)
break; break;
if (coded_size_.IsEmpty() || !input_streamon_) { if (coded_size_.IsEmpty() || !input_queue_->IsStreaming()) {
// In these situations, we should call NotifyFlushDone() immediately: // In these situations, we should call NotifyFlushDone() immediately:
// (1) If coded_size_.IsEmpty(), no output buffer could have been // (1) If coded_size_.IsEmpty(), no output buffer could have been
// allocated and there is nothing to flush. // allocated and there is nothing to flush.
...@@ -1301,8 +1299,6 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1301,8 +1299,6 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
if (flush_handled) { if (flush_handled) {
// Recycle the buffer directly if we already handled the flush request. // Recycle the buffer directly if we already handled the flush request.
input_ready_queue_.pop(); input_ready_queue_.pop();
free_input_buffers_.push_back(buffer);
input_record.input_id = -1;
} else { } else {
// Enqueue an input buffer, or an empty flush buffer if decoder cmd // Enqueue an input buffer, or an empty flush buffer if decoder cmd
// is not supported and there may be buffers to be flushed. // is not supported and there may be buffers to be flushed.
...@@ -1311,7 +1307,7 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1311,7 +1307,7 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
} }
} }
if (old_inputs_queued == 0 && input_buffer_queued_count_ != 0) { if (old_inputs_queued == 0 && input_queue_->QueuedBuffersCount() != 0) {
// We just started up a previously empty queue. // We just started up a previously empty queue.
// Queue state changed; signal interrupt. // Queue state changed; signal interrupt.
if (!device_->SetDevicePollInterrupt()) { if (!device_->SetDevicePollInterrupt()) {
...@@ -1320,24 +1316,23 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1320,24 +1316,23 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
return; return;
} }
// Start VIDIOC_STREAMON if we haven't yet. // Start VIDIOC_STREAMON if we haven't yet.
if (!input_streamon_) { if (!input_queue_->Streamon()) {
__u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; NOTIFY_ERROR(PLATFORM_FAILURE);
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type); return;
input_streamon_ = true;
} }
} }
// OUTPUT queue must be started before CAPTURE queue as per codec API. // OUTPUT queue must be started before CAPTURE queue as per codec API.
if (!input_streamon_) if (!input_queue_->IsStreaming())
return; return;
// Enqueue all the outputs we can. // Enqueue all the outputs we can.
const int old_outputs_queued = output_buffer_queued_count_; const int old_outputs_queued = output_queue_->QueuedBuffersCount();
while (!free_output_buffers_.empty()) { while (output_queue_->FreeBuffersCount() > 0) {
if (!EnqueueOutputRecord()) if (!EnqueueOutputRecord())
return; return;
} }
if (old_outputs_queued == 0 && output_buffer_queued_count_ != 0) { if (old_outputs_queued == 0 && output_queue_->QueuedBuffersCount() != 0) {
// We just started up a previously empty queue. // We just started up a previously empty queue.
// Queue state changed; signal interrupt. // Queue state changed; signal interrupt.
if (!device_->SetDevicePollInterrupt()) { if (!device_->SetDevicePollInterrupt()) {
...@@ -1345,11 +1340,10 @@ void V4L2VideoDecodeAccelerator::Enqueue() { ...@@ -1345,11 +1340,10 @@ void V4L2VideoDecodeAccelerator::Enqueue() {
NOTIFY_ERROR(PLATFORM_FAILURE); NOTIFY_ERROR(PLATFORM_FAILURE);
return; return;
} }
// Start VIDIOC_STREAMON if we haven't yet.
if (!output_streamon_) { if (!output_queue_->Streamon()) {
__u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; NOTIFY_ERROR(PLATFORM_FAILURE);
IOCTL_OR_ERROR_RETURN(VIDIOC_STREAMON, &type); return;
output_streamon_ = true;
} }
} }
} }
...@@ -1381,11 +1375,11 @@ void V4L2VideoDecodeAccelerator::Dequeue() { ...@@ -1381,11 +1375,11 @@ void V4L2VideoDecodeAccelerator::Dequeue() {
DCHECK_NE(decoder_state_, kUninitialized); DCHECK_NE(decoder_state_, kUninitialized);
TRACE_EVENT0("media,gpu", "V4L2VDA::Dequeue"); TRACE_EVENT0("media,gpu", "V4L2VDA::Dequeue");
while (input_buffer_queued_count_ > 0) { while (input_queue_->QueuedBuffersCount() > 0) {
if (!DequeueInputBuffer()) if (!DequeueInputBuffer())
break; break;
} }
while (output_buffer_queued_count_ > 0) { while (output_queue_->QueuedBuffersCount() > 0) {
if (!DequeueOutputBuffer()) if (!DequeueOutputBuffer())
break; break;
} }
...@@ -1394,82 +1388,55 @@ void V4L2VideoDecodeAccelerator::Dequeue() { ...@@ -1394,82 +1388,55 @@ void V4L2VideoDecodeAccelerator::Dequeue() {
bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() { bool V4L2VideoDecodeAccelerator::DequeueInputBuffer() {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK_GT(input_buffer_queued_count_, 0); DCHECK_GT(input_queue_->QueuedBuffersCount(), 0u);
DCHECK(input_streamon_);
// Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free // Dequeue a completed input (VIDEO_OUTPUT) buffer, and recycle to the free
// list. // list.
struct v4l2_buffer dqbuf; auto ret = input_queue_->DequeueBuffer();
struct v4l2_plane planes[1];
memset(&dqbuf, 0, sizeof(dqbuf)); if (ret.first == false) {
memset(planes, 0, sizeof(planes));
dqbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
dqbuf.memory = V4L2_MEMORY_MMAP;
dqbuf.m.planes = planes;
dqbuf.length = 1;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
// EAGAIN if we're just out of buffers to dequeue.
return false;
}
VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF";
NOTIFY_ERROR(PLATFORM_FAILURE); NOTIFY_ERROR(PLATFORM_FAILURE);
return false; return false;
} else if (!ret.second) {
// we're just out of buffers to dequeue.
return false;
} }
InputRecord& input_record = input_buffer_map_[dqbuf.index];
DCHECK(input_record.at_device);
free_input_buffers_.push_back(dqbuf.index);
input_record.at_device = false;
input_record.bytes_used = 0;
input_record.input_id = -1;
input_buffer_queued_count_--;
return true; return true;
} }
bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() { bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK_GT(output_buffer_queued_count_, 0); DCHECK_GT(output_queue_->QueuedBuffersCount(), 0u);
DCHECK(output_streamon_); DCHECK(output_queue_->IsStreaming());
// Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the // Dequeue a completed output (VIDEO_CAPTURE) buffer, and queue to the
// completed queue. // completed queue.
struct v4l2_buffer dqbuf; auto ret = output_queue_->DequeueBuffer();
std::unique_ptr<struct v4l2_plane[]> planes( if (ret.first == false) {
new v4l2_plane[output_planes_count_]); NOTIFY_ERROR(PLATFORM_FAILURE);
memset(&dqbuf, 0, sizeof(dqbuf));
memset(planes.get(), 0, sizeof(struct v4l2_plane) * output_planes_count_);
dqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dqbuf.memory = V4L2_MEMORY_MMAP;
dqbuf.m.planes = planes.get();
dqbuf.length = output_planes_count_;
if (device_->Ioctl(VIDIOC_DQBUF, &dqbuf) != 0) {
if (errno == EAGAIN) {
// EAGAIN if we're just out of buffers to dequeue.
return false;
} else if (errno == EPIPE) {
DVLOGF(3) << "Got EPIPE. Last output buffer was already dequeued.";
return false; return false;
} }
VPLOGF(1) << "ioctl() failed: VIDIOC_DQBUF"; if (!ret.second) {
NOTIFY_ERROR(PLATFORM_FAILURE);
return false; return false;
} }
OutputRecord& output_record = output_buffer_map_[dqbuf.index];
V4L2ReadableBufferRef buf = ret.second;
DCHECK_LT(buf->BufferId(), output_buffer_map_.size());
OutputRecord& output_record = output_buffer_map_[buf->BufferId()];
DCHECK_EQ(output_record.state, kAtDevice); DCHECK_EQ(output_record.state, kAtDevice);
DCHECK_NE(output_record.picture_id, -1); DCHECK_NE(output_record.picture_id, -1);
output_buffer_queued_count_--; if (buf->GetPlaneBytesUsed(0) == 0) {
if (dqbuf.m.planes[0].bytesused == 0) {
// This is an empty output buffer returned as part of a flush. // This is an empty output buffer returned as part of a flush.
output_record.state = kFree; output_record.state = kFree;
free_output_buffers_.push_back(dqbuf.index);
} else { } else {
int32_t bitstream_buffer_id = dqbuf.timestamp.tv_sec; int32_t bitstream_buffer_id = buf->GetTimeStamp().tv_sec;
DCHECK_GE(bitstream_buffer_id, 0); DCHECK_GE(bitstream_buffer_id, 0);
DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << dqbuf.index DVLOGF(4) << "Dequeue output buffer: dqbuf index=" << buf->BufferId()
<< " bitstream input_id=" << bitstream_buffer_id; << " bitstream input_id=" << bitstream_buffer_id;
if (image_processor_device_) { if (image_processor_device_) {
if (!ProcessFrame(bitstream_buffer_id, dqbuf.index)) { if (!ProcessFrame(bitstream_buffer_id, buf->BufferId())) {
VLOGF(1) << "Processing frame failed"; VLOGF(1) << "Processing frame failed";
NOTIFY_ERROR(PLATFORM_FAILURE); NOTIFY_ERROR(PLATFORM_FAILURE);
return false; return false;
...@@ -1486,7 +1453,7 @@ bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() { ...@@ -1486,7 +1453,7 @@ bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
output_record.cleared = true; output_record.cleared = true;
} }
} }
if (dqbuf.flags & V4L2_BUF_FLAG_LAST) { if (buf->IsLast()) {
DVLOGF(3) << "Got last output buffer. Waiting last buffer=" DVLOGF(3) << "Got last output buffer. Waiting last buffer="
<< flush_awaiting_last_output_buffer_; << flush_awaiting_last_output_buffer_;
if (flush_awaiting_last_output_buffer_) { if (flush_awaiting_last_output_buffer_) {
...@@ -1497,6 +1464,13 @@ bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() { ...@@ -1497,6 +1464,13 @@ bool V4L2VideoDecodeAccelerator::DequeueOutputBuffer() {
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd); IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_DECODER_CMD, &cmd);
} }
} }
if (buf->GetPlaneBytesUsed(0) > 0) {
// Keep a reference to this buffer until the client returns it
DCHECK_EQ(buffers_at_client_.count(output_record.picture_id), 0u);
buffers_at_client_.emplace(output_record.picture_id, std::move(buf));
}
return true; return true;
} }
...@@ -1505,36 +1479,23 @@ bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() { ...@@ -1505,36 +1479,23 @@ bool V4L2VideoDecodeAccelerator::EnqueueInputRecord() {
DCHECK(!input_ready_queue_.empty()); DCHECK(!input_ready_queue_.empty());
// Enqueue an input (VIDEO_OUTPUT) buffer. // Enqueue an input (VIDEO_OUTPUT) buffer.
const int buffer = input_ready_queue_.front(); auto buffer = std::move(input_ready_queue_.front());
InputRecord& input_record = input_buffer_map_[buffer];
DCHECK(!input_record.at_device);
struct v4l2_buffer qbuf;
struct v4l2_plane qbuf_plane;
memset(&qbuf, 0, sizeof(qbuf));
memset(&qbuf_plane, 0, sizeof(qbuf_plane));
qbuf.index = buffer;
qbuf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
qbuf.timestamp.tv_sec = input_record.input_id;
qbuf.memory = V4L2_MEMORY_MMAP;
qbuf.m.planes = &qbuf_plane;
qbuf.m.planes[0].bytesused = input_record.bytes_used;
qbuf.length = 1;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
input_ready_queue_.pop(); input_ready_queue_.pop();
input_record.at_device = true; int32_t input_id = buffer.GetTimeStamp().tv_sec;
input_buffer_queued_count_++; size_t bytes_used = buffer.GetPlaneBytesUsed(0);
DVLOGF(4) << "enqueued input_id=" << input_record.input_id if (!std::move(buffer).QueueMMap()) {
<< " size=" << input_record.bytes_used; NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
DVLOGF(4) << "enqueued input_id=" << input_id << " size=" << bytes_used;
return true; return true;
} }
bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
DCHECK(!free_output_buffers_.empty()); V4L2WritableBufferRef buffer = output_queue_->GetFreeBuffer();
DCHECK(buffer.IsValid());
// Enqueue an output (VIDEO_CAPTURE) buffer. OutputRecord& output_record = output_buffer_map_[buffer.BufferId()];
const int buffer = free_output_buffers_.front();
DVLOGF(4) << "buffer " << buffer;
OutputRecord& output_record = output_buffer_map_[buffer];
DCHECK_EQ(output_record.state, kFree); DCHECK_EQ(output_record.state, kFree);
DCHECK_NE(output_record.picture_id, -1); DCHECK_NE(output_record.picture_id, -1);
if (output_record.egl_fence) { if (output_record.egl_fence) {
...@@ -1565,30 +1526,26 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() { ...@@ -1565,30 +1526,26 @@ bool V4L2VideoDecodeAccelerator::EnqueueOutputRecord() {
output_record.egl_fence.reset(); output_record.egl_fence.reset();
} }
struct v4l2_buffer qbuf;
std::unique_ptr<struct v4l2_plane[]> qbuf_planes( bool ret = false;
new v4l2_plane[output_planes_count_]); switch (buffer.Memory()) {
memset(&qbuf, 0, sizeof(qbuf)); case V4L2_MEMORY_MMAP:
memset(qbuf_planes.get(), 0, ret = std::move(buffer).QueueMMap();
sizeof(struct v4l2_plane) * output_planes_count_); break;
qbuf.index = buffer; case V4L2_MEMORY_DMABUF:
qbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
if (!image_processor_device_ && output_mode_ == Config::OutputMode::IMPORT) {
DCHECK_EQ(output_planes_count_, output_record.output_fds.size()); DCHECK_EQ(output_planes_count_, output_record.output_fds.size());
for (size_t i = 0; i < output_planes_count_; ++i) { ret = std::move(buffer).QueueDMABuf(output_record.output_fds);
qbuf_planes[i].m.fd = output_record.output_fds[i].get(); break;
default:
NOTREACHED();
} }
qbuf.memory = V4L2_MEMORY_DMABUF;
} else { if (!ret) {
qbuf.memory = V4L2_MEMORY_MMAP; NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
} }
qbuf.m.planes = qbuf_planes.get();
qbuf.length = output_planes_count_;
DVLOGF(4) << "qbuf.index=" << qbuf.index;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QBUF, &qbuf);
free_output_buffers_.pop_front();
output_record.state = kAtDevice; output_record.state = kAtDevice;
output_buffer_queued_count_++;
return true; return true;
} }
...@@ -1613,12 +1570,8 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( ...@@ -1613,12 +1570,8 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(
return; return;
} }
size_t index; auto iter = buffers_at_client_.find(picture_buffer_id);
for (index = 0; index < output_buffer_map_.size(); ++index) if (iter == buffers_at_client_.end()) {
if (output_buffer_map_[index].picture_id == picture_buffer_id)
break;
if (index >= output_buffer_map_.size()) {
// It's possible that we've already posted a DismissPictureBuffer for this // It's possible that we've already posted a DismissPictureBuffer for this
// picture, but it has not yet executed when this ReusePictureBuffer was // picture, but it has not yet executed when this ReusePictureBuffer was
// posted to us by the client. In that case just ignore this (we've already // posted to us by the client. In that case just ignore this (we've already
...@@ -1628,8 +1581,10 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( ...@@ -1628,8 +1581,10 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(
<< " not in use (anymore?)."; << " not in use (anymore?).";
return; return;
} }
V4L2ReadableBufferRef buffer = std::move(iter->second);
buffers_at_client_.erase(iter);
OutputRecord& output_record = output_buffer_map_[index]; OutputRecord& output_record = output_buffer_map_[buffer->BufferId()];
if (output_record.state != kAtClient) { if (output_record.state != kAtClient) {
VLOGF(1) << "picture_buffer_id not reusable"; VLOGF(1) << "picture_buffer_id not reusable";
NOTIFY_ERROR(INVALID_ARGUMENT); NOTIFY_ERROR(INVALID_ARGUMENT);
...@@ -1638,7 +1593,6 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask( ...@@ -1638,7 +1593,6 @@ void V4L2VideoDecodeAccelerator::ReusePictureBufferTask(
DCHECK(!output_record.egl_fence); DCHECK(!output_record.egl_fence);
output_record.state = kFree; output_record.state = kFree;
free_output_buffers_.push_back(index);
decoder_frames_at_client_--; decoder_frames_at_client_--;
// Take ownership of the EGL fence. // Take ownership of the EGL fence.
output_record.egl_fence = std::move(egl_fence); output_record.egl_fence = std::move(egl_fence);
...@@ -1690,11 +1644,11 @@ void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() { ...@@ -1690,11 +1644,11 @@ void V4L2VideoDecodeAccelerator::NotifyFlushDoneIfNeeded() {
return; return;
} }
} }
if (decoder_current_input_buffer_ != -1) { if (current_input_buffer_.IsValid()) {
DVLOGF(3) << "Current input buffer != -1"; DVLOGF(3) << "Current input buffer != -1";
return; return;
} }
if ((input_ready_queue_.size() + input_buffer_queued_count_) != 0) { if ((input_ready_queue_.size() + input_queue_->QueuedBuffersCount()) != 0) {
DVLOGF(3) << "Some input buffers are not dequeued."; DVLOGF(3) << "Some input buffers are not dequeued.";
return; return;
} }
...@@ -1780,7 +1734,7 @@ void V4L2VideoDecodeAccelerator::ResetTask() { ...@@ -1780,7 +1734,7 @@ void V4L2VideoDecodeAccelerator::ResetTask() {
while (!decoder_input_queue_.empty()) while (!decoder_input_queue_.empty())
decoder_input_queue_.pop(); decoder_input_queue_.pop();
decoder_current_input_buffer_ = -1; current_input_buffer_ = V4L2WritableBufferRef();
// If we are in the middle of switching resolutions or awaiting picture // If we are in the middle of switching resolutions or awaiting picture
// buffers, postpone reset until it's done. We don't have to worry about // buffers, postpone reset until it's done. We don't have to worry about
...@@ -1888,7 +1842,7 @@ void V4L2VideoDecodeAccelerator::DestroyTask() { ...@@ -1888,7 +1842,7 @@ void V4L2VideoDecodeAccelerator::DestroyTask() {
StopInputStream(); StopInputStream();
decoder_current_bitstream_buffer_.reset(); decoder_current_bitstream_buffer_.reset();
decoder_current_input_buffer_ = -1; current_input_buffer_ = V4L2WritableBufferRef();
decoder_decode_buffer_tasks_scheduled_ = 0; decoder_decode_buffer_tasks_scheduled_ = 0;
decoder_frames_at_client_ = 0; decoder_frames_at_client_ = 0;
while (!decoder_input_queue_.empty()) while (!decoder_input_queue_.empty())
...@@ -1946,12 +1900,13 @@ bool V4L2VideoDecodeAccelerator::StopDevicePoll() { ...@@ -1946,12 +1900,13 @@ bool V4L2VideoDecodeAccelerator::StopDevicePoll() {
bool V4L2VideoDecodeAccelerator::StopOutputStream() { bool V4L2VideoDecodeAccelerator::StopOutputStream() {
VLOGF(2); VLOGF(2);
if (!output_streamon_) if (!output_queue_->IsStreaming())
return true; return true;
__u32 type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; if (!output_queue_->Streamoff()) {
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); NOTIFY_ERROR(PLATFORM_FAILURE);
output_streamon_ = false; return false;
}
// Output stream is stopped. No need to wait for the buffer anymore. // Output stream is stopped. No need to wait for the buffer anymore.
flush_awaiting_last_output_buffer_ = false; flush_awaiting_last_output_buffer_ = false;
...@@ -1963,34 +1918,25 @@ bool V4L2VideoDecodeAccelerator::StopOutputStream() { ...@@ -1963,34 +1918,25 @@ bool V4L2VideoDecodeAccelerator::StopOutputStream() {
OutputRecord& output_record = output_buffer_map_[i]; OutputRecord& output_record = output_buffer_map_[i];
if (output_record.state == kAtDevice) { if (output_record.state == kAtDevice) {
output_record.state = kFree; output_record.state = kFree;
free_output_buffers_.push_back(i);
DCHECK(!output_record.egl_fence); DCHECK(!output_record.egl_fence);
} }
} }
output_buffer_queued_count_ = 0;
return true; return true;
} }
bool V4L2VideoDecodeAccelerator::StopInputStream() { bool V4L2VideoDecodeAccelerator::StopInputStream() {
VLOGF(2); VLOGF(2);
if (!input_streamon_) if (!input_queue_->IsStreaming())
return true; return true;
__u32 type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; if (!input_queue_->Streamoff()) {
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_STREAMOFF, &type); NOTIFY_ERROR(PLATFORM_FAILURE);
input_streamon_ = false; return false;
}
// Reset accounting info for input. // Reset accounting info for input.
while (!input_ready_queue_.empty()) while (!input_ready_queue_.empty())
input_ready_queue_.pop(); input_ready_queue_.pop();
free_input_buffers_.clear();
for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
free_input_buffers_.push_back(i);
input_buffer_map_[i].at_device = false;
input_buffer_map_[i].bytes_used = 0;
input_buffer_map_[i].input_id = -1;
}
input_buffer_queued_count_ = 0;
return true; return true;
} }
...@@ -2232,42 +2178,11 @@ bool V4L2VideoDecodeAccelerator::CreateInputBuffers() { ...@@ -2232,42 +2178,11 @@ bool V4L2VideoDecodeAccelerator::CreateInputBuffers() {
DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread()); DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
// We always run this as we prepare to initialize. // We always run this as we prepare to initialize.
DCHECK_EQ(decoder_state_, kInitialized); DCHECK_EQ(decoder_state_, kInitialized);
DCHECK(!input_streamon_);
DCHECK(input_buffer_map_.empty()); if (input_queue_->AllocateBuffers(kInputBufferCount, V4L2_MEMORY_MMAP) == 0) {
NOTIFY_ERROR(PLATFORM_FAILURE);
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = kInputBufferCount;
reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_REQBUFS, &reqbufs);
input_buffer_map_.resize(reqbufs.count);
for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
free_input_buffers_.push_back(i);
// Query for the MEMORY_MMAP pointer.
struct v4l2_plane planes[1];
struct v4l2_buffer buffer;
memset(&buffer, 0, sizeof(buffer));
memset(planes, 0, sizeof(planes));
buffer.index = i;
buffer.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
buffer.memory = V4L2_MEMORY_MMAP;
buffer.m.planes = planes;
buffer.length = 1;
IOCTL_OR_ERROR_RETURN_FALSE(VIDIOC_QUERYBUF, &buffer);
void* address = device_->Mmap(NULL,
buffer.m.planes[0].length,
PROT_READ | PROT_WRITE,
MAP_SHARED,
buffer.m.planes[0].m.mem_offset);
if (address == MAP_FAILED) {
VPLOGF(1) << "mmap() failed";
return false; return false;
} }
input_buffer_map_[i].address = address;
input_buffer_map_[i].length = buffer.m.planes[0].length;
}
return true; return true;
} }
...@@ -2276,8 +2191,9 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() { ...@@ -2276,8 +2191,9 @@ bool V4L2VideoDecodeAccelerator::SetupFormats() {
// We always run this as we prepare to initialize. // We always run this as we prepare to initialize.
DCHECK(child_task_runner_->BelongsToCurrentThread()); DCHECK(child_task_runner_->BelongsToCurrentThread());
DCHECK_EQ(decoder_state_, kUninitialized); DCHECK_EQ(decoder_state_, kUninitialized);
DCHECK(!input_streamon_); // TODO(acourbot@) this is running in the wrong thread!
DCHECK(!output_streamon_); // DCHECK(!input_queue_->IsStreaming());
// DCHECK(!output_queue_->IsStreaming());
size_t input_size; size_t input_size;
gfx::Size max_resolution, min_resolution; gfx::Size max_resolution, min_resolution;
...@@ -2425,8 +2341,9 @@ bool V4L2VideoDecodeAccelerator::ResetImageProcessor() { ...@@ -2425,8 +2341,9 @@ bool V4L2VideoDecodeAccelerator::ResetImageProcessor() {
for (size_t i = 0; i < output_buffer_map_.size(); ++i) { for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
OutputRecord& output_record = output_buffer_map_[i]; OutputRecord& output_record = output_buffer_map_[i];
if (output_record.state == kAtProcessor) { if (output_record.state == kAtProcessor) {
DCHECK_EQ(buffers_at_client_.count(output_record.picture_id), 1u);
buffers_at_client_.erase(output_record.picture_id);
output_record.state = kFree; output_record.state = kFree;
free_output_buffers_.push_back(i);
} }
} }
while (!image_processor_bitstream_buffer_ids_.empty()) while (!image_processor_bitstream_buffer_ids_.empty())
...@@ -2512,7 +2429,7 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() { ...@@ -2512,7 +2429,7 @@ bool V4L2VideoDecodeAccelerator::CreateOutputBuffers() {
VLOGF(2); VLOGF(2);
DCHECK(decoder_state_ == kInitialized || DCHECK(decoder_state_ == kInitialized ||
decoder_state_ == kChangingResolution); decoder_state_ == kChangingResolution);
DCHECK(!output_streamon_); DCHECK(!output_queue_->IsStreaming());
DCHECK(output_buffer_map_.empty()); DCHECK(output_buffer_map_.empty());
// Number of output buffers we need. // Number of output buffers we need.
...@@ -2560,39 +2477,23 @@ void V4L2VideoDecodeAccelerator::DestroyInputBuffers() { ...@@ -2560,39 +2477,23 @@ void V4L2VideoDecodeAccelerator::DestroyInputBuffers() {
VLOGF(2); VLOGF(2);
DCHECK(!decoder_thread_.IsRunning() || DCHECK(!decoder_thread_.IsRunning() ||
decoder_thread_.task_runner()->BelongsToCurrentThread()); decoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK(!input_streamon_);
if (input_buffer_map_.empty()) input_queue_->DeallocateBuffers();
return;
for (size_t i = 0; i < input_buffer_map_.size(); ++i) {
if (input_buffer_map_[i].address != NULL) {
device_->Munmap(input_buffer_map_[i].address,
input_buffer_map_[i].length);
}
}
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = 0;
reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
IOCTL_OR_LOG_ERROR(VIDIOC_REQBUFS, &reqbufs);
input_buffer_map_.clear();
free_input_buffers_.clear();
} }
bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
VLOGF(2); VLOGF(2);
DCHECK(!decoder_thread_.IsRunning() || DCHECK(!decoder_thread_.IsRunning() ||
decoder_thread_.task_runner()->BelongsToCurrentThread()); decoder_thread_.task_runner()->BelongsToCurrentThread());
DCHECK(!output_streamon_); DCHECK(!output_queue_->IsStreaming());
bool success = true; bool success = true;
if (output_buffer_map_.empty()) if (output_buffer_map_.empty())
return true; return true;
// Release all buffers waiting for an import buffer event
output_wait_map_.clear();
for (size_t i = 0; i < output_buffer_map_.size(); ++i) { for (size_t i = 0; i < output_buffer_map_.size(); ++i) {
OutputRecord& output_record = output_buffer_map_[i]; OutputRecord& output_record = output_buffer_map_[i];
...@@ -2611,25 +2512,16 @@ bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() { ...@@ -2611,25 +2512,16 @@ bool V4L2VideoDecodeAccelerator::DestroyOutputBuffers() {
output_record.picture_id)); output_record.picture_id));
} }
struct v4l2_requestbuffers reqbufs; // TODO(acourbot@) the client should properly drop all references to the
memset(&reqbufs, 0, sizeof(reqbufs)); // frames it holds instead!
reqbufs.count = 0; buffers_at_client_.clear();
reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
if (!image_processor_device_ && output_mode_ == Config::OutputMode::IMPORT) { if (!output_queue_->DeallocateBuffers()) {
reqbufs.memory = V4L2_MEMORY_DMABUF;
} else {
reqbufs.memory = V4L2_MEMORY_MMAP;
}
if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
VPLOGF(1) << "ioctl() failed: VIDIOC_REQBUFS";
NOTIFY_ERROR(PLATFORM_FAILURE); NOTIFY_ERROR(PLATFORM_FAILURE);
success = false; success = false;
} }
output_buffer_map_.clear(); output_buffer_map_.clear();
while (!free_output_buffers_.empty())
free_output_buffers_.pop_front();
output_buffer_queued_count_ = 0;
// The client may still hold some buffers. The texture holds a reference to // The client may still hold some buffers. The texture holds a reference to
// the buffer. It is OK to free the buffer and destroy EGLImage here. // the buffer. It is OK to free the buffer and destroy EGLImage here.
decoder_frames_at_client_ = 0; decoder_frames_at_client_ = 0;
......
...@@ -180,17 +180,6 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator ...@@ -180,17 +180,6 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
Picture picture; // The decoded picture. Picture picture; // The decoded picture.
}; };
// Record for input buffers.
struct InputRecord {
InputRecord();
~InputRecord();
bool at_device; // held by device.
void* address; // mmap() address.
size_t length; // mmap() length.
off_t bytes_used; // bytes filled in the mmap() segment.
int32_t input_id; // triggering input_id as given to Decode().
};
// Record for output buffers. // Record for output buffers.
struct OutputRecord { struct OutputRecord {
OutputRecord(); OutputRecord();
...@@ -473,8 +462,6 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator ...@@ -473,8 +462,6 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// queued afterwards. For flushing or resetting the pipeline then, we will // queued afterwards. For flushing or resetting the pipeline then, we will
// delay these buffers until after the flush or reset completes. // delay these buffers until after the flush or reset completes.
int decoder_delay_bitstream_buffer_id_; int decoder_delay_bitstream_buffer_id_;
// Input buffer we're presently filling.
int decoder_current_input_buffer_;
// We track the number of buffer decode tasks we have scheduled, since each // We track the number of buffer decode tasks we have scheduled, since each
// task execution should complete one buffer. If we fall behind (due to // task execution should complete one buffer. If we fall behind (due to
// resource backpressure, etc.), we'll have to schedule more to catch up. // resource backpressure, etc.), we'll have to schedule more to catch up.
...@@ -507,31 +494,25 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator ...@@ -507,31 +494,25 @@ class MEDIA_GPU_EXPORT V4L2VideoDecodeAccelerator
// //
// Hardware state and associated queues. Since decoder_thread_ services // Hardware state and associated queues. Since decoder_thread_ services
// the hardware, decoder_thread_ owns these too. // the hardware, decoder_thread_ owns these too.
// output_buffer_map_, free_output_buffers_ and output_planes_count_ are an // output_buffer_map_ and output_planes_count_ are an
// exception during the buffer (re)allocation sequence, when the // exception during the buffer (re)allocation sequence, when the
// decoder_thread_ is blocked briefly while the Child thread manipulates // decoder_thread_ is blocked briefly while the Child thread manipulates
// them. // them.
// //
// Completed decode buffers. V4L2WritableBufferRef current_input_buffer_;
base::queue<int> input_ready_queue_;
scoped_refptr<V4L2Queue> input_queue_;
// Input buffer state. scoped_refptr<V4L2Queue> output_queue_;
bool input_streamon_; // Input buffers ready to be queued.
// Input buffers enqueued to device. base::queue<V4L2WritableBufferRef> input_ready_queue_;
int input_buffer_queued_count_;
// Input buffers ready to use, as a LIFO since we don't care about ordering. // Buffers that have been allocated but are awaiting an ImportBuffer
std::vector<int> free_input_buffers_; // or AssignEGLImage event.
// Mapping of int index to input buffer record. std::map<int32_t, V4L2WritableBufferRef> output_wait_map_;
std::vector<InputRecord> input_buffer_map_; // Keeps decoded buffers out of the free list until the client returns them.
std::map<int32_t, V4L2ReadableBufferRef> buffers_at_client_;
// Output buffer state.
bool output_streamon_;
// Output buffers enqueued to device.
int output_buffer_queued_count_;
// Output buffers ready to use, as a FIFO since we want oldest-first to hide
// synchronization latency with GL.
std::list<int> free_output_buffers_;
// Mapping of int index to output buffer record. // Mapping of int index to output buffer record.
std::vector<OutputRecord> output_buffer_map_; std::vector<OutputRecord> output_buffer_map_;
// Required size of DPB for decoding. // Required size of DPB for decoding.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment