Commit 92a67a9a authored by Francois Buergisser, committed by Commit Bot

media/gpu/v4l2: Abstract buffer usage inside reference

The writable buffer references are passing their buffers to
the decode surface to set the controls, requests or config store
settings which is making the writable buffer references aware of the
decoded surface.
This patch removes references to the decode surfaces in the writable
buffer reference and at the same time simplifies the decode surface
submission.

BUG=chromium:1009921
TEST=ran tast with video.DecodeAccelVD.vp8_resolution_switch on veyron_minnie.
TEST=ran tast with video.DecodeAccelVD.h264_resolution_switch on veyron_minnie.
TEST=ran tast with video.DecodeAccelVD.vp8 on veyron_minnie.
TEST=ran tast with video.DecodeAccelVD.h264 on veyron_minnie.
TEST=ran tast with video.DecodeAccel.vp8 on veyron_minnie.
TEST=ran tast with video.DecodeAccel.h264 on veyron_minnie.
TEST=ran tast with video.DecodeAccelVD.vp8_resolution_switch on kevin.
TEST=ran tast with video.DecodeAccelVD.h264_resolution_switch on kevin.
TEST=ran tast with video.DecodeAccelVD.vp8 on kevin.
TEST=ran tast with video.DecodeAccelVD.h264 on kevin.
TEST=ran tast with video.DecodeAccel.vp8 on kevin.
TEST=ran tast with video.DecodeAccel.h264 on kevin.
Signed-off-by: Francois Buergisser <fbuergisser@chromium.org>
Change-Id: I5097a2bb9b2ce1ede7e7ad573d37a47d15dfaf26
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1969411
Reviewed-by: Alexandre Courbot <acourbot@chromium.org>
Commit-Queue: Alexandre Courbot <acourbot@chromium.org>
Cr-Commit-Position: refs/heads/master@{#730065}
parent 77e1b616
......@@ -106,15 +106,6 @@ void V4L2ConfigStoreDecodeSurface::PrepareSetCtrls(
ctrls->config_store = config_store_;
}
void V4L2ConfigStoreDecodeSurface::PrepareQueueBuffer(
struct v4l2_buffer* buffer) const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(buffer, nullptr);
DCHECK_GT(config_store_, 0u);
buffer->config_store = config_store_;
}
uint64_t V4L2ConfigStoreDecodeSurface::GetReferenceID() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
......@@ -124,9 +115,24 @@ uint64_t V4L2ConfigStoreDecodeSurface::GetReferenceID() const {
bool V4L2ConfigStoreDecodeSurface::Submit() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_GT(config_store_, 0u);
input_buffer().SetConfigStore(config_store_);
// There is nothing extra to submit when using the config store
return true;
if (!std::move(input_buffer()).QueueMMap()) {
return false;
}
switch (output_buffer().Memory()) {
case V4L2_MEMORY_MMAP:
return std::move(output_buffer()).QueueMMap();
case V4L2_MEMORY_DMABUF:
return std::move(output_buffer()).QueueDMABuf(video_frame()->DmabufFds());
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
return false;
}
void V4L2RequestDecodeSurface::PrepareSetCtrls(
......@@ -139,22 +145,6 @@ void V4L2RequestDecodeSurface::PrepareSetCtrls(
request_ref_.SetCtrls(ctrls);
}
void V4L2RequestDecodeSurface::PrepareQueueBuffer(
struct v4l2_buffer* buffer) const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(buffer, nullptr);
DCHECK(request_ref_.IsValid());
request_ref_.SetQueueBuffer(buffer);
// Use the output buffer index as the timestamp.
// Since the client is supposed to keep the output buffer out of the V4L2
// queue for as long as it is used as a reference frame, this ensures that
// all the requests we submit have unique IDs at any point in time.
DCHECK_EQ(static_cast<int>(buffer->index), input_record());
buffer->timestamp.tv_sec = 0;
buffer->timestamp.tv_usec = output_record();
}
uint64_t V4L2RequestDecodeSurface::GetReferenceID() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
......@@ -166,6 +156,35 @@ uint64_t V4L2RequestDecodeSurface::GetReferenceID() const {
// Queues the input and output buffers for this surface and submits the
// associated V4L2 request. Returns true only if every step succeeded.
bool V4L2RequestDecodeSurface::Submit() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(request_ref_.IsValid());
// Use the output buffer index as the timestamp.
// Since the client is supposed to keep the output buffer out of the V4L2
// queue for as long as it is used as a reference frame, this ensures that
// all the requests we submit have unique IDs at any point in time.
struct timeval timestamp = {
.tv_sec = 0,
.tv_usec = output_record()
};
input_buffer().SetTimeStamp(timestamp);
// Queue the input (bitstream) buffer, attaching it to our request so the
// controls set on the request apply to this decode operation.
if (!std::move(input_buffer()).QueueMMap(&request_ref_)) {
return false;
}
// Queue the output (decoded frame) buffer using whichever memory type the
// output queue was set up with.
bool result = false;
switch (output_buffer().Memory()) {
case V4L2_MEMORY_MMAP:
result = std::move(output_buffer()).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
result = std::move(output_buffer())
.QueueDMABuf(video_frame()->DmabufFds());
break;
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
if (!result)
return result;
// Both buffers are queued; finally submit the request itself. Submit()
// consumes the request reference, hence the std::move.
return std::move(request_ref_).Submit().IsValid();
}
......
......@@ -56,13 +56,10 @@ class V4L2DecodeSurface : public base::RefCounted<V4L2DecodeSurface> {
// Update the passed v4l2_ext_controls structure to add the request or
// config store information.
virtual void PrepareSetCtrls(struct v4l2_ext_controls* ctrls) const = 0;
// Update the passed v4l2_buffer structure to add the request or
// config store information.
virtual void PrepareQueueBuffer(struct v4l2_buffer* buffer) const = 0;
// Return the ID to use in order to reference this frame.
virtual uint64_t GetReferenceID() const = 0;
// Submit the request corresponding to this surface once all controls have
// been set and all buffers queued.
// Set controls, queue buffers and submit the request corresponding to this
// surface.
virtual bool Submit() = 0;
bool decoded() const { return decoded_; }
......@@ -124,7 +121,6 @@ class V4L2ConfigStoreDecodeSurface : public V4L2DecodeSurface {
config_store_(this->input_buffer().BufferId() + 1) {}
void PrepareSetCtrls(struct v4l2_ext_controls* ctrls) const override;
void PrepareQueueBuffer(struct v4l2_buffer* buffer) const override;
uint64_t GetReferenceID() const override;
bool Submit() override;
......@@ -151,7 +147,6 @@ class V4L2RequestDecodeSurface : public V4L2DecodeSurface {
request_ref_(std::move(request_ref)) {}
void PrepareSetCtrls(struct v4l2_ext_controls* ctrls) const override;
void PrepareQueueBuffer(struct v4l2_buffer* buffer) const override;
uint64_t GetReferenceID() const override;
bool Submit() override;
......
......@@ -29,7 +29,6 @@
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/generic_v4l2_device.h"
#include "media/gpu/v4l2/v4l2_decode_surface.h"
#include "ui/gfx/native_pixmap_handle.h"
#if defined(ARCH_CPU_ARMEL)
......@@ -312,6 +311,7 @@ class V4L2BufferRefBase {
private:
size_t BufferId() const { return v4l2_buffer_.index; }
friend class V4L2WritableBufferRef;
// A weak pointer to the queue this buffer belongs to. Will remain valid as
// long as the underlying V4L2 buffer is valid too.
// This can only be accessed from the sequence protected by sequence_checker_.
......@@ -464,10 +464,13 @@ enum v4l2_memory V4L2WritableBufferRef::Memory() const {
return static_cast<enum v4l2_memory>(buffer_data_->v4l2_buffer_.memory);
}
bool V4L2WritableBufferRef::DoQueue() && {
bool V4L2WritableBufferRef::DoQueue(V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
if (request_ref && buffer_data_->queue_->SupportsRequests())
request_ref->SetQueueBuffer(&(buffer_data_->v4l2_buffer_));
bool queued = buffer_data_->QueueBuffer();
// Clear our own reference.
......@@ -476,7 +479,8 @@ bool V4L2WritableBufferRef::DoQueue() && {
return queued;
}
bool V4L2WritableBufferRef::QueueMMap() && {
bool V4L2WritableBufferRef::QueueMMap(
V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
......@@ -488,10 +492,12 @@ bool V4L2WritableBufferRef::QueueMMap() && {
return false;
}
return std::move(self).DoQueue();
return std::move(self).DoQueue(request_ref);
}
bool V4L2WritableBufferRef::QueueUserPtr(const std::vector<void*>& ptrs) && {
bool V4L2WritableBufferRef::QueueUserPtr(
const std::vector<void*>& ptrs,
V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
......@@ -513,11 +519,12 @@ bool V4L2WritableBufferRef::QueueUserPtr(const std::vector<void*>& ptrs) && {
self.buffer_data_->v4l2_buffer_.m.planes[i].m.userptr =
reinterpret_cast<unsigned long>(ptrs[i]);
return std::move(self).DoQueue();
return std::move(self).DoQueue(request_ref);
}
bool V4L2WritableBufferRef::QueueDMABuf(
const std::vector<base::ScopedFD>& fds) && {
const std::vector<base::ScopedFD>& fds,
V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
......@@ -536,11 +543,12 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = fds[i].get();
return std::move(self).DoQueue();
return std::move(self).DoQueue(request_ref);
}
bool V4L2WritableBufferRef::QueueDMABuf(
const std::vector<gfx::NativePixmapPlane>& planes) && {
const std::vector<gfx::NativePixmapPlane>& planes,
V4L2RequestRef* request_ref) && {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
......@@ -559,7 +567,7 @@ bool V4L2WritableBufferRef::QueueDMABuf(
for (size_t i = 0; i < num_planes; i++)
self.buffer_data_->v4l2_buffer_.m.planes[i].m.fd = planes[i].fd.get();
return std::move(self).DoQueue();
return std::move(self).DoQueue(request_ref);
}
size_t V4L2WritableBufferRef::PlanesCount() const {
......@@ -666,11 +674,6 @@ void V4L2WritableBufferRef::SetPlaneDataOffset(const size_t plane,
buffer_data_->v4l2_buffer_.m.planes[plane].data_offset = data_offset;
}
void V4L2WritableBufferRef::PrepareQueueBuffer(
const V4L2DecodeSurface& surface) {
surface.PrepareQueueBuffer(&(buffer_data_->v4l2_buffer_));
}
size_t V4L2WritableBufferRef::BufferId() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
......@@ -678,6 +681,13 @@ size_t V4L2WritableBufferRef::BufferId() const {
return buffer_data_->v4l2_buffer_.index;
}
// Associates |config_store| with the underlying v4l2_buffer. Legacy
// config-store path only; kept for backward compatibility until the config
// store is replaced by the request API.
void V4L2WritableBufferRef::SetConfigStore(uint32_t config_store) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(buffer_data_);
buffer_data_->v4l2_buffer_.config_store = config_store;
}
V4L2ReadableBuffer::V4L2ReadableBuffer(const struct v4l2_buffer& v4l2_buffer,
base::WeakPtr<V4L2Queue> queue)
: buffer_data_(
......
......@@ -63,6 +63,7 @@ class V4L2Queue;
class V4L2BufferRefBase;
class V4L2BuffersList;
class V4L2DecodeSurface;
class V4L2RequestRef;
// A unique reference to a buffer for clients to prepare and submit.
//
......@@ -80,36 +81,47 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
enum v4l2_memory Memory() const;
// Queue a MMAP buffer.
// When requests are supported, a |request_ref| can be passed along with
// the buffer to be submitted.
// If successful, true is returned and the reference to the buffer is dropped
// so this reference becomes invalid.
// In case of error, false is returned and the buffer is returned to the free
// list.
bool QueueMMap() &&;
bool QueueMMap(V4L2RequestRef* request_ref = nullptr) &&;
// Queue a USERPTR buffer, assigning |ptrs| as pointer for each plane.
// The size of |ptrs| must be equal to the number of planes of this buffer.
// When requests are supported, a |request_ref| can be passed along with
// the buffer to be submitted.
// If successful, true is returned and the reference to the buffer is dropped
// so this reference becomes invalid.
// In case of error, false is returned and the buffer is returned to the free
// list.
bool QueueUserPtr(const std::vector<void*>& ptrs) &&;
bool QueueUserPtr(const std::vector<void*>& ptrs,
V4L2RequestRef* request_ref = nullptr) &&;
// Queue a DMABUF buffer, assigning |fds| as file descriptors for each plane.
// It is allowed the number of |fds| might be greater than the number of
// planes of this buffer. It happens when the v4l2 pixel format is single
// planar. The fd of the first plane is only used in that case.
// When requests are supported, a |request_ref| can be passed along with
// the buffer to be submitted.
// If successful, true is returned and the reference to the buffer is dropped
// so this reference becomes invalid.
// In case of error, false is returned and the buffer is returned to the free
// list.
bool QueueDMABuf(const std::vector<base::ScopedFD>& fds) &&;
bool QueueDMABuf(const std::vector<base::ScopedFD>& fds,
V4L2RequestRef* request_ref = nullptr) &&;
// Queue a DMABUF buffer, assigning file descriptors of |planes| for planes.
// It is allowed the number of |planes| might be greater than the number of
// planes of this buffer. It happens when the v4l2 pixel format is single
// planar. The fd of the first plane of |planes| is only used in that case.
// When requests are supported, a |request_ref| can be passed along with
// the buffer to be submitted.
// If successful, true is returned and the reference to the buffer is dropped
// so this reference becomes invalid.
// In case of error, false is returned and the buffer is returned to the free
// list.
bool QueueDMABuf(const std::vector<gfx::NativePixmapPlane>& planes) &&;
bool QueueDMABuf(const std::vector<gfx::NativePixmapPlane>& planes,
V4L2RequestRef* request_ref = nullptr) &&;
// Returns the number of planes in this buffer.
size_t PlanesCount() const;
......@@ -144,22 +156,24 @@ class MEDIA_GPU_EXPORT V4L2WritableBufferRef {
// return nullptr for any other buffer type.
scoped_refptr<VideoFrame> GetVideoFrame() WARN_UNUSED_RESULT;
// Add the request or config store information to |surface|.
// TODO(acourbot): This method is a temporary hack. Implement proper config
// store/request API support.
void PrepareQueueBuffer(const V4L2DecodeSurface& surface);
// Return the V4L2 buffer ID of the underlying buffer.
// TODO(acourbot) This is used for legacy clients but should be ultimately
// removed. See crbug/879971
size_t BufferId() const;
// Set the passed config store to this buffer.
// This method is only used for backward compatibility until the config
// store is deprecated and should not be called by new code.
void SetConfigStore(uint32_t config_store);
~V4L2WritableBufferRef();
private:
// Do the actual queue operation once the v4l2_buffer structure is properly
// filled.
bool DoQueue() &&;
// When requests are supported, a |request_ref| can be passed along with
// the buffer to be submitted.
bool DoQueue(V4L2RequestRef* request_ref) &&;
V4L2WritableBufferRef(const struct v4l2_buffer& v4l2_buffer,
base::WeakPtr<V4L2Queue> queue);
......
......@@ -777,25 +777,6 @@ void V4L2SliceVideoDecodeAccelerator::ServiceDeviceTask(bool event) {
Dequeue();
}
void V4L2SliceVideoDecodeAccelerator::Enqueue(
scoped_refptr<V4L2DecodeSurface> dec_surface) {
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
if (!EnqueueInputRecord(dec_surface.get())) {
VLOGF(1) << "Failed queueing an input buffer";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
if (!EnqueueOutputRecord(dec_surface.get())) {
VLOGF(1) << "Failed queueing an output buffer";
NOTIFY_ERROR(PLATFORM_FAILURE);
return;
}
surfaces_at_device_.push(dec_surface);
}
void V4L2SliceVideoDecodeAccelerator::Dequeue() {
DVLOGF(4);
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
......@@ -915,60 +896,6 @@ void V4L2SliceVideoDecodeAccelerator::ReuseOutputBuffer(
ScheduleDecodeBufferTaskIfNeeded();
}
bool V4L2SliceVideoDecodeAccelerator::EnqueueInputRecord(
V4L2DecodeSurface* dec_surface) {
DVLOGF(4);
DCHECK_NE(dec_surface, nullptr);
// Enqueue an input (VIDEO_OUTPUT) buffer for an input video frame.
V4L2WritableBufferRef input_buffer = std::move(dec_surface->input_buffer());
const int index = input_buffer.BufferId();
input_buffer.PrepareQueueBuffer(*dec_surface);
if (!std::move(input_buffer).QueueMMap()) {
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
DVLOGF(4) << "Enqueued input=" << index
<< " count: " << input_queue_->QueuedBuffersCount();
return true;
}
bool V4L2SliceVideoDecodeAccelerator::EnqueueOutputRecord(
V4L2DecodeSurface* dec_surface) {
DVLOGF(4);
// Enqueue an output (VIDEO_CAPTURE) buffer.
V4L2WritableBufferRef output_buffer = std::move(dec_surface->output_buffer());
size_t index = output_buffer.BufferId();
OutputRecord& output_record = output_buffer_map_[index];
DCHECK_NE(output_record.picture_id, -1);
bool ret = false;
switch (output_buffer.Memory()) {
case V4L2_MEMORY_MMAP:
ret = std::move(output_buffer).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
ret = std::move(output_buffer)
.QueueDMABuf(output_record.output_frame->DmabufFds());
break;
default:
NOTREACHED();
}
if (!ret) {
NOTIFY_ERROR(PLATFORM_FAILURE);
return false;
}
DVLOGF(4) << "Enqueued output=" << index
<< " count: " << output_queue_->QueuedBuffersCount();
return true;
}
bool V4L2SliceVideoDecodeAccelerator::StartDevicePoll() {
DVLOGF(3) << "Starting device poll";
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
......@@ -1948,12 +1875,12 @@ void V4L2SliceVideoDecodeAccelerator::DecodeSurface(
DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
DVLOGF(3) << "Submitting decode for surface: " << dec_surface->ToString();
Enqueue(dec_surface);
if (!dec_surface->Submit()) {
VLOGF(1) << "Error while submitting frame for decoding!";
NOTIFY_ERROR(PLATFORM_FAILURE);
}
surfaces_at_device_.push(dec_surface);
}
void V4L2SliceVideoDecodeAccelerator::SurfaceReady(
......@@ -2082,6 +2009,10 @@ V4L2SliceVideoDecodeAccelerator::CreateSurface() {
scoped_refptr<V4L2DecodeSurface> dec_surface;
size_t index = output_buffer->BufferId();
OutputRecord& output_record = output_buffer_map_[index];
DCHECK_NE(output_record.picture_id, -1);
if (supports_requests_) {
// Get a free request from the queue for a new surface.
V4L2RequestRef request_ref = requests_queue_->GetFreeRequest();
......@@ -2091,10 +2022,12 @@ V4L2SliceVideoDecodeAccelerator::CreateSurface() {
}
dec_surface = new V4L2RequestDecodeSurface(std::move(*input_buffer),
std::move(*output_buffer),
nullptr, std::move(request_ref));
output_record.output_frame,
std::move(request_ref));
} else {
dec_surface = new V4L2ConfigStoreDecodeSurface(
std::move(*input_buffer), std::move(*output_buffer), nullptr);
dec_surface = new V4L2ConfigStoreDecodeSurface(std::move(*input_buffer),
std::move(*output_buffer),
output_record.output_frame);
}
DVLOGF(4) << "Created surface " << input << " -> " << output;
......
......@@ -161,16 +161,9 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecodeAccelerator
// Recycle V4L2 output buffer with |index|. Used as surface release callback.
void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
// Queue a |dec_surface| to device for decoding.
void Enqueue(scoped_refptr<V4L2DecodeSurface> dec_surface);
// Dequeue any V4L2 buffers available and process.
void Dequeue();
// V4L2 QBUF helpers.
bool EnqueueInputRecord(V4L2DecodeSurface* dec_surface);
bool EnqueueOutputRecord(V4L2DecodeSurface* dec_surface);
// Set input and output formats in hardware.
bool SetupFormats();
// Reset image processor and drop all processing frames.
......
......@@ -308,31 +308,6 @@ void V4L2StatelessVideoDecoderBackend::DecodeSurface(
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOGF(3);
// Enqueue input_buf and output_buf
dec_surface->input_buffer().PrepareQueueBuffer(*dec_surface);
if (!std::move(dec_surface->input_buffer()).QueueMMap()) {
client_->OnBackendError();
return;
}
bool result = false;
switch (output_queue_->GetMemoryType()) {
case V4L2_MEMORY_MMAP:
result = std::move(dec_surface->output_buffer()).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
result = std::move(dec_surface->output_buffer())
.QueueDMABuf(dec_surface->video_frame()->DmabufFds());
break;
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
if (!result) {
client_->OnBackendError();
return;
}
if (!dec_surface->Submit()) {
VLOGF(1) << "Error while submitting frame for decoding!";
client_->OnBackendError();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment