Commit 61ae3ddf authored by Alexandre Courbot's avatar Alexandre Courbot Committed by Commit Bot

media/gpu/v4l2vd: move stateless-specific parts into backend class

There are two V4L2 decode APIs: stateless and stateful. In the VDA era
these were implemented as two completely separate classes
(V4L2VideoDecodeAccelerator and V4L2SliceVideoDecodeAccelerator), which
resulted in a lot of duplicated code and bugs due to fixes not being
replicated on both classes.

For the VD-based decoder, we want to avoid making the same mistake and
use a single V4L2 decoder, supported by different backends depending on
which V4L2 API we are using.

This CL is the first step towards that direction: it splits the
V4L2SliceVideoDecoder into its general V4L2 part and the one that is
specific to the stateless API.

Bug: 1003223
Test: video_decode_accelerator_tests passing on Kevin.

Change-Id: I373f3ea739b6799ecad009d053ca7e415b8484bd
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1840034
Commit-Queue: Alexandre Courbot <acourbot@chromium.org>
Reviewed-by: default avatarHirokazu Honda <hiroh@chromium.org>
Reviewed-by: default avatarDavid Staessens <dstaessens@chromium.org>
Reviewed-by: default avatarChih-Yu Huang <akahuang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#705894}
parent a33a2c5c
......@@ -51,6 +51,10 @@ source_set("v4l2") {
"v4l2_vda_helpers.h",
"v4l2_video_decode_accelerator.cc",
"v4l2_video_decode_accelerator.h",
"v4l2_video_decoder_backend.cc",
"v4l2_video_decoder_backend.h",
"v4l2_video_decoder_backend_stateless.cc",
"v4l2_video_decoder_backend_stateless.h",
"v4l2_video_encode_accelerator.cc",
"v4l2_video_encode_accelerator.h",
"v4l2_vp8_accelerator.cc",
......
......@@ -4,10 +4,6 @@
#include "media/gpu/v4l2/v4l2_slice_video_decoder.h"
#include <fcntl.h>
#include <linux/media.h>
#include <sys/ioctl.h>
#include <algorithm>
#include "base/bind.h"
......@@ -15,18 +11,12 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/task/post_task.h"
#include "media/base/scopedfd_helper.h"
#include "media/base/video_util.h"
#include "media/gpu/accelerated_video_decoder.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/linux/dmabuf_video_frame_pool.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_h264_accelerator.h"
#include "media/gpu/v4l2/v4l2_h264_accelerator_legacy.h"
#include "media/gpu/v4l2/v4l2_vp8_accelerator.h"
#include "media/gpu/v4l2/v4l2_vp8_accelerator_legacy.h"
#include "media/gpu/v4l2/v4l2_vp9_accelerator.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h"
namespace media {
......@@ -40,9 +30,6 @@ constexpr size_t kInputBufferMaxSizeFor1080p = 1024 * 1024;
constexpr size_t kInputBufferMaxSizeFor4k = 4 * kInputBufferMaxSizeFor1080p;
constexpr size_t kNumInputBuffers = 16;
// Size of the timestamp cache, needs to be large enough for frame-reordering.
constexpr size_t kTimestampCacheSize = 128;
// Input format V4L2 fourccs this class supports.
constexpr uint32_t kSupportedInputFourccs[] = {
V4L2_PIX_FMT_H264_SLICE,
......@@ -52,63 +39,6 @@ constexpr uint32_t kSupportedInputFourccs[] = {
} // namespace
V4L2SliceVideoDecoder::DecodeRequest::DecodeRequest(
scoped_refptr<DecoderBuffer> buf,
DecodeCB cb,
int32_t id)
: buffer(std::move(buf)), decode_cb(std::move(cb)), bitstream_id(id) {}
V4L2SliceVideoDecoder::DecodeRequest::DecodeRequest(DecodeRequest&&) = default;
V4L2SliceVideoDecoder::DecodeRequest& V4L2SliceVideoDecoder::DecodeRequest::
operator=(DecodeRequest&&) = default;
V4L2SliceVideoDecoder::DecodeRequest::~DecodeRequest() = default;
struct V4L2SliceVideoDecoder::OutputRequest {
enum OutputRequestType {
// The surface to be outputted.
kSurface,
// The fence to indicate the flush request.
kFlushFence,
// The fence to indicate resolution change request.
kChangeResolutionFence,
};
// The type of the request.
const OutputRequestType type;
// The surface to be outputted.
scoped_refptr<V4L2DecodeSurface> surface;
// The timestamp of the output frame. Because a surface might be outputted
// multiple times with different timestamp, we need to store timestamp out of
// surface.
base::TimeDelta timestamp;
static OutputRequest Surface(scoped_refptr<V4L2DecodeSurface> s,
base::TimeDelta t) {
return OutputRequest(std::move(s), t);
}
static OutputRequest FlushFence() { return OutputRequest(kFlushFence); }
static OutputRequest ChangeResolutionFence() {
return OutputRequest(kChangeResolutionFence);
}
bool IsReady() const {
return (type != OutputRequestType::kSurface) || surface->decoded();
}
// Allow move, but not copy.
OutputRequest(OutputRequest&&) = default;
private:
OutputRequest(scoped_refptr<V4L2DecodeSurface> s, base::TimeDelta t)
: type(kSurface), surface(std::move(s)), timestamp(t) {}
explicit OutputRequest(OutputRequestType t) : type(t) {}
DISALLOW_COPY_AND_ASSIGN(OutputRequest);
};
// static
std::unique_ptr<VideoDecoder> V4L2SliceVideoDecoder::Create(
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
......@@ -149,7 +79,6 @@ V4L2SliceVideoDecoder::V4L2SliceVideoDecoder(
get_pool_cb_(std::move(get_pool_cb)),
client_task_runner_(std::move(client_task_runner)),
decoder_task_runner_(std::move(decoder_task_runner)),
bitstream_id_to_timestamp_(kTimestampCacheSize),
weak_this_factory_(this) {
DETACH_FROM_SEQUENCE(client_sequence_checker_);
DETACH_FROM_SEQUENCE(decoder_sequence_checker_);
......@@ -161,7 +90,6 @@ V4L2SliceVideoDecoder::~V4L2SliceVideoDecoder() {
// We might be called from either the client or the decoder sequence.
DETACH_FROM_SEQUENCE(client_sequence_checker_);
DETACH_FROM_SEQUENCE(decoder_sequence_checker_);
DCHECK(requests_.empty());
VLOGF(2);
}
......@@ -209,9 +137,7 @@ void V4L2SliceVideoDecoder::DestroyTask() {
DVLOGF(2);
// Call all pending decode callback.
ClearPendingRequests(DecodeStatus::ABORTED);
avd_ = nullptr;
backend_->ClearPendingRequests(DecodeStatus::ABORTED);
// Stop and Destroy device.
StopStreamV4L2Queue();
......@@ -223,12 +149,6 @@ void V4L2SliceVideoDecoder::DestroyTask() {
output_queue_->DeallocateBuffers();
output_queue_ = nullptr;
}
DCHECK(surfaces_at_device_.empty());
if (supports_requests_) {
requests_ = {};
media_fd_.reset();
}
weak_this_factory_.InvalidateWeakPtrs();
......@@ -269,14 +189,6 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
DCHECK(state_ == State::kUninitialized || state_ == State::kDecoding);
DVLOGF(3);
if (!output_request_queue_.empty() || flush_cb_ || current_decode_request_ ||
!decode_request_queue_.empty()) {
VLOGF(1) << "Should not call Initialize() during pending decode";
client_task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(init_cb), false));
return;
}
// Reset V4L2 device and queue if reinitializing decoder.
if (state_ != State::kUninitialized) {
if (!StopStreamV4L2Queue()) {
......@@ -298,10 +210,9 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
return;
}
if (avd_) {
avd_->Reset();
avd_ = nullptr;
}
if (backend_)
backend_ = nullptr;
SetState(State::kUninitialized);
}
......@@ -332,41 +243,6 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
return;
}
if (!CheckRequestAPISupport()) {
VPLOGF(1) << "Failed to check request api support.";
client_task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(init_cb), false));
return;
}
// Create codec-specific AcceleratedVideoDecoder.
// TODO(akahuang): Check the profile is supported.
if (profile >= H264PROFILE_MIN && profile <= H264PROFILE_MAX) {
if (supports_requests_) {
avd_.reset(new H264Decoder(
std::make_unique<V4L2H264Accelerator>(this, device_.get())));
} else {
avd_.reset(new H264Decoder(
std::make_unique<V4L2LegacyH264Accelerator>(this, device_.get())));
}
} else if (profile >= VP8PROFILE_MIN && profile <= VP8PROFILE_MAX) {
if (supports_requests_) {
avd_.reset(new VP8Decoder(
std::make_unique<V4L2VP8Accelerator>(this, device_.get())));
} else {
avd_.reset(new VP8Decoder(
std::make_unique<V4L2LegacyVP8Accelerator>(this, device_.get())));
}
} else if (profile >= VP9PROFILE_MIN && profile <= VP9PROFILE_MAX) {
avd_.reset(new VP9Decoder(
std::make_unique<V4L2VP9Accelerator>(this, device_.get())));
} else {
VLOGF(1) << "Unsupported profile " << GetProfileName(profile);
client_task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(init_cb), false));
return;
}
needs_bitstream_conversion_ = (config.codec() == kCodecH264);
pixel_aspect_ratio_ = config.GetPixelAspectRatio();
......@@ -380,6 +256,15 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
return;
}
// Create the backend (only stateless API supported as of now).
backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
this, device_, frame_pool_, profile, decoder_task_runner_);
if (!backend_->Initialize()) {
client_task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(init_cb), false));
return;
}
// Setup input format.
if (!SetupInputFormat(input_format_fourcc)) {
VLOGF(1) << "Failed to setup input format.";
......@@ -410,12 +295,6 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
return;
}
if (supports_requests_ && !AllocateRequests()) {
client_task_runner_->PostTask(FROM_HERE,
base::BindOnce(std::move(init_cb), false));
return;
}
// Call init_cb
output_cb_ = output_cb;
SetState(State::kDecoding);
......@@ -423,62 +302,6 @@ void V4L2SliceVideoDecoder::InitializeTask(const VideoDecoderConfig& config,
base::BindOnce(std::move(init_cb), true));
}
bool V4L2SliceVideoDecoder::CheckRequestAPISupport() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
struct v4l2_requestbuffers reqbufs;
memset(&reqbufs, 0, sizeof(reqbufs));
reqbufs.count = 0;
reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
reqbufs.memory = V4L2_MEMORY_MMAP;
if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
VPLOGF(1) << "VIDIOC_REQBUFS ioctl failed.";
return false;
}
if (reqbufs.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS) {
supports_requests_ = true;
VLOGF(1) << "Using request API.";
DCHECK(!media_fd_.is_valid());
// Let's try to open the media device
// TODO(crbug.com/985230): remove this hardcoding, replace with V4L2Device
// integration.
int media_fd = open("/dev/media-dec0", O_RDWR, 0);
if (media_fd < 0) {
VPLOGF(1) << "Failed to open media device.";
return false;
}
media_fd_ = base::ScopedFD(media_fd);
} else {
VLOGF(1) << "Using config store.";
}
return true;
}
bool V4L2SliceVideoDecoder::AllocateRequests() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK(requests_.empty());
for (size_t i = 0; i < input_queue_->AllocatedBuffersCount(); i++) {
int request_fd;
int ret = HANDLE_EINTR(
ioctl(media_fd_.get(), MEDIA_IOC_REQUEST_ALLOC, &request_fd));
if (ret < 0) {
VPLOGF(1) << "Failed to create request: ";
return false;
}
requests_.push(base::ScopedFD(request_fd));
}
DCHECK_EQ(requests_.size(), input_queue_->AllocatedBuffersCount());
return true;
}
bool V4L2SliceVideoDecoder::SetupInputFormat(uint32_t input_format_fourcc) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DCHECK_EQ(state_, State::kUninitialized);
......@@ -610,7 +433,7 @@ void V4L2SliceVideoDecoder::ResetTask(base::OnceClosure closure) {
DVLOGF(3);
// Call all pending decode callback.
ClearPendingRequests(DecodeStatus::ABORTED);
backend_->ClearPendingRequests(DecodeStatus::ABORTED);
// Streamoff V4L2 queues to drop input and output buffers.
// If the queues are streaming before reset, then we need to start streaming
......@@ -627,33 +450,6 @@ void V4L2SliceVideoDecoder::ResetTask(base::OnceClosure closure) {
client_task_runner_->PostTask(FROM_HERE, std::move(closure));
}
void V4L2SliceVideoDecoder::ClearPendingRequests(DecodeStatus status) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
if (avd_)
avd_->Reset();
// Clear output_request_queue_.
while (!output_request_queue_.empty())
output_request_queue_.pop();
if (flush_cb_)
RunDecodeCB(std::move(flush_cb_), status);
// Clear current_decode_request_ and decode_request_queue_.
if (current_decode_request_) {
RunDecodeCB(std::move(current_decode_request_->decode_cb), status);
current_decode_request_ = base::nullopt;
}
while (!decode_request_queue_.empty()) {
auto request = std::move(decode_request_queue_.front());
decode_request_queue_.pop();
RunDecodeCB(std::move(request.decode_cb), status);
}
}
void V4L2SliceVideoDecoder::Decode(scoped_refptr<DecoderBuffer> buffer,
DecodeCB decode_cb) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
......@@ -676,177 +472,87 @@ void V4L2SliceVideoDecoder::EnqueueDecodeTask(
}
const int32_t bitstream_id = bitstream_id_generator_.GetNextBitstreamId();
backend_->EnqueueDecodeTask(std::move(buffer), std::move(decode_cb),
bitstream_id);
}
bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
if (!buffer->end_of_stream()) {
bitstream_id_to_timestamp_.Put(bitstream_id, buffer->timestamp());
if (!input_queue_->Streamon() || !output_queue_->Streamon()) {
VLOGF(1) << "Failed to streamon V4L2 queue.";
SetState(State::kError);
return false;
}
decode_request_queue_.push(
DecodeRequest(std::move(buffer), std::move(decode_cb), bitstream_id));
if (!device_->StartPolling(
base::BindRepeating(&V4L2SliceVideoDecoder::ServiceDeviceTask,
weak_this_),
base::BindRepeating(&V4L2SliceVideoDecoder::SetState, weak_this_,
State::kError))) {
SetState(State::kError);
return false;
}
// If we are already decoding, then we don't need to pump again.
if (!current_decode_request_)
PumpDecodeTask();
return true;
}
void V4L2SliceVideoDecoder::PumpDecodeTask() {
bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "state_:" << static_cast<int>(state_)
<< " Number of Decode requests: " << decode_request_queue_.size();
if (state_ != State::kDecoding)
return;
DVLOGF(3);
pause_reason_ = PauseReason::kNone;
while (true) {
switch (avd_->Decode()) {
case AcceleratedVideoDecoder::kAllocateNewSurfaces:
DVLOGF(3) << "Need to change resolution. Pause decoding.";
SetState(State::kFlushing);
output_request_queue_.push(OutputRequest::ChangeResolutionFence());
PumpOutputSurfaces();
return;
case AcceleratedVideoDecoder::kRanOutOfStreamData:
// Current decode request is finished processing.
if (current_decode_request_) {
DCHECK(current_decode_request_->decode_cb);
RunDecodeCB(std::move(current_decode_request_->decode_cb),
DecodeStatus::OK);
current_decode_request_ = base::nullopt;
}
// Process next decodee request.
if (decode_request_queue_.empty())
return;
current_decode_request_ = std::move(decode_request_queue_.front());
decode_request_queue_.pop();
if (current_decode_request_->buffer->end_of_stream()) {
if (!avd_->Flush()) {
VLOGF(1) << "Failed flushing the decoder.";
SetState(State::kError);
return;
}
// Put the decoder in an idle state, ready to resume.
avd_->Reset();
SetState(State::kFlushing);
DCHECK(!flush_cb_);
flush_cb_ = std::move(current_decode_request_->decode_cb);
output_request_queue_.push(OutputRequest::FlushFence());
PumpOutputSurfaces();
current_decode_request_ = base::nullopt;
return;
}
avd_->SetStream(current_decode_request_->bitstream_id,
*current_decode_request_->buffer);
break;
case AcceleratedVideoDecoder::kRanOutOfSurfaces:
DVLOGF(3) << "Ran out of surfaces. Resume when buffer is returned.";
pause_reason_ = PauseReason::kRanOutOfSurfaces;
return;
case AcceleratedVideoDecoder::kNeedContextUpdate:
DVLOGF(3) << "Awaiting context update";
pause_reason_ = PauseReason::kWaitSubFrameDecoded;
return;
case AcceleratedVideoDecoder::kDecodeError:
DVLOGF(3) << "Error decoding stream";
SetState(State::kError);
return;
case AcceleratedVideoDecoder::kTryAgain:
NOTREACHED() << "Should not reach here unless this class accepts "
"encrypted streams.";
DVLOGF(4) << "No key for decoding stream.";
SetState(State::kError);
return;
}
if (!device_->StopPolling()) {
SetState(State::kError);
return false;
}
// Streamoff input and output queue.
if (input_queue_)
input_queue_->Streamoff();
if (output_queue_)
output_queue_->Streamoff();
backend_->OnStreamStopped();
return true;
}
void V4L2SliceVideoDecoder::PumpOutputSurfaces() {
void V4L2SliceVideoDecoder::InitiateFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "state_: " << static_cast<int>(state_)
<< " Number of display surfaces: " << output_request_queue_.size();
DVLOGF(3);
bool resume_decode = false;
while (!output_request_queue_.empty()) {
if (!output_request_queue_.front().IsReady()) {
DVLOGF(3) << "The first surface is not ready yet.";
break;
}
SetState(State::kFlushing);
}
OutputRequest request = std::move(output_request_queue_.front());
output_request_queue_.pop();
switch (request.type) {
case OutputRequest::kFlushFence:
DCHECK(output_request_queue_.empty());
DVLOGF(2) << "Flush finished.";
RunDecodeCB(std::move(flush_cb_), DecodeStatus::OK);
resume_decode = true;
break;
case OutputRequest::kChangeResolutionFence:
DCHECK(output_request_queue_.empty());
if (!ChangeResolution()) {
SetState(State::kError);
return;
}
resume_decode = true;
break;
case OutputRequest::kSurface:
scoped_refptr<V4L2DecodeSurface> surface = std::move(request.surface);
DCHECK(surface->video_frame());
RunOutputCB(surface->video_frame(), surface->visible_rect(),
request.timestamp);
break;
}
}
void V4L2SliceVideoDecoder::CompleteFlush() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
if (resume_decode) {
SetState(State::kDecoding);
decoder_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&V4L2SliceVideoDecoder::PumpDecodeTask, weak_this_));
}
SetState(State::kDecoding);
}
bool V4L2SliceVideoDecoder::ChangeResolution() {
bool V4L2SliceVideoDecoder::ChangeResolution(gfx::Size pic_size,
gfx::Rect visible_rect,
size_t num_output_frames) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
DCHECK_EQ(state_, State::kFlushing);
// We change resolution after outputting all pending surfaces, there should
// be no V4L2DecodeSurface left.
DCHECK(surfaces_at_device_.empty());
DCHECK_EQ(input_queue_->QueuedBuffersCount(), 0u);
DCHECK_EQ(output_queue_->QueuedBuffersCount(), 0u);
DCHECK(output_request_queue_.empty());
if (!StopStreamV4L2Queue())
return false;
// Set output format with the new resolution.
gfx::Size pic_size = avd_->GetPicSize();
DCHECK(!pic_size.IsEmpty());
DVLOGF(3) << "Change resolution to " << pic_size.width() << "x"
<< pic_size.height();
if (!SetCodedSizeOnInputQueue(pic_size)) {
VLOGF(1) << "Failed to set coded size on input queue";
return false;
}
auto frame_layout = SetupOutputFormat(pic_size, avd_->GetVisibleRect());
auto frame_layout = SetupOutputFormat(pic_size, visible_rect);
if (!frame_layout) {
VLOGF(1) << "No format is available with thew new resolution";
SetState(State::kError);
return false;
}
......@@ -855,208 +561,34 @@ bool V4L2SliceVideoDecoder::ChangeResolution() {
DCHECK_EQ(coded_size.height() % 16, 0);
if (!gfx::Rect(coded_size).Contains(gfx::Rect(pic_size))) {
VLOGF(1) << "Got invalid adjusted coded size: " << coded_size.ToString();
SetState(State::kError);
return false;
}
// Allocate new output buffers.
if (!output_queue_->DeallocateBuffers())
if (!output_queue_->DeallocateBuffers()) {
SetState(State::kError);
return false;
size_t num_output_frames = avd_->GetRequiredNumOfPictures();
}
DCHECK_GT(num_output_frames, 0u);
if (output_queue_->AllocateBuffers(num_output_frames, V4L2_MEMORY_DMABUF) ==
0) {
VLOGF(1) << "Failed to request output buffers.";
SetState(State::kError);
return false;
}
if (output_queue_->AllocatedBuffersCount() != num_output_frames) {
VLOGF(1) << "Could not allocate requested number of output buffers.";
return false;
}
frame_pool_->SetMaxNumFrames(num_output_frames);
if (!StartStreamV4L2Queue())
return false;
SetState(State::kDecoding);
return true;
}
scoped_refptr<V4L2DecodeSurface> V4L2SliceVideoDecoder::CreateSurface() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(4);
// Request VideoFrame.
scoped_refptr<VideoFrame> frame = frame_pool_->GetFrame();
if (!frame) {
// We allocate the same number of output buffer slot in V4L2 device and the
// output VideoFrame. If there is free output buffer slot but no free
// VideoFrame, surface_it means the VideoFrame is not released at client
// side. Post PumpDecodeTask when the pool has available frames.
DVLOGF(3) << "There is no available VideoFrame.";
frame_pool_->NotifyWhenFrameAvailable(base::BindOnce(
base::IgnoreResult(&base::SequencedTaskRunner::PostTask),
decoder_task_runner_, FROM_HERE,
base::BindOnce(&V4L2SliceVideoDecoder::PumpDecodeTask, weak_this_)));
return nullptr;
}
// Request V4L2 input and output buffers.
V4L2WritableBufferRef input_buf = input_queue_->GetFreeBuffer();
V4L2WritableBufferRef output_buf = output_queue_->GetFreeBuffer();
if (!input_buf.IsValid() || !output_buf.IsValid()) {
DVLOGF(3) << "There is no free V4L2 buffer.";
return nullptr;
}
scoped_refptr<V4L2DecodeSurface> dec_surface;
if (supports_requests_) {
DCHECK(!requests_.empty());
base::ScopedFD request = std::move(requests_.front());
requests_.pop();
auto ret = V4L2RequestDecodeSurface::Create(
std::move(input_buf), std::move(output_buf), std::move(frame),
request.get());
requests_.push(std::move(request));
if (!ret) {
DVLOGF(3) << "Could not create surface.";
return nullptr;
}
dec_surface = std::move(*ret);
} else {
dec_surface = new V4L2ConfigStoreDecodeSurface(
std::move(input_buf), std::move(output_buf), std::move(frame));
}
return dec_surface;
}
void V4L2SliceVideoDecoder::ReuseOutputBuffer(V4L2ReadableBufferRef buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Reuse output surface #" << buffer->BufferId();
// Resume decoding in case of ran out of surface.
if (pause_reason_ == PauseReason::kRanOutOfSurfaces) {
decoder_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&V4L2SliceVideoDecoder::PumpDecodeTask, weak_this_));
}
}
bool V4L2SliceVideoDecoder::SubmitSlice(
const scoped_refptr<V4L2DecodeSurface>& dec_surface,
const uint8_t* data,
size_t size) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
size_t plane_size = dec_surface->input_buffer().GetPlaneSize(0);
size_t bytes_used = dec_surface->input_buffer().GetPlaneBytesUsed(0);
if (size > plane_size - bytes_used) {
VLOGF(1) << "The size of submitted slice(" << size
<< ") is larger than the remaining buffer size("
<< plane_size - bytes_used << "). Plane size is " << plane_size;
SetState(State::kError);
return false;
}
void* mapping = dec_surface->input_buffer().GetPlaneMapping(0);
memcpy(reinterpret_cast<uint8_t*>(mapping) + bytes_used, data, size);
dec_surface->input_buffer().SetPlaneBytesUsed(0, bytes_used + size);
return true;
}
void V4L2SliceVideoDecoder::DecodeSurface(
const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
// Enqueue input_buf and output_buf
dec_surface->input_buffer().PrepareQueueBuffer(*dec_surface);
if (!std::move(dec_surface->input_buffer()).QueueMMap()) {
SetState(State::kError);
return;
}
if (!std::move(dec_surface->output_buffer())
.QueueDMABuf(dec_surface->video_frame()->DmabufFds())) {
SetState(State::kError);
return;
}
if (!dec_surface->Submit()) {
VLOGF(1) << "Error while submitting frame for decoding!";
SetState(State::kError);
return;
}
surfaces_at_device_.push(std::move(dec_surface));
}
void V4L2SliceVideoDecoder::SurfaceReady(
const scoped_refptr<V4L2DecodeSurface>& dec_surface,
int32_t bitstream_id,
const gfx::Rect& visible_rect,
const VideoColorSpace& /* color_space */) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
// Find the timestamp associated with |bitstream_id|. It's possible that a
// surface is output multiple times for different |bitstream_id|s (e.g. VP9
// show_existing_frame feature). This means we need to output the same frame
// again with a different timestamp.
// On some rare occasions it's also possible that a single DecoderBuffer
// produces multiple surfaces with the same |bitstream_id|, so we shouldn't
// remove the timestamp from the cache.
const auto it = bitstream_id_to_timestamp_.Peek(bitstream_id);
DCHECK(it != bitstream_id_to_timestamp_.end());
base::TimeDelta timestamp = it->second;
dec_surface->SetVisibleRect(visible_rect);
output_request_queue_.push(
OutputRequest::Surface(std::move(dec_surface), timestamp));
PumpOutputSurfaces();
}
bool V4L2SliceVideoDecoder::StartStreamV4L2Queue() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
if (!input_queue_->Streamon() || !output_queue_->Streamon()) {
VLOGF(1) << "Failed to streamon V4L2 queue.";
SetState(State::kError);
return false;
}
if (!device_->StartPolling(
base::BindRepeating(&V4L2SliceVideoDecoder::ServiceDeviceTask,
weak_this_),
base::BindRepeating(&V4L2SliceVideoDecoder::SetState, weak_this_,
State::kError))) {
SetState(State::kError);
return false;
}
frame_pool_->SetMaxNumFrames(num_output_frames);
return true;
}
bool V4L2SliceVideoDecoder::StopStreamV4L2Queue() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
if (!device_->StopPolling()) {
if (!StartStreamV4L2Queue()) {
SetState(State::kError);
return false;
}
// Streamoff input and output queue.
if (input_queue_)
input_queue_->Streamoff();
if (output_queue_)
output_queue_->Streamoff();
while (!surfaces_at_device_.empty())
surfaces_at_device_.pop();
return true;
}
......@@ -1067,7 +599,6 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
<< ", Number of queued output buffers: "
<< output_queue_->QueuedBuffersCount();
bool resume_decode = false;
// Dequeue V4L2 output buffer first to reduce output latency.
bool success;
V4L2ReadableBufferRef dequeued_buffer;
......@@ -1080,29 +611,7 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
if (!dequeued_buffer)
break;
// Mark the output buffer decoded, and try to output surface.
DCHECK(!surfaces_at_device_.empty());
auto surface = std::move(surfaces_at_device_.front());
DCHECK_EQ(static_cast<size_t>(surface->output_record()),
dequeued_buffer->BufferId());
surfaces_at_device_.pop();
surface->SetDecoded();
// VP9Decoder update context after surface is decoded. Resume decoding for
// previous pause of AVD::kWaitSubFrameDecoded.
resume_decode = true;
// Keep a reference to the V4L2 buffer until the buffer is reused. The
// reason for this is that the config store uses V4L2 buffer IDs to
// reference frames, therefore we cannot reuse the same V4L2 buffer ID for
// another decode operation until all references to that frame are gone.
// Request API does not have this limitation, so we can probably remove this
// after config store is gone.
surface->SetReleaseCallback(
base::BindOnce(&V4L2SliceVideoDecoder::ReuseOutputBuffer, weak_this_,
std::move(dequeued_buffer)));
PumpOutputSurfaces();
backend_->OnOutputBufferDequeued(std::move(dequeued_buffer));
}
// Dequeue V4L2 input buffer.
......@@ -1115,12 +624,6 @@ void V4L2SliceVideoDecoder::ServiceDeviceTask(bool /* event */) {
if (!dequeued_buffer)
break;
}
if (resume_decode && pause_reason_ == PauseReason::kWaitSubFrameDecoded) {
decoder_task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&V4L2SliceVideoDecoder::PumpDecodeTask, weak_this_));
}
}
void V4L2SliceVideoDecoder::RunDecodeCB(DecodeCB cb, DecodeStatus status) {
......@@ -1130,7 +633,7 @@ void V4L2SliceVideoDecoder::RunDecodeCB(DecodeCB cb, DecodeStatus status) {
base::BindOnce(std::move(cb), status));
}
void V4L2SliceVideoDecoder::RunOutputCB(scoped_refptr<VideoFrame> frame,
void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
const gfx::Rect& visible_rect,
base::TimeDelta timestamp) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
......@@ -1199,11 +702,25 @@ void V4L2SliceVideoDecoder::SetState(State new_state) {
if (new_state == State::kError) {
VLOGF(1) << "Error occurred.";
ClearPendingRequests(DecodeStatus::DECODE_ERROR);
backend_->ClearPendingRequests(DecodeStatus::DECODE_ERROR);
return;
}
state_ = new_state;
return;
}
void V4L2SliceVideoDecoder::OnBackendError() {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(2);
SetState(State::kError);
}
bool V4L2SliceVideoDecoder::IsDecoding() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3);
return state_ == State::kDecoding;
}
} // namespace media
......@@ -27,18 +27,18 @@
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/media_gpu_export.h"
#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
#include "media/video/supported_video_decoder_config.h"
namespace media {
class AcceleratedVideoDecoder;
class DmabufVideoFramePool;
class V4L2DecodeSurface;
class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
public V4L2DecodeSurfaceHandler {
class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
: public VideoDecoder,
public V4L2VideoDecoderBackend::Client {
public:
using GetFramePoolCB = base::RepeatingCallback<DmabufVideoFramePool*()>;
......@@ -68,17 +68,18 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
void Reset(base::OnceClosure closure) override;
void Decode(scoped_refptr<DecoderBuffer> buffer, DecodeCB decode_cb) override;
// V4L2DecodeSurfaceHandler implementation.
scoped_refptr<V4L2DecodeSurface> CreateSurface() override;
bool SubmitSlice(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
const uint8_t* data,
size_t size) override;
void DecodeSurface(
const scoped_refptr<V4L2DecodeSurface>& dec_surface) override;
void SurfaceReady(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
int32_t bitstream_id,
const gfx::Rect& visible_rect,
const VideoColorSpace& /* color_space */) override;
// V4L2VideoDecoderBackend::Client implementation
void OnBackendError() override;
bool IsDecoding() const override;
void InitiateFlush() override;
void CompleteFlush() override;
bool ChangeResolution(gfx::Size pic_size,
gfx::Rect visible_rect,
size_t num_output_frames) override;
void RunDecodeCB(DecodeCB cb, DecodeStatus status) override;
void OutputFrame(scoped_refptr<VideoFrame> frame,
const gfx::Rect& visible_rect,
base::TimeDelta timestamp) override;
private:
friend class V4L2SliceVideoDecoderTest;
......@@ -91,29 +92,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
~V4L2SliceVideoDecoder() override;
void Destroy() override;
// Request for decoding buffer. Every Decode() call generates 1 DecodeRequest.
struct DecodeRequest {
// The decode buffer passed from Decode().
scoped_refptr<DecoderBuffer> buffer;
// The callback function passed from Decode().
DecodeCB decode_cb;
// The identifier for the decoder buffer.
int32_t bitstream_id;
DecodeRequest(scoped_refptr<DecoderBuffer> buf, DecodeCB cb, int32_t id);
// Allow move, but not copy
DecodeRequest(DecodeRequest&&);
DecodeRequest& operator=(DecodeRequest&&);
~DecodeRequest();
DISALLOW_COPY_AND_ASSIGN(DecodeRequest);
};
// Request for displaying the surface or calling the decode callback.
struct OutputRequest;
enum class State {
// Initial state. Transitions to |kDecoding| if Initialize() is successful,
// |kError| otherwise.
......@@ -128,18 +106,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
kError,
};
// The reason the decoding is paused.
enum class PauseReason {
// Not stopped, decoding normally.
kNone,
// Cannot create a new V4L2 surface. Waiting for surfaces to be released.
kRanOutOfSurfaces,
// A VP9 superframe contains multiple subframes. Before decoding the next
// subframe, we need to wait for previous subframes decoded and update the
// context.
kWaitSubFrameDecoded,
};
class BitstreamIdGenerator {
public:
BitstreamIdGenerator() { DETACH_FROM_SEQUENCE(sequence_checker_); }
......@@ -185,30 +151,11 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
void DestroyTask();
// Reset on decoder thread.
void ResetTask(base::OnceClosure closure);
// Reset |avd_|, clear all pending requests, and call all pending decode
// callback with |status| argument.
void ClearPendingRequests(DecodeStatus status);
// Enqueue |request| to the pending decode request queue, and try to decode
// from the queue.
// Enqueue |buffer| to be decoded. |decode_cb| will be called once |buffer|
// is no longer used.
void EnqueueDecodeTask(scoped_refptr<DecoderBuffer> buffer,
V4L2SliceVideoDecoder::DecodeCB decode_cb);
// Try to decode buffer from the pending decode request queue.
// This method stops decoding when:
// - Run out of surface
// - Flushing or changing resolution
// Invoke this method again when these situation ends.
void PumpDecodeTask();
// Try to output surface from |output_request_queue_|.
// This method stops outputting surface when the first surface is not dequeued
// from the V4L2 device. Invoke this method again when any surface is
// dequeued from the V4L2 device.
void PumpOutputSurfaces();
// Setup the format of V4L2 output buffer, and allocate new buffer set.
bool ChangeResolution();
// Callback which is called when V4L2 surface is destroyed.
void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
// Start streaming V4L2 input and output queues. Attempt to start
// |device_poll_thread_| before starting streaming.
bool StartStreamV4L2Queue();
......@@ -218,27 +165,18 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
// Try to dequeue input and output buffers from device.
void ServiceDeviceTask(bool event);
// Convert the frame and call the output callback.
void RunOutputCB(scoped_refptr<VideoFrame> frame,
const gfx::Rect& visible_rect,
base::TimeDelta timestamp);
// Call the decode callback and count the number of pending callbacks.
void RunDecodeCB(DecodeCB cb, DecodeStatus status);
// Change the state and check the state transition is valid.
void SetState(State new_state);
// Check whether request api is supported or not.
bool CheckRequestAPISupport();
// Allocate necessary request buffers is request api is supported.
bool AllocateRequests();
// The V4L2 backend, i.e. the part of the decoder that sends
// decoding jobs to the kernel.
std::unique_ptr<V4L2VideoDecoderBackend> backend_;
// V4L2 device in use.
scoped_refptr<V4L2Device> device_;
// VideoFrame manager used to allocate and recycle video frame.
GetFramePoolCB get_pool_cb_;
DmabufVideoFramePool* frame_pool_ = nullptr;
// Video decoder used to parse stream headers by software.
std::unique_ptr<AcceleratedVideoDecoder> avd_;
// Client task runner. All public methods of VideoDecoder interface are
// executed at this task runner.
......@@ -249,8 +187,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
// State of the instance.
State state_ = State::kUninitialized;
// Indicates why decoding is currently paused.
PauseReason pause_reason_ = PauseReason::kNone;
// Parameters for generating output VideoFrame.
base::Optional<VideoFrameLayout> frame_layout_;
......@@ -259,41 +195,16 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder : public VideoDecoder,
// Callbacks passed from Initialize().
OutputCB output_cb_;
// Callbacks of EOS buffer passed from Decode().
DecodeCB flush_cb_;
// V4L2 input and output queue.
scoped_refptr<V4L2Queue> input_queue_;
scoped_refptr<V4L2Queue> output_queue_;
// The time at which each buffer decode operation started. Not each decode
// operation leads to a frame being output and frames might be reordered, so
// we don't know when it's safe to drop a timestamp. This means we need to use
// a cache here, with a size large enough to account for frame reordering.
base::MRUCache<int32_t, base::TimeDelta> bitstream_id_to_timestamp_;
// Queue of pending decode request.
base::queue<DecodeRequest> decode_request_queue_;
// Surfaces enqueued to V4L2 device. Since we are stateless, they are
// guaranteed to be proceeded in FIFO order.
base::queue<scoped_refptr<V4L2DecodeSurface>> surfaces_at_device_;
// The decode request which is currently processed.
base::Optional<DecodeRequest> current_decode_request_;
// Queue of pending output request.
base::queue<OutputRequest> output_request_queue_;
BitstreamIdGenerator bitstream_id_generator_;
// True if the decoder needs bitstream conversion before decoding.
bool needs_bitstream_conversion_ = false;
BitstreamIdGenerator bitstream_id_generator_;
// Set to true by CreateInputBuffers() if the codec driver supports requests.
bool supports_requests_ = false;
// FIFO queue of requests, only used if supports_requests_ is true.
std::queue<base::ScopedFD> requests_;
// Stores the media file descriptor, only used if supports_requests_ is true.
base::ScopedFD media_fd_;
SEQUENCE_CHECKER(client_sequence_checker_);
SEQUENCE_CHECKER(decoder_sequence_checker_);
......
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
#include "base/logging.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_device.h"
namespace media {
// Stores the decoder |client| and V4L2 |device|, then caches the device's
// OUTPUT and CAPTURE queues. In V4L2 decoder terminology the OUTPUT queue
// carries the encoded bitstream into the driver and the CAPTURE queue
// returns decoded frames.
V4L2VideoDecoderBackend::V4L2VideoDecoderBackend(
    Client* const client,
    scoped_refptr<V4L2Device> device)
    : client_(client), device_(std::move(device)) {
  input_queue_ = device_->GetQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
  output_queue_ = device_->GetQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
  // The decoder is expected to have initialized both queues before creating
  // the backend, so failing to get them indicates a programming error.
  if (!input_queue_ || !output_queue_) {
    VLOGF(1) << "Failed to get V4L2 queue. This should not happen since the "
             << "queues are supposed to be initialized when we are called.";
    NOTREACHED();
  }
}

V4L2VideoDecoderBackend::~V4L2VideoDecoderBackend() = default;
} // namespace media
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_H_
#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_H_
#include "base/memory/scoped_refptr.h"
#include "media/base/decode_status.h"
#include "media/base/video_decoder.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
class V4L2Device;
class V4L2Queue;
class V4L2ReadableBuffer;
using V4L2ReadableBufferRef = scoped_refptr<V4L2ReadableBuffer>;
// Abstract class that performs the low-level V4L2 decoding tasks depending
// on the decoding API chosen (stateful or stateless).
//
// The backend receives encoded buffers via EnqueueDecodeTask() and is
// responsible for acquiring the V4L2 resources (output and capture buffers,
// etc) and sending them to the V4L2 driver. When a decoded buffer is dequeued,
// |OutputBufferDequeued| is automatically called from the decoder.
//
// The backend can call into some of the decoder methods, notably OutputFrame()
// to send a |VideoFrame| to the decoder's client, and Error() to signal that
// an unrecoverable error has occurred.
//
// This class must run entirely inside the decoder thread. All overridden
// methods must check that they are called from sequence_checker_.
class V4L2VideoDecoderBackend {
 public:
  // Interface for the backend to call back into the decoder it is serving.
  // All methods must be called from the same sequence as the backend.
  class Client {
   public:
    // Inform that an unrecoverable error has occurred in the backend.
    virtual void OnBackendError() = 0;
    // Returns true if we are in a state that allows decoding to proceed.
    virtual bool IsDecoding() const = 0;
    // Start flushing. No new decoding requests will be processed until
    // CompleteFlush() is called.
    virtual void InitiateFlush() = 0;
    // Inform that the flush started by InitiateFlush() is complete.
    virtual void CompleteFlush() = 0;
    // Stop the stream to reallocate the CAPTURE buffers. Can only be done
    // between calls to |InitiateFlush| and |CompleteFlush|.
    virtual bool ChangeResolution(gfx::Size pic_size,
                                  gfx::Rect visible_rect,
                                  size_t num_output_frames) = 0;
    // Call the decode callback and count the number of pending callbacks.
    virtual void RunDecodeCB(VideoDecoder::DecodeCB cb,
                             DecodeStatus status) = 0;
    // Convert the frame and call the output callback.
    virtual void OutputFrame(scoped_refptr<VideoFrame> frame,
                             const gfx::Rect& visible_rect,
                             base::TimeDelta timestamp) = 0;
  };

  virtual ~V4L2VideoDecoderBackend();

  // One-time initialization of the backend. Returns false on failure.
  virtual bool Initialize() = 0;
  // Schedule |buffer| to be processed, with bitstream ID |bitstream_id|.
  // The backend must call V4L2SliceVideoDecoder::RunDecodeCB() with |decode_cb|
  // as argument once the buffer is not used anymore.
  virtual void EnqueueDecodeTask(scoped_refptr<DecoderBuffer> buffer,
                                 VideoDecoder::DecodeCB decode_cb,
                                 int32_t bitstream_id) = 0;
  // Called by the decoder when it has dequeued a buffer from the CAPTURE
  // queue.
  virtual void OnOutputBufferDequeued(V4L2ReadableBufferRef buf) = 0;
  // Called whenever the V4L2 stream is stopped (|Streamoff| called on both
  // |V4L2Queue|s).
  virtual void OnStreamStopped() = 0;
  // Clear all pending decoding tasks and call all pending decode callbacks
  // with |status| as argument.
  virtual void ClearPendingRequests(DecodeStatus status) = 0;

 protected:
  V4L2VideoDecoderBackend(Client* const client,
                          scoped_refptr<V4L2Device> device);

  // The decoder we are serving. |client_| is the owner of this backend
  // instance, and is guaranteed to live longer than it. Thus it is safe to use
  // a raw pointer here.
  Client* const client_;
  // V4L2 device to use.
  scoped_refptr<V4L2Device> device_;
  // Input and output queues from which to get buffers.
  scoped_refptr<V4L2Queue> input_queue_;
  scoped_refptr<V4L2Queue> output_queue_;

  SEQUENCE_CHECKER(sequence_checker_);

  DISALLOW_COPY_AND_ASSIGN(V4L2VideoDecoderBackend);
};
} // namespace media
#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_H_
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h"
#include <fcntl.h>
#include <linux/media.h>
#include <sys/ioctl.h>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/sequenced_task_runner.h"
#include "media/base/decode_status.h"
#include "media/base/video_codecs.h"
#include "media/base/video_frame.h"
#include "media/gpu/accelerated_video_decoder.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_h264_accelerator.h"
#include "media/gpu/v4l2/v4l2_h264_accelerator_legacy.h"
#include "media/gpu/v4l2/v4l2_vp8_accelerator.h"
#include "media/gpu/v4l2/v4l2_vp8_accelerator_legacy.h"
#include "media/gpu/v4l2/v4l2_vp9_accelerator.h"
namespace media {
namespace {

// Size of the timestamp cache (|bitstream_id_to_timestamp_|), needs to be
// large enough for frame-reordering.
constexpr size_t kTimestampCacheSize = 128;

// Number of requests to allocate for submitting input buffers, if requests
// are used.
constexpr size_t kNumRequests = 16;

}  // namespace
// A pending entry of the output queue: either a decoded surface to hand to
// the client, or a fence marking the point at which a flush or resolution
// change completes. Entries are processed strictly in FIFO order.
struct V4L2StatelessVideoDecoderBackend::OutputRequest {
  // Creates a request to output surface |s| with timestamp |t|.
  static OutputRequest Surface(scoped_refptr<V4L2DecodeSurface> s,
                               base::TimeDelta t) {
    return OutputRequest(std::move(s), t);
  }

  static OutputRequest FlushFence() { return OutputRequest(kFlushFence); }

  static OutputRequest ChangeResolutionFence() {
    return OutputRequest(kChangeResolutionFence);
  }

  // Fences are always ready; a surface request becomes ready once the
  // hardware has finished decoding into it.
  bool IsReady() const {
    return (type != OutputRequestType::kSurface) || surface->decoded();
  }

  // Allow move, but not copy.
  OutputRequest(OutputRequest&&) = default;

  enum OutputRequestType {
    // The surface to be outputted.
    kSurface,
    // The fence to indicate the flush request.
    kFlushFence,
    // The fence to indicate resolution change request.
    kChangeResolutionFence,
  };

  // The type of the request.
  const OutputRequestType type;

  // The surface to be outputted.
  scoped_refptr<V4L2DecodeSurface> surface;

  // The timestamp of the output frame. Because a surface might be outputted
  // multiple times with different timestamp, we need to store timestamp out of
  // surface.
  base::TimeDelta timestamp;

 private:
  OutputRequest(scoped_refptr<V4L2DecodeSurface> s, base::TimeDelta t)
      : type(kSurface), surface(std::move(s)), timestamp(t) {}
  explicit OutputRequest(OutputRequestType t) : type(t) {}

  DISALLOW_COPY_AND_ASSIGN(OutputRequest);
};
// DecodeRequest is move-only; defaulted move operations and the destructor
// are defined out-of-line to keep the struct declaration lightweight.
V4L2StatelessVideoDecoderBackend::DecodeRequest::DecodeRequest(
    scoped_refptr<DecoderBuffer> buf,
    VideoDecoder::DecodeCB cb,
    int32_t id)
    : buffer(std::move(buf)), decode_cb(std::move(cb)), bitstream_id(id) {}

V4L2StatelessVideoDecoderBackend::DecodeRequest::DecodeRequest(
    DecodeRequest&&) = default;
V4L2StatelessVideoDecoderBackend::DecodeRequest&
V4L2StatelessVideoDecoderBackend::DecodeRequest::operator=(DecodeRequest&&) =
    default;

V4L2StatelessVideoDecoderBackend::DecodeRequest::~DecodeRequest() = default;
// Stores construction parameters and sizes the timestamp cache large enough
// to cope with frame reordering. Must be constructed on the decoder
// sequence, which all subsequent calls are checked against.
V4L2StatelessVideoDecoderBackend::V4L2StatelessVideoDecoderBackend(
    Client* const client,
    scoped_refptr<V4L2Device> device,
    DmabufVideoFramePool* const frame_pool,
    VideoCodecProfile profile,
    scoped_refptr<base::SequencedTaskRunner> task_runner)
    : V4L2VideoDecoderBackend(client, std::move(device)),
      frame_pool_(frame_pool),
      profile_(profile),
      bitstream_id_to_timestamp_(kTimestampCacheSize),
      task_runner_(task_runner) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // Cache the weak pointer so callbacks can be bound cheaply later on.
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
V4L2StatelessVideoDecoderBackend::~V4L2StatelessVideoDecoderBackend() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  // No surface may still be queued to the driver at destruction time.
  DCHECK(surfaces_at_device_.empty());

  // Destruction with pending work is unexpected; log it so misuse can be
  // diagnosed, but carry on tearing down.
  if (!output_request_queue_.empty() || flush_cb_ || current_decode_request_ ||
      !decode_request_queue_.empty()) {
    VLOGF(1) << "Should not destroy backend during pending decode!";
  }

  // Release the software parser first...
  if (avd_) {
    avd_->Reset();
    avd_ = nullptr;
  }

  // ...then the request FDs and media device FD, if the request API was in
  // use.
  if (supports_requests_) {
    requests_ = {};
    media_fd_.reset();
  }
}
bool V4L2StatelessVideoDecoderBackend::Initialize() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
if (!CheckRequestAPISupport()) {
VPLOGF(1) << "Failed to check request api support.";
return false;
}
// Create codec-specific AcceleratedVideoDecoder.
// TODO(akahuang): Check the profile is supported.
if (profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX) {
if (supports_requests_) {
avd_.reset(new H264Decoder(
std::make_unique<V4L2H264Accelerator>(this, device_.get())));
} else {
avd_.reset(new H264Decoder(
std::make_unique<V4L2LegacyH264Accelerator>(this, device_.get())));
}
} else if (profile_ >= VP8PROFILE_MIN && profile_ <= VP8PROFILE_MAX) {
if (supports_requests_) {
avd_.reset(new VP8Decoder(
std::make_unique<V4L2VP8Accelerator>(this, device_.get())));
} else {
avd_.reset(new VP8Decoder(
std::make_unique<V4L2LegacyVP8Accelerator>(this, device_.get())));
}
} else if (profile_ >= VP9PROFILE_MIN && profile_ <= VP9PROFILE_MAX) {
avd_.reset(new VP9Decoder(
std::make_unique<V4L2VP9Accelerator>(this, device_.get())));
} else {
VLOGF(1) << "Unsupported profile " << GetProfileName(profile_);
return false;
}
if (supports_requests_ && !AllocateRequests()) {
return false;
}
return true;
}
// Release callback attached to each surface: runs when a V4L2 CAPTURE
// buffer becomes reusable, and resumes decoding if we had paused because no
// surface was available.
void V4L2StatelessVideoDecoderBackend::ReuseOutputBuffer(
    V4L2ReadableBufferRef buffer) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3) << "Reuse output surface #" << buffer->BufferId();

  // Nothing to do unless decoding was paused waiting for a free surface.
  if (pause_reason_ != PauseReason::kRanOutOfSurfaces)
    return;

  // A surface just became available again: resume the decoding loop.
  pause_reason_ = PauseReason::kNone;
  task_runner_->PostTask(
      FROM_HERE,
      base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
                     weak_this_));
}
// Invoked by the decoder each time a buffer is dequeued from the CAPTURE
// queue. With the stateless API, decode jobs complete in submission order,
// so the dequeued buffer always corresponds to the oldest entry of
// |surfaces_at_device_|.
void V4L2StatelessVideoDecoderBackend::OnOutputBufferDequeued(
    V4L2ReadableBufferRef dequeued_buffer) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // Mark the output buffer decoded, and try to output surface.
  DCHECK(!surfaces_at_device_.empty());
  auto surface = std::move(surfaces_at_device_.front());
  // FIFO ordering means the dequeued buffer's ID must match the oldest
  // surface's output record.
  DCHECK_EQ(static_cast<size_t>(surface->output_record()),
            dequeued_buffer->BufferId());
  surfaces_at_device_.pop();
  surface->SetDecoded();

  // Keep a reference to the V4L2 buffer until the buffer is reused. The
  // reason for this is that the config store uses V4L2 buffer IDs to
  // reference frames, therefore we cannot reuse the same V4L2 buffer ID for
  // another decode operation until all references to that frame are gone.
  // Request API does not have this limitation, so we can probably remove this
  // after config store is gone.
  surface->SetReleaseCallback(
      base::BindOnce(&V4L2StatelessVideoDecoderBackend::ReuseOutputBuffer,
                     weak_this_, std::move(dequeued_buffer)));

  PumpOutputSurfaces();

  // If we were waiting for an output buffer to be available, schedule a
  // decode task.
  if (pause_reason_ == PauseReason::kWaitSubFrameDecoded) {
    pause_reason_ = PauseReason::kNone;
    task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
                       weak_this_));
  }
}
// Assembles a new decode surface from one free VideoFrame, one free V4L2
// input buffer and one free V4L2 output buffer. Returns nullptr if any of
// the three is unavailable; when the frame pool is exhausted, schedules
// DoDecodeWork() to run again once a frame is returned.
scoped_refptr<V4L2DecodeSurface>
V4L2StatelessVideoDecoderBackend::CreateSurface() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(4);

  // Request VideoFrame.
  scoped_refptr<VideoFrame> frame = frame_pool_->GetFrame();
  if (!frame) {
    // We allocate the same number of output buffer slot in V4L2 device and the
    // output VideoFrame. If there is free output buffer slot but no free
    // VideoFrame, it means the VideoFrame is not released at the client
    // side. Post DoDecodeWork when the pool has available frames.
    DVLOGF(3) << "There is no available VideoFrame.";
    frame_pool_->NotifyWhenFrameAvailable(base::BindOnce(
        base::IgnoreResult(&base::SequencedTaskRunner::PostTask), task_runner_,
        FROM_HERE,
        base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
                       weak_this_)));
    return nullptr;
  }

  // Request V4L2 input and output buffers.
  V4L2WritableBufferRef input_buf = input_queue_->GetFreeBuffer();
  V4L2WritableBufferRef output_buf = output_queue_->GetFreeBuffer();
  if (!input_buf.IsValid() || !output_buf.IsValid()) {
    DVLOGF(3) << "There is no free V4L2 buffer.";
    return nullptr;
  }

  scoped_refptr<V4L2DecodeSurface> dec_surface;
  if (supports_requests_) {
    // Rotate through the request FD queue: take the front request for this
    // surface and push it back for later reuse.
    DCHECK(!requests_.empty());
    base::ScopedFD request = std::move(requests_.front());
    requests_.pop();
    auto ret = V4L2RequestDecodeSurface::Create(
        std::move(input_buf), std::move(output_buf), std::move(frame),
        request.get());
    requests_.push(std::move(request));
    if (!ret) {
      DVLOGF(3) << "Could not create surface.";
      return nullptr;
    }
    dec_surface = std::move(*ret);
  } else {
    dec_surface = new V4L2ConfigStoreDecodeSurface(
        std::move(input_buf), std::move(output_buf), std::move(frame));
  }

  return dec_surface;
}
// Appends |size| bytes of slice data from |data| into plane 0 of the
// surface's input buffer. Reports a backend error and returns false if the
// slice does not fit in the remaining plane space.
bool V4L2StatelessVideoDecoderBackend::SubmitSlice(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface,
    const uint8_t* data,
    size_t size) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  const size_t plane_size = dec_surface->input_buffer().GetPlaneSize(0);
  const size_t bytes_used = dec_surface->input_buffer().GetPlaneBytesUsed(0);
  const size_t remaining = plane_size - bytes_used;
  if (size > remaining) {
    VLOGF(1) << "The size of submitted slice(" << size
             << ") is larger than the remaining buffer size(" << remaining
             << "). Plane size is " << plane_size;
    client_->OnBackendError();
    return false;
  }

  // Copy the slice right after the data already present in the plane, and
  // account for the newly used bytes.
  uint8_t* const dst =
      reinterpret_cast<uint8_t*>(
          dec_surface->input_buffer().GetPlaneMapping(0)) +
      bytes_used;
  memcpy(dst, data, size);
  dec_surface->input_buffer().SetPlaneBytesUsed(0, bytes_used + size);
  return true;
}
// Queues the surface's input (bitstream) and output (frame) buffers to the
// driver and submits the decode job. Any failure is reported to the client
// as an unrecoverable backend error.
void V4L2StatelessVideoDecoderBackend::DecodeSurface(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  // Enqueue input_buf and output_buf
  dec_surface->input_buffer().PrepareQueueBuffer(*dec_surface);
  if (!std::move(dec_surface->input_buffer()).QueueMMap()) {
    client_->OnBackendError();
    return;
  }
  if (!std::move(dec_surface->output_buffer())
           .QueueDMABuf(dec_surface->video_frame()->DmabufFds())) {
    client_->OnBackendError();
    return;
  }
  if (!dec_surface->Submit()) {
    VLOGF(1) << "Error while submitting frame for decoding!";
    client_->OnBackendError();
    return;
  }

  // Track the surface so it can be matched with the dequeued CAPTURE buffer
  // in OnOutputBufferDequeued(). Note: |dec_surface| is a const reference,
  // so the previous std::move() here was a no-op that silently copied; copy
  // explicitly instead.
  surfaces_at_device_.push(dec_surface);
}
// Called by the accelerator when |dec_surface| is ready to be output in
// display order. Queues an output request carrying the timestamp looked up
// from |bitstream_id|, then tries to drain the output queue.
void V4L2StatelessVideoDecoderBackend::SurfaceReady(
    const scoped_refptr<V4L2DecodeSurface>& dec_surface,
    int32_t bitstream_id,
    const gfx::Rect& visible_rect,
    const VideoColorSpace& /* color_space */) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  // Find the timestamp associated with |bitstream_id|. It's possible that a
  // surface is output multiple times for different |bitstream_id|s (e.g. VP9
  // show_existing_frame feature). This means we need to output the same frame
  // again with a different timestamp.
  // On some rare occasions it's also possible that a single DecoderBuffer
  // produces multiple surfaces with the same |bitstream_id|, so we shouldn't
  // remove the timestamp from the cache.
  const auto it = bitstream_id_to_timestamp_.Peek(bitstream_id);
  DCHECK(it != bitstream_id_to_timestamp_.end());
  base::TimeDelta timestamp = it->second;

  dec_surface->SetVisibleRect(visible_rect);
  output_request_queue_.push(
      OutputRequest::Surface(std::move(dec_surface), timestamp));
  PumpOutputSurfaces();
}
// Accepts one encoded |buffer| for decoding. |decode_cb| runs once the
// buffer is no longer needed. Starts the decode loop if it is idle.
void V4L2StatelessVideoDecoderBackend::EnqueueDecodeTask(
    scoped_refptr<DecoderBuffer> buffer,
    VideoDecoder::DecodeCB decode_cb,
    int32_t bitstream_id) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  // EOS buffers carry no data, hence no timestamp to remember.
  if (!buffer->end_of_stream())
    bitstream_id_to_timestamp_.Put(bitstream_id, buffer->timestamp());

  decode_request_queue_.emplace(std::move(buffer), std::move(decode_cb),
                                bitstream_id);

  // Kick the decoding loop only when it is idle; otherwise the new request
  // will be picked up by the loop itself.
  if (!current_decode_request_)
    DoDecodeWork();
}
// Advances decoding if the decoder is in a decoding state; any pumping
// failure is surfaced as an unrecoverable backend error.
void V4L2StatelessVideoDecoderBackend::DoDecodeWork() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);

  if (client_->IsDecoding() && !PumpDecodeTask())
    client_->OnBackendError();
}
// Main decoding loop: repeatedly feeds the software parser (|avd_|) and
// reacts to its state. Returns false on unrecoverable decode error, true
// otherwise — including when decoding is merely paused (resolution change,
// out of surfaces, waiting for a subframe, or queue drained).
bool V4L2StatelessVideoDecoderBackend::PumpDecodeTask() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3) << " Number of Decode requests: " << decode_request_queue_.size();

  pause_reason_ = PauseReason::kNone;
  while (true) {
    switch (avd_->Decode()) {
      case AcceleratedVideoDecoder::kAllocateNewSurfaces:
        // Stream resolution changed: flush pending output, and let the
        // fence trigger the actual reallocation (see PumpOutputSurfaces()).
        DVLOGF(3) << "Need to change resolution. Pause decoding.";
        client_->InitiateFlush();
        output_request_queue_.push(OutputRequest::ChangeResolutionFence());
        PumpOutputSurfaces();
        return true;

      case AcceleratedVideoDecoder::kRanOutOfStreamData:
        // Current decode request is finished processing.
        if (current_decode_request_) {
          DCHECK(current_decode_request_->decode_cb);
          client_->RunDecodeCB(std::move(current_decode_request_->decode_cb),
                               DecodeStatus::OK);
          current_decode_request_ = base::nullopt;
        }

        // Process next decode request.
        if (decode_request_queue_.empty())
          return true;
        current_decode_request_ = std::move(decode_request_queue_.front());
        decode_request_queue_.pop();
        if (current_decode_request_->buffer->end_of_stream()) {
          if (!avd_->Flush()) {
            VLOGF(1) << "Failed flushing the decoder.";
            return false;
          }
          // Put the decoder in an idle state, ready to resume.
          avd_->Reset();

          client_->InitiateFlush();
          DCHECK(!flush_cb_);
          flush_cb_ = std::move(current_decode_request_->decode_cb);

          // The flush fence completes (running |flush_cb_|) once every
          // pending surface has been output.
          output_request_queue_.push(OutputRequest::FlushFence());
          PumpOutputSurfaces();
          current_decode_request_ = base::nullopt;
          return true;
        }

        avd_->SetStream(current_decode_request_->bitstream_id,
                        *current_decode_request_->buffer);
        break;

      case AcceleratedVideoDecoder::kRanOutOfSurfaces:
        // Decoding resumes from ReuseOutputBuffer() when a surface returns.
        DVLOGF(3) << "Ran out of surfaces. Resume when buffer is returned.";
        pause_reason_ = PauseReason::kRanOutOfSurfaces;
        return true;

      case AcceleratedVideoDecoder::kNeedContextUpdate:
        // Decoding resumes from OnOutputBufferDequeued() once the previous
        // subframe has been decoded.
        DVLOGF(3) << "Awaiting context update";
        pause_reason_ = PauseReason::kWaitSubFrameDecoded;
        return true;

      case AcceleratedVideoDecoder::kDecodeError:
        DVLOGF(3) << "Error decoding stream";
        return false;

      case AcceleratedVideoDecoder::kTryAgain:
        NOTREACHED() << "Should not reach here unless this class accepts "
                        "encrypted streams.";
        DVLOGF(4) << "No key for decoding stream.";
        return false;
    }
  }
}
// Drains |output_request_queue_| in FIFO order, stopping at the first entry
// whose surface has not finished decoding yet. Flush and resolution-change
// fences are handled when they reach the front of the queue.
void V4L2StatelessVideoDecoderBackend::PumpOutputSurfaces() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3) << "Number of display surfaces: " << output_request_queue_.size();

  bool resume_decode = false;
  while (!output_request_queue_.empty()) {
    if (!output_request_queue_.front().IsReady()) {
      DVLOGF(3) << "The first surface is not ready yet.";
      break;
    }

    OutputRequest request = std::move(output_request_queue_.front());
    output_request_queue_.pop();
    switch (request.type) {
      case OutputRequest::kFlushFence:
        // A fence is queued only after every request preceding it, so the
        // queue must be empty once it is reached.
        DCHECK(output_request_queue_.empty());
        DVLOGF(2) << "Flush finished.";
        client_->RunDecodeCB(std::move(flush_cb_), DecodeStatus::OK);
        resume_decode = true;
        break;

      case OutputRequest::kChangeResolutionFence:
        DCHECK(output_request_queue_.empty());
        if (!ChangeResolution()) {
          client_->OnBackendError();
          return;
        }
        resume_decode = true;
        break;

      case OutputRequest::kSurface:
        scoped_refptr<V4L2DecodeSurface> surface = std::move(request.surface);

        DCHECK(surface->video_frame());
        client_->OutputFrame(surface->video_frame(), surface->visible_rect(),
                             request.timestamp);
        break;
    }
  }

  // A fence was crossed: the flush (or resolution change) is complete, and
  // decoding may resume.
  if (resume_decode) {
    client_->CompleteFlush();
    task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
                       weak_this_));
  }
}
bool V4L2StatelessVideoDecoderBackend::ChangeResolution() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// We change resolution after outputting all pending surfaces, there should
// be no V4L2DecodeSurface left.
DCHECK(surfaces_at_device_.empty());
DCHECK(output_request_queue_.empty());
// Set output format with the new resolution.
gfx::Size pic_size = avd_->GetPicSize();
DCHECK(!pic_size.IsEmpty());
DVLOGF(3) << "Change resolution to " << pic_size.ToString();
size_t num_output_frames = avd_->GetRequiredNumOfPictures();
gfx::Rect visible_rect = avd_->GetVisibleRect();
return client_->ChangeResolution(pic_size, visible_rect, num_output_frames);
}
void V4L2StatelessVideoDecoderBackend::OnStreamStopped() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  // The V4L2 stream has been stopped, so all surfaces on the device
  // have been returned to the client. Drop our bookkeeping of them.
  surfaces_at_device_ = {};
}
// Drops all queued work and reports |status| through every pending decode
// callback (flush callback, in-flight request, then queued requests).
void V4L2StatelessVideoDecoderBackend::ClearPendingRequests(
    DecodeStatus status) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  // Put the software parser back into a pristine state.
  if (avd_)
    avd_->Reset();

  // Drop every queued output request.
  output_request_queue_ = {};

  // Fail the pending flush, if any.
  if (flush_cb_)
    client_->RunDecodeCB(std::move(flush_cb_), status);

  // Fail the request currently being decoded, then every queued one.
  if (current_decode_request_) {
    client_->RunDecodeCB(std::move(current_decode_request_->decode_cb), status);
    current_decode_request_ = base::nullopt;
  }
  while (!decode_request_queue_.empty()) {
    client_->RunDecodeCB(std::move(decode_request_queue_.front().decode_cb),
                         status);
    decode_request_queue_.pop();
  }
}
// Probes whether the driver supports the request API by issuing a
// zero-count VIDIOC_REQBUFS on the OUTPUT queue and inspecting the reported
// capabilities. On success sets |supports_requests_| and, when requests are
// supported, opens the media device needed to allocate them; otherwise the
// legacy config store path is used.
bool V4L2StatelessVideoDecoderBackend::CheckRequestAPISupport() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  // count == 0 makes this a pure capability query: no buffers are allocated.
  struct v4l2_requestbuffers reqbufs;
  memset(&reqbufs, 0, sizeof(reqbufs));
  reqbufs.count = 0;
  reqbufs.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
  reqbufs.memory = V4L2_MEMORY_MMAP;
  if (device_->Ioctl(VIDIOC_REQBUFS, &reqbufs) != 0) {
    VPLOGF(1) << "VIDIOC_REQBUFS ioctl failed.";
    return false;
  }
  if (reqbufs.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS) {
    supports_requests_ = true;
    VLOGF(1) << "Using request API.";
    DCHECK(!media_fd_.is_valid());
    // Let's try to open the media device
    // TODO(crbug.com/985230): remove this hardcoding, replace with V4L2Device
    // integration.
    int media_fd = open("/dev/media-dec0", O_RDWR, 0);
    if (media_fd < 0) {
      VPLOGF(1) << "Failed to open media device.";
      return false;
    }
    media_fd_ = base::ScopedFD(media_fd);
  } else {
    VLOGF(1) << "Using config store.";
  }

  return true;
}
// Allocates kNumRequests request FDs from the media device; these are
// cycled through when submitting input buffers. Returns false if any
// allocation fails.
bool V4L2StatelessVideoDecoderBackend::AllocateRequests() {
  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
  DVLOGF(3);

  DCHECK(requests_.empty());
  while (requests_.size() < kNumRequests) {
    int request_fd;
    const int ret = HANDLE_EINTR(
        ioctl(media_fd_.get(), MEDIA_IOC_REQUEST_ALLOC, &request_fd));
    if (ret < 0) {
      VPLOGF(1) << "Failed to create request: ";
      return false;
    }
    requests_.push(base::ScopedFD(request_fd));
  }

  DCHECK_EQ(requests_.size(), kNumRequests);
  return true;
}
} // namespace media
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATELESS_H_
#define MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATELESS_H_
#include "base/containers/mru_cache.h"
#include "base/containers/queue.h"
#include "base/memory/weak_ptr.h"
#include "base/sequence_checker.h"
#include "base/sequenced_task_runner.h"
#include "media/base/decode_status.h"
#include "media/base/video_decoder.h"
#include "media/gpu/linux/dmabuf_video_frame_pool.h"
#include "media/gpu/v4l2/v4l2_decode_surface_handler.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
namespace media {
class AcceleratedVideoDecoder;
class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
public V4L2DecodeSurfaceHandler {
public:
// Constructor for the stateless backend. Arguments are:
// |client| the decoder we will be backing.
// |device| the V4L2 decoder device.
// |frame_pool| pool from which to get backing memory for decoded frames.
// |profile| profile of the codec we will decode.
// |task_runner| the decoder task runner, to which we will post our tasks.
V4L2StatelessVideoDecoderBackend(
Client* const client,
scoped_refptr<V4L2Device> device,
DmabufVideoFramePool* const frame_pool,
VideoCodecProfile profile,
scoped_refptr<base::SequencedTaskRunner> task_runner);
~V4L2StatelessVideoDecoderBackend() override;
// V4L2VideoDecoderBackend implementation
bool Initialize() override;
void EnqueueDecodeTask(scoped_refptr<DecoderBuffer> buffer,
VideoDecoder::DecodeCB decode_cb,
int32_t bitstream_id) override;
void OnOutputBufferDequeued(V4L2ReadableBufferRef buffer) override;
void OnStreamStopped() override;
void ClearPendingRequests(DecodeStatus status) override;
// V4L2DecodeSurfaceHandler implementation.
scoped_refptr<V4L2DecodeSurface> CreateSurface() override;
bool SubmitSlice(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
const uint8_t* data,
size_t size) override;
void DecodeSurface(
const scoped_refptr<V4L2DecodeSurface>& dec_surface) override;
void SurfaceReady(const scoped_refptr<V4L2DecodeSurface>& dec_surface,
int32_t bitstream_id,
const gfx::Rect& visible_rect,
const VideoColorSpace& color_space) override;
private:
// Request for displaying the surface or calling the decode callback.
struct OutputRequest;
// Request for decoding buffer. Every EnqueueDecodeTask() call generates 1
// DecodeRequest.
struct DecodeRequest {
// The decode buffer passed to EnqueueDecodeTask().
scoped_refptr<DecoderBuffer> buffer;
// The callback function passed to EnqueueDecodeTask().
VideoDecoder::DecodeCB decode_cb;
// The identifier for the decoder buffer.
int32_t bitstream_id;
DecodeRequest(scoped_refptr<DecoderBuffer> buf,
VideoDecoder::DecodeCB cb,
int32_t id);
// Allow move, but not copy
DecodeRequest(DecodeRequest&&);
DecodeRequest& operator=(DecodeRequest&&);
~DecodeRequest();
DISALLOW_COPY_AND_ASSIGN(DecodeRequest);
};
// The reason the decoding is paused.
enum class PauseReason {
// Not stopped, decoding normally.
kNone,
// Cannot create a new V4L2 surface. Waiting for surfaces to be released.
kRanOutOfSurfaces,
// A VP9 superframe contains multiple subframes. Before decoding the next
// subframe, we need to wait for previous subframes decoded and update the
// context.
kWaitSubFrameDecoded,
};
// Callback invoked when a V4L2 surface is destroyed, so its underlying
// output buffer can be returned to the device for reuse.
void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
// Try to advance the decoding work.
void DoDecodeWork();
// Try to decode buffers from the pending decode request queue.
// This method stops decoding when:
// - We run out of surfaces, or
// - We are flushing or changing resolution.
// Invoke this method again once these situations end.
bool PumpDecodeTask();
// Try to output surfaces from |output_request_queue_|.
// This method stops outputting surfaces when the first surface is not
// dequeued from the V4L2 device. Invoke this method again when any surface
// is dequeued from the V4L2 device.
void PumpOutputSurfaces();
// Set up the format of the V4L2 output buffers, and allocate a new buffer
// set matching the new resolution.
bool ChangeResolution();
// Check whether the V4L2 request API is supported by the driver.
bool CheckRequestAPISupport();
// Allocate the necessary request FDs if the request API is supported.
bool AllocateRequests();
// Video frame pool provided by the decoder. Not owned.
DmabufVideoFramePool* const frame_pool_;
// Video profile we will be decoding.
const VideoCodecProfile profile_;
// Video decoder used to parse stream headers in software.
std::unique_ptr<AcceleratedVideoDecoder> avd_;
// The decode request currently being processed, if any.
base::Optional<DecodeRequest> current_decode_request_;
// Surfaces enqueued to the V4L2 device. Since we are stateless, they are
// guaranteed to be processed in FIFO order.
base::queue<scoped_refptr<V4L2DecodeSurface>> surfaces_at_device_;
// Queue of pending decode requests.
base::queue<DecodeRequest> decode_request_queue_;
// Queue of pending output requests.
base::queue<OutputRequest> output_request_queue_;
// Indicates why decoding is currently paused.
PauseReason pause_reason_ = PauseReason::kNone;
// The time at which each buffer decode operation started. Not each decode
// operation leads to a frame being output and frames might be reordered, so
// we don't know when it's safe to drop a timestamp. This means we need to use
// a cache here, with a size large enough to account for frame reordering.
base::MRUCache<int32_t, base::TimeDelta> bitstream_id_to_timestamp_;
// The task runner we are running on, for convenience.
const scoped_refptr<base::SequencedTaskRunner> task_runner_;
// Callback of the EOS buffer passed from Decode(); run when the flush
// completes.
VideoDecoder::DecodeCB flush_cb_;
// Set to true during Initialize() if the codec driver supports the request
// API.
bool supports_requests_ = false;
// FIFO queue of request FDs; only used if |supports_requests_| is true.
base::queue<base::ScopedFD> requests_;
// Stores the media device file descriptor; only used if
// |supports_requests_| is true.
base::ScopedFD media_fd_;
// Weak pointers of this, bound to |task_runner_|.
base::WeakPtr<V4L2StatelessVideoDecoderBackend> weak_this_;
base::WeakPtrFactory<V4L2StatelessVideoDecoderBackend> weak_this_factory_{
this};
DISALLOW_COPY_AND_ASSIGN(V4L2StatelessVideoDecoderBackend);
};
} // namespace media
#endif // MEDIA_GPU_V4L2_V4L2_VIDEO_DECODER_BACKEND_STATELESS_H_
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment