Commit e6bf70c2 authored by Shuo-Peng Liao's avatar Shuo-Peng Liao Committed by Commit Bot

media/gpu: Add ImageProcessor support in VideoDecoderPipeline.

VDPipeline senses its VideoDecoder cannot produce compositor renderable
output format by asking VD to call its PickDecoderOutputFormat().
When it is called, VDPipeline instantiates an ImageProcessor to see if
it can convert any VD's supported output format to a renderable format.

VDPipeline defines a PickFormatCB for VD to ask VDPipeline which output
format shall be used. The callback is assigned to VDPipeline supported
VD via its constructor. Currently V4L2SliceVideoDecoder is modified.

For ImageProcessorFactory, it adds a CreateWithInputCandidates() factory
method that instantiates an ImageProcessor to see if it can convert
any of VD's supported output formats to a valid renderable format.
The knowledge of which format is renderable is provided by
VDPipeline::PickRenderableFormat(), which is currently implemented by
hardcoding known renderable formats.

Bug: 1004727
Test: pass vda_tests --use_vd --gtest_filter=-*Reset* on Kukui,
      remaining failure might be caused by crbug.com/1023703
Test: pass VD test on Kevin and Eve, which uses V4L2SVD and VaapiVD

Change-Id: I2aaf6a6fad347fe1b43a30e56a8f1a9ced84f1d2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1911068
Reviewed-by: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: Alexandre Courbot <acourbot@chromium.org>
Commit-Queue: Chih-Yu Huang <akahuang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#721802}
parent 5bc62241
......@@ -7,8 +7,10 @@
#include <stddef.h>
#include "base/callback.h"
#include "base/memory/scoped_refptr.h"
#include "media/gpu/buildflags.h"
#include "media/gpu/chromeos/libyuv_image_processor.h"
#include "media/gpu/macros.h"
#if BUILDFLAG(USE_VAAPI)
#include "media/gpu/vaapi/vaapi_image_processor.h"
......@@ -17,10 +19,69 @@
#if BUILDFLAG(USE_V4L2_CODEC)
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_image_processor.h"
#include "media/gpu/v4l2/v4l2_vda_helpers.h"
#endif // BUILDFLAG(USE_V4L2_CODEC)
namespace media {
namespace {
#if BUILDFLAG(USE_V4L2_CODEC)
// Try to build a V4L2 ImageProcessor that converts one of |input_candidates|
// into an output format chosen by |out_format_picker|. Returns nullptr if no
// input/output combination is usable.
std::unique_ptr<ImageProcessor> CreateV4L2ImageProcessorWithInputCandidates(
    const std::vector<std::pair<Fourcc, gfx::Size>>& input_candidates,
    const gfx::Size& visible_size,
    size_t num_buffers,
    scoped_refptr<base::SequencedTaskRunner> client_task_runner,
    ImageProcessorFactory::PickFormatCB out_format_picker,
    ImageProcessor::ErrorCB error_cb) {
  // Pick a renderable output format, and try each available input format.
  // TODO(akahuang): let |out_format_picker| return a list of supported output
  // formats, and try all combination of input/output format, if any platform
  // fails to create ImageProcessor via current approach.
  std::vector<Fourcc> output_fourcc_candidates;
  for (const uint32_t v4l2_pixfmt :
       V4L2ImageProcessor::GetSupportedOutputFormats()) {
    output_fourcc_candidates.push_back(Fourcc::FromV4L2PixFmt(v4l2_pixfmt));
  }
  const uint32_t output_pixfmt =
      out_format_picker.Run(output_fourcc_candidates).ToV4L2PixFmt();
  if (!output_pixfmt)
    return nullptr;

  const auto supported_input_pixfmts =
      V4L2ImageProcessor::GetSupportedInputFormats();
  for (const auto& candidate : input_candidates) {
    const uint32_t candidate_pixfmt = candidate.first.ToV4L2PixFmt();
    const gfx::Size& candidate_size = candidate.second;
    const bool input_supported =
        std::find(supported_input_pixfmts.begin(),
                  supported_input_pixfmts.end(),
                  candidate_pixfmt) != supported_input_pixfmts.end();
    if (!input_supported)
      continue;

    // Try to get an image size as close as possible to the final size.
    gfx::Size negotiated_output_size(visible_size.width(),
                                     visible_size.height());
    size_t num_planes = 0;
    if (!V4L2ImageProcessor::TryOutputFormat(candidate_pixfmt, output_pixfmt,
                                             candidate_size,
                                             &negotiated_output_size,
                                             &num_planes)) {
      VLOGF(2) << "Failed to get output size and plane count of IP";
      continue;
    }

    return v4l2_vda_helpers::CreateImageProcessor(
        candidate_pixfmt, output_pixfmt, candidate_size,
        negotiated_output_size, visible_size, num_buffers,
        V4L2Device::Create(), ImageProcessor::OutputMode::IMPORT,
        std::move(client_task_runner), std::move(error_cb));
  }
  return nullptr;
}
#endif // BUILDFLAG(USE_V4L2_CODEC)
} // namespace
// static
std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create(
const ImageProcessor::PortConfig& input_config,
......@@ -55,4 +116,26 @@ std::unique_ptr<ImageProcessor> ImageProcessorFactory::Create(
return nullptr;
}
// static
std::unique_ptr<ImageProcessor>
ImageProcessorFactory::CreateWithInputCandidates(
    const std::vector<std::pair<Fourcc, gfx::Size>>& input_candidates,
    const gfx::Size& visible_size,
    size_t num_buffers,
    scoped_refptr<base::SequencedTaskRunner> client_task_runner,
    PickFormatCB out_format_picker,
    ImageProcessor::ErrorCB error_cb) {
#if BUILDFLAG(USE_V4L2_CODEC)
  // V4L2 is the only backend implemented so far; return its processor when
  // one can be created for any of the candidates.
  if (auto v4l2_processor = CreateV4L2ImageProcessorWithInputCandidates(
          input_candidates, visible_size, num_buffers, client_task_runner,
          out_format_picker, error_cb)) {
    return v4l2_processor;
  }
#endif  // BUILDFLAG(USE_V4L2_CODEC)

  // TODO(crbug.com/1004727): Implement LibYUVImageProcessor and
  // VaapiImageProcessor.
  return nullptr;
}
} // namespace media
......@@ -5,18 +5,26 @@
#ifndef MEDIA_GPU_CHROMEOS_IMAGE_PROCESSOR_FACTORY_H_
#define MEDIA_GPU_CHROMEOS_IMAGE_PROCESSOR_FACTORY_H_
#include <stdint.h>
#include <memory>
#include <vector>
#include "base/callback_forward.h"
#include "base/macros.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/chromeos/image_processor.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/size.h"
namespace media {
class MEDIA_GPU_EXPORT ImageProcessorFactory {
public:
// Callback to pick a valid format from given |candidates| formats.
using PickFormatCB = base::RepeatingCallback<Fourcc(
const std::vector<Fourcc>& /* candidates */)>;
// Factory method to create ImageProcessor.
// Given input and output PortConfig, it tries to find out the most suitable
// ImageProcessor to be used for the current platform.
......@@ -49,6 +57,19 @@ class MEDIA_GPU_EXPORT ImageProcessorFactory {
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
ImageProcessor::ErrorCB error_cb);
// Factory method to create ImageProcessor.
// Unlike Create(), the caller gives a list of valid inputs for the
// ImageProcessor, |input_candidates|, and |out_format_picker| for the caller
// to pick a valid output format. With these parameters the factory can
// instantiate a suitable ImageProcessor if one exists.
static std::unique_ptr<ImageProcessor> CreateWithInputCandidates(
const std::vector<std::pair<Fourcc, gfx::Size>>& input_candidates,
const gfx::Size& visible_size,
size_t num_buffers,
scoped_refptr<base::SequencedTaskRunner> client_task_runner,
PickFormatCB out_format_picker,
ImageProcessor::ErrorCB error_cb);
DISALLOW_IMPLICIT_CONSTRUCTORS(ImageProcessorFactory);
};
......
......@@ -3,17 +3,53 @@
// found in the LICENSE file.
#include "media/gpu/chromeos/video_decoder_pipeline.h"
#include <memory>
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
#include "build/build_config.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/chromeos/image_processor.h"
#include "media/gpu/chromeos/image_processor_factory.h"
#include "media/gpu/chromeos/platform_video_frame_pool.h"
#include "media/gpu/macros.h"
namespace media {
namespace {
// The number of requested frames used for the image processor.
constexpr size_t kNumFramesForImageProcessor = 4;
// Pick a compositor renderable format from |candidates|.
// Return zero if not found.
Fourcc PickRenderableFourcc(const std::vector<Fourcc>& candidates) {
  // Hardcode compositor renderable format now.
  // TODO: figure out a way to pick the best one dynamically.
  // Prefer YVU420 and NV12 because ArcGpuVideoDecodeAccelerator only supports
  // single physical plane.
  constexpr Fourcc::Value kPreferredFourccValues[] = {
#if defined(ARCH_CPU_ARM_FAMILY)
      Fourcc::NV12,
      Fourcc::YV12,
#endif
      // For kepler.
      Fourcc::AR24,
  };

  // Walk the preference list in order and return the first entry that the
  // caller also supports.
  for (const Fourcc::Value preferred : kPreferredFourccValues) {
    const Fourcc preferred_fourcc(preferred);
    if (std::find(candidates.begin(), candidates.end(), preferred_fourcc) !=
        candidates.end()) {
      return preferred_fourcc;
    }
  }
  return Fourcc();
}
} // namespace
DecoderInterface::DecoderInterface(
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
......@@ -280,6 +316,8 @@ void VideoDecoderPipeline::OnResetDone() {
DCHECK(client_reset_cb_);
DVLOGF(3);
if (image_processor_)
image_processor_->Reset();
frame_converter_->AbortPendingFrames();
CallFlushCbIfNeeded(DecodeStatus::ABORTED);
......@@ -335,6 +373,21 @@ void VideoDecoderPipeline::OnFrameDecoded(scoped_refptr<VideoFrame> frame) {
DCHECK(frame_converter_);
DVLOGF(4);
if (image_processor_) {
image_processor_->Process(
std::move(frame),
base::BindOnce(&VideoDecoderPipeline::OnFrameProcessed,
decoder_weak_this_));
} else {
frame_converter_->ConvertFrame(std::move(frame));
}
}
// Called when |image_processor_| finishes processing a frame; the processed
// frame then follows the same path as a directly-decoded frame.
void VideoDecoderPipeline::OnFrameProcessed(scoped_refptr<VideoFrame> frame) {
  DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
  DCHECK(frame_converter_);
  DVLOGF(4);

  frame_converter_->ConvertFrame(std::move(frame));
}
......@@ -371,7 +424,8 @@ bool VideoDecoderPipeline::HasPendingFrames() const {
DVLOGF(3);
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
return frame_converter_->HasPendingFrames();
return frame_converter_->HasPendingFrames() ||
(image_processor_ && image_processor_->HasPendingFrames());
}
void VideoDecoderPipeline::OnError(const std::string& msg) {
......@@ -417,7 +471,70 @@ void VideoDecoderPipeline::CallOnPipelineFlushedIfNeeded() {
}
DmabufVideoFramePool* VideoDecoderPipeline::GetVideoFramePool() const {
  DVLOGF(3);
  DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);

  // When an image processor is present, |main_frame_pool_| feeds
  // |image_processor_| instead, and |decoder_| outputs native buffers it
  // allocates itself (e.g. V4L2 MMAP buffer in V4L2 API and VA surface in
  // VA API), so the decoder gets no pool.
  return image_processor_ ? nullptr : main_frame_pool_.get();
}
// Pick the output format |decoder_| should use from |candidates|. If no
// candidate is directly renderable, try to set up |image_processor_| to
// convert a candidate into a renderable format; in that case the returned
// fourcc is the image processor's *input* format.
base::Optional<Fourcc> VideoDecoderPipeline::PickDecoderOutputFormat(
    const std::vector<std::pair<Fourcc, gfx::Size>>& candidates,
    const gfx::Rect& visible_rect) {
  DVLOGF(3);
  DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);

  if (candidates.empty())
    return base::nullopt;

  // Drop any image processor from a previous configuration; it is re-created
  // below only if it is still needed.
  image_processor_.reset();

  // Check if any candidate format is renderable without the need of
  // ImageProcessor.
  std::vector<Fourcc> fourccs;
  for (const auto& candidate : candidates)
    fourccs.push_back(candidate.first);
  const Fourcc renderable_fourcc = PickRenderableFourcc(fourccs);
  if (renderable_fourcc)
    return renderable_fourcc;

  // No candidate is renderable as-is: look for an ImageProcessor that can
  // convert one of them to a format chosen by PickRenderableFourcc().
  std::unique_ptr<ImageProcessor> image_processor =
      ImageProcessorFactory::CreateWithInputCandidates(
          candidates, visible_rect.size(), kNumFramesForImageProcessor,
          decoder_task_runner_, base::BindRepeating(&PickRenderableFourcc),
          base::BindRepeating(&VideoDecoderPipeline::OnImageProcessorError,
                              decoder_weak_this_));
  if (!image_processor) {
    DVLOGF(2) << "Unable to find ImageProcessor to convert format";
    return base::nullopt;
  }

  // Note that fourcc is specified in ImageProcessor's factory method.
  auto fourcc = image_processor->input_config().fourcc;

  // Setup new pipeline.
  image_processor_ = ImageProcessorWithPool::Create(
      std::move(image_processor), main_frame_pool_.get(),
      kNumFramesForImageProcessor, decoder_task_runner_);
  if (!image_processor_) {
    DVLOGF(2) << "Unable to create ImageProcessorWithPool.";
    return base::nullopt;
  }

  return fourcc;
}
// Error callback handed to the ImageProcessor. Runs on the decoder sequence;
// OnError() must run on |client_task_runner_|, so the call is posted over.
void VideoDecoderPipeline::OnImageProcessorError() {
  VLOGF(1);
  DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);

  client_task_runner_->PostTask(
      FROM_HERE, base::BindOnce(&VideoDecoderPipeline::OnError,
                                client_weak_this_, "Image processor error"));
}
} // namespace media
......@@ -14,8 +14,11 @@
#include "base/sequence_checker.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/chromeos/image_processor_with_pool.h"
#include "media/gpu/chromeos/video_frame_converter.h"
#include "media/gpu/media_gpu_export.h"
#include "ui/gfx/geometry/size.h"
namespace base {
class SequencedTaskRunner;
......@@ -51,13 +54,21 @@ class MEDIA_GPU_EXPORT DecoderInterface {
Client() = default;
virtual ~Client() = default;
// Get the video frame pool without passing the ownership.
// Get the video frame pool without passing the ownership. Return nullptr if
// the decoder is responsible for allocating its own frames.
virtual DmabufVideoFramePool* GetVideoFramePool() const = 0;
// After this method is called from |decoder_|, the client needs to call
// DecoderInterface::OnPipelineFlushed() when all pending frames are
// flushed.
virtual void PrepareChangeResolution() = 0;
// Return a valid format for |decoder_| output from given |candidates| and
// the visible rect.
// Return base::nullopt if no valid format is found.
virtual base::Optional<Fourcc> PickDecoderOutputFormat(
const std::vector<std::pair<Fourcc, gfx::Size>>& candidates,
const gfx::Rect& visible_rect) = 0;
};
DecoderInterface(scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
......@@ -154,9 +165,16 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
// DecoderInterface::Client implementation.
DmabufVideoFramePool* GetVideoFramePool() const override;
void PrepareChangeResolution() override;
// After picking a format, it instantiates an |image_processor_| if none of
// the formats in |candidates| is renderable and an ImageProcessor can
// convert a candidate to a renderable format.
base::Optional<Fourcc> PickDecoderOutputFormat(
const std::vector<std::pair<Fourcc, gfx::Size>>& candidates,
const gfx::Rect& visible_rect) override;
private:
// Get a list of the available functions for creating VideoDecoder.
// Get a list of the available functions for creating VideoDecoder except
// the |current_func| one.
static base::queue<CreateVDFunc> GetCreateVDFunctions(
CreateVDFunc current_func);
......@@ -183,11 +201,18 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
void OnDecodeDone(bool eos_buffer, DecodeCB decode_cb, DecodeStatus status);
void OnResetDone();
void OnFrameConverted(scoped_refptr<VideoFrame> frame);
void OnError(const std::string& msg);
// Called when |decoder_| finishes decoding a frame.
void OnFrameDecoded(scoped_refptr<VideoFrame> frame);
// Called when |image_processor_| finishes processing a frame.
void OnFrameProcessed(scoped_refptr<VideoFrame> frame);
// Called when |frame_converter_| finishes converting a frame.
void OnFrameConverted(scoped_refptr<VideoFrame> frame);
// Return true if the pipeline has pending frames that are returned from
// |decoder_| but haven't been passed to the client.
// i.e. |image_processor_| or |frame_converter_| has pending frames.
bool HasPendingFrames() const;
// Call DecoderInterface::OnPipelineFlushed() when we need to.
......@@ -196,6 +221,9 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
// Call |client_flush_cb_| with |status|.
void CallFlushCbIfNeeded(DecodeStatus status);
// Handle ImageProcessor error callback.
void OnImageProcessorError();
// The client task runner and its sequence checker. All public methods should
// run on this task runner.
const scoped_refptr<base::SequencedTaskRunner> client_task_runner_;
......@@ -216,6 +244,10 @@ class MEDIA_GPU_EXPORT VideoDecoderPipeline : public VideoDecoder,
// alive as long as the GPU process is.
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
// The image processor is only created when the decoder cannot output frames
// with renderable format.
std::unique_ptr<ImageProcessorWithPool> image_processor_;
// The frame converter passed from the client. Destroyed on
// |client_task_runner_|.
std::unique_ptr<VideoFrameConverter> frame_converter_;
......
......@@ -18,6 +18,7 @@
#include "base/files/scoped_file.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
#include "base/sequence_checker.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
......
......@@ -7,7 +7,6 @@
#include <algorithm>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
......@@ -15,7 +14,6 @@
#include "media/base/video_util.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/chromeos/video_decoder_pipeline.h"
#include "media/gpu/gpu_video_decode_accelerator_helpers.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend_stateless.h"
......@@ -140,10 +138,6 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
SetState(State::kUninitialized);
}
// Setup frame pool.
DCHECK(client_);
frame_pool_ = client_->GetVideoFramePool();
// Open V4L2 device.
VideoCodecProfile profile = config.profile();
uint32_t input_format_fourcc =
......@@ -179,7 +173,7 @@ void V4L2SliceVideoDecoder::Initialize(const VideoDecoderConfig& config,
// Create the backend (only stateless API supported as of now).
backend_ = std::make_unique<V4L2StatelessVideoDecoderBackend>(
this, device_, frame_pool_, profile, decoder_task_runner_);
this, device_, profile, decoder_task_runner_);
if (!backend_->Initialize()) {
std::move(init_cb).Run(false);
return;
......@@ -258,60 +252,74 @@ bool V4L2SliceVideoDecoder::SetCodedSizeOnInputQueue(
return true;
}
base::Optional<GpuBufferLayout> V4L2SliceVideoDecoder::SetupOutputFormat(
const gfx::Size& size,
const gfx::Rect& visible_rect) {
bool V4L2SliceVideoDecoder::SetupOutputFormat(const gfx::Size& size,
const gfx::Rect& visible_rect) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "size: " << size.ToString()
<< ", visible_rect: " << visible_rect.ToString();
const std::vector<uint32_t> formats = device_->EnumerateSupportedPixelformats(
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
DCHECK(!formats.empty());
for (const auto format_fourcc : formats) {
if (!device_->CanCreateEGLImageFrom(format_fourcc))
continue;
// Get the supported output formats and their corresponding negotiated sizes.
std::vector<std::pair<Fourcc, gfx::Size>> candidates;
for (const uint32_t& pixfmt : device_->EnumerateSupportedPixelformats(
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
base::Optional<struct v4l2_format> format =
output_queue_->SetFormat(format_fourcc, size, 0);
if (!format)
continue;
// SetFormat is successful. Next make sure VFPool can allocate video frames
// with width and height adjusted by a video driver.
gfx::Size adjusted_size(format->fmt.pix_mp.width,
format->fmt.pix_mp.height);
output_queue_->SetFormat(pixfmt, size, 0);
if (format) {
gfx::Size adjusted_size(format->fmt.pix_mp.width,
format->fmt.pix_mp.height);
candidates.push_back(
std::make_pair(Fourcc::FromV4L2PixFmt(pixfmt), adjusted_size));
}
}
// Make sure VFPool can allocate video frames with width and height.
auto layout =
UpdateVideoFramePoolFormat(format_fourcc, adjusted_size, visible_rect);
if (!layout)
continue;
// Ask the pipeline to pick the output format.
base::Optional<Fourcc> fourcc =
client_->PickDecoderOutputFormat(candidates, visible_rect);
if (!fourcc) {
VLOGF(1) << "Failed to pick a output format.";
return false;
}
const uint32_t pixfmt = fourcc->ToV4L2PixFmt();
// We successfully picked the output format. Now setup output format again.
base::Optional<struct v4l2_format> format =
output_queue_->SetFormat(pixfmt, size, 0);
DCHECK(format);
gfx::Size adjusted_size(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
DCHECK_EQ(adjusted_size.width() % 16, 0);
DCHECK_EQ(adjusted_size.height() % 16, 0);
if (!gfx::Rect(adjusted_size).Contains(gfx::Rect(size))) {
VLOGF(1) << "The adjusted coded size (" << adjusted_size.ToString()
<< ") should contains the original coded size(" << size.ToString()
<< ").";
return false;
}
// Got the adjusted size from the V4L2 driver. Now setup the frame pool.
// TODO(akahuang): It is possible there is an allocatable formats among
// candidates, but PickDecoderOutputFormat() selects other non-allocatable
// format. The correct flow is to attach an info to candidates if it is
// created by VideoFramePool.
DmabufVideoFramePool* pool = client_->GetVideoFramePool();
if (pool) {
base::Optional<GpuBufferLayout> layout = pool->RequestFrames(
Fourcc::FromV4L2PixFmt(pixfmt), adjusted_size, visible_rect,
GetNaturalSize(visible_rect, pixel_aspect_ratio_), num_output_frames_);
if (!layout) {
VLOGF(1) << "Failed to setup format to VFPool";
return false;
}
if (layout->size() != adjusted_size) {
VLOGF(1) << "The size adjusted by VFPool is different from one "
<< "adjusted by a video driver. fourcc: " << format_fourcc
<< ", (video driver v.s. VFPool) " << adjusted_size.ToString()
<< "adjusted by a video driver. fourcc: "
<< FourccToString(pixfmt) << ", (video driver v.s. VFPool) "
<< adjusted_size.ToString()
<< " != " << layout->size().ToString();
continue;
return false;
}
return layout;
}
// TODO(akahuang): Use ImageProcessor in this case.
VLOGF(2) << "WARNING: Cannot find format that can create EGL image. "
<< "We need ImageProcessor to convert pixel format.";
NOTIMPLEMENTED();
return base::nullopt;
}
base::Optional<GpuBufferLayout>
V4L2SliceVideoDecoder::UpdateVideoFramePoolFormat(
uint32_t output_format_fourcc,
const gfx::Size& size,
const gfx::Rect& visible_rect) {
gfx::Size natural_size = GetNaturalSize(visible_rect, pixel_aspect_ratio_);
return frame_pool_->RequestFrames(
Fourcc::FromV4L2PixFmt(output_format_fourcc), size, visible_rect,
natural_size, num_output_frames_);
return true;
}
void V4L2SliceVideoDecoder::Reset(base::OnceClosure closure) {
......@@ -465,24 +473,15 @@ void V4L2SliceVideoDecoder::ContinueChangeResolution(
return;
}
auto layout = SetupOutputFormat(pic_size, visible_rect);
if (!layout) {
VLOGF(1) << "No format is available with thew new resolution";
if (!SetupOutputFormat(pic_size, visible_rect)) {
VLOGF(1) << "Failed to setup output format.";
SetState(State::kError);
return;
}
auto coded_size = layout->size();
DCHECK_EQ(coded_size.width() % 16, 0);
DCHECK_EQ(coded_size.height() % 16, 0);
if (!gfx::Rect(coded_size).Contains(gfx::Rect(pic_size))) {
VLOGF(1) << "Got invalid adjusted coded size: " << coded_size.ToString();
SetState(State::kError);
return;
}
if (output_queue_->AllocateBuffers(num_output_frames, V4L2_MEMORY_DMABUF) ==
0) {
v4l2_memory type =
client_->GetVideoFramePool() ? V4L2_MEMORY_DMABUF : V4L2_MEMORY_MMAP;
if (output_queue_->AllocateBuffers(num_output_frames_, type) == 0) {
VLOGF(1) << "Failed to request output buffers.";
SetState(State::kError);
return;
......@@ -566,6 +565,13 @@ void V4L2SliceVideoDecoder::OutputFrame(scoped_refptr<VideoFrame> frame,
output_cb_.Run(std::move(frame));
}
// Forwards the backend's pool request to the pipeline client. May return
// nullptr, in which case the decoder allocates its own buffers (see
// DecoderInterface::Client::GetVideoFramePool()).
DmabufVideoFramePool* V4L2SliceVideoDecoder::GetVideoFramePool() const {
  DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
  DVLOGF(4);

  return client_->GetVideoFramePool();
}
void V4L2SliceVideoDecoder::SetState(State new_state) {
DCHECK_CALLED_ON_VALID_SEQUENCE(decoder_sequence_checker_);
DVLOGF(3) << "Change state from " << static_cast<int>(state_) << " to "
......
......@@ -13,7 +13,6 @@
#include <utility>
#include <vector>
#include "base/bind_helpers.h"
#include "base/callback_forward.h"
#include "base/containers/mru_cache.h"
#include "base/containers/queue.h"
......@@ -24,7 +23,6 @@
#include "base/sequenced_task_runner.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/gpu_buffer_layout.h"
#include "media/gpu/chromeos/video_decoder_pipeline.h"
......@@ -32,6 +30,7 @@
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_video_decoder_backend.h"
#include "media/video/supported_video_decoder_config.h"
#include "ui/gfx/geometry/size.h"
namespace media {
......@@ -69,6 +68,7 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
void OutputFrame(scoped_refptr<VideoFrame> frame,
const gfx::Rect& visible_rect,
base::TimeDelta timestamp) override;
DmabufVideoFramePool* GetVideoFramePool() const override;
private:
friend class V4L2SliceVideoDecoderTest;
......@@ -77,7 +77,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
scoped_refptr<base::SequencedTaskRunner> decoder_task_runner,
base::WeakPtr<DecoderInterface::Client> client,
scoped_refptr<V4L2Device> device);
~V4L2SliceVideoDecoder() override;
enum class State {
......@@ -118,18 +117,8 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// Setup format for output queue. This function sets output format on output
// queue that is supported by a v4l2 driver, can be allocatable by
// VideoFramePool and can be composited by chrome. This also updates format
// in VideoFramePool. The returned VideoFrameLayout is one of VideoFrame that
// VideoFramePool will allocate. Returns base::nullopt on failure of if there
// is no format that satisfies the above conditions.
base::Optional<GpuBufferLayout> SetupOutputFormat(
const gfx::Size& size,
const gfx::Rect& visible_rect);
// Update the format of frames in |frame_pool_| with |output_format_fourcc|,
// |size| and |visible_rect|.
base::Optional<GpuBufferLayout> UpdateVideoFramePoolFormat(
uint32_t output_format_fourcc,
const gfx::Size& size,
const gfx::Rect& visible_rect);
// in VideoFramePool. Return true if the setup is successful.
bool SetupOutputFormat(const gfx::Size& size, const gfx::Rect& visible_rect);
// Start streaming V4L2 input and output queues. Attempt to start
// |device_poll_thread_| before starting streaming.
......@@ -155,8 +144,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// V4L2 device in use.
scoped_refptr<V4L2Device> device_;
// VideoFrame manager used to allocate and recycle video frame.
DmabufVideoFramePool* frame_pool_ = nullptr;
// Callback to change resolution, called after the pipeline is flushed.
base::OnceClosure continue_change_resolution_cb_;
......@@ -164,8 +151,6 @@ class MEDIA_GPU_EXPORT V4L2SliceVideoDecoder
// State of the instance.
State state_ = State::kUninitialized;
// Parameters for generating output VideoFrame.
base::Optional<VideoFrameLayout> frame_layout_;
// Number of output frames requested to |frame_pool_|.
// The default value is only used at the first time of
// DmabufVideoFramePool::RequestFrames() during Initialize().
......
......@@ -8,10 +8,12 @@
#include "base/memory/scoped_refptr.h"
#include "media/base/decode_status.h"
#include "media/base/video_decoder.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "ui/gfx/geometry/rect.h"
namespace media {
class DmabufVideoFramePool;
class V4L2Device;
class V4L2Queue;
class V4L2ReadableBuffer;
......@@ -55,6 +57,8 @@ class V4L2VideoDecoderBackend {
virtual void OutputFrame(scoped_refptr<VideoFrame> frame,
const gfx::Rect& visible_rect,
base::TimeDelta timestamp) = 0;
// Get the video frame pool without passing the ownership.
virtual DmabufVideoFramePool* GetVideoFramePool() const = 0;
};
virtual ~V4L2VideoDecoderBackend();
......
......@@ -16,6 +16,7 @@
#include "media/base/video_codecs.h"
#include "media/base/video_frame.h"
#include "media/gpu/accelerated_video_decoder.h"
#include "media/gpu/chromeos/dmabuf_video_frame_pool.h"
#include "media/gpu/macros.h"
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_h264_accelerator.h"
......@@ -98,11 +99,9 @@ V4L2StatelessVideoDecoderBackend::DecodeRequest::~DecodeRequest() = default;
V4L2StatelessVideoDecoderBackend::V4L2StatelessVideoDecoderBackend(
Client* const client,
scoped_refptr<V4L2Device> device,
DmabufVideoFramePool* const frame_pool,
VideoCodecProfile profile,
scoped_refptr<base::SequencedTaskRunner> task_runner)
: V4L2VideoDecoderBackend(client, std::move(device)),
frame_pool_(frame_pool),
profile_(profile),
bitstream_id_to_timestamp_(kTimestampCacheSize),
task_runner_(task_runner) {
......@@ -177,6 +176,26 @@ bool V4L2StatelessVideoDecoderBackend::Initialize() {
return true;
}
// static
void V4L2StatelessVideoDecoderBackend::ReuseOutputBufferThunk(
    scoped_refptr<base::SequencedTaskRunner> task_runner,
    base::Optional<base::WeakPtr<V4L2StatelessVideoDecoderBackend>> weak_this,
    V4L2ReadableBufferRef buffer) {
  DVLOGF(3);
  DCHECK(weak_this);

  // This thunk may be invoked from any sequence (e.g. a VideoFrame
  // destruction observer); hop to |task_runner| when necessary.
  if (!task_runner->RunsTasksInCurrentSequence()) {
    task_runner->PostTask(
        FROM_HERE,
        base::BindOnce(&V4L2StatelessVideoDecoderBackend::ReuseOutputBuffer,
                       *weak_this, std::move(buffer)));
    return;
  }

  if (*weak_this)
    (*weak_this)->ReuseOutputBuffer(std::move(buffer));
}
void V4L2StatelessVideoDecoderBackend::ReuseOutputBuffer(
V4L2ReadableBufferRef buffer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
......@@ -205,15 +224,22 @@ void V4L2StatelessVideoDecoderBackend::OnOutputBufferDequeued(
surface->SetDecoded();
// Keep a reference to the V4L2 buffer until the buffer is reused. The
// reason for this is that the config store uses V4L2 buffer IDs to
// reference frames, therefore we cannot reuse the same V4L2 buffer ID for
// another decode operation until all references to that frame are gone.
// Request API does not have this limitation, so we can probably remove this
// after config store is gone.
surface->SetReleaseCallback(
base::BindOnce(&V4L2StatelessVideoDecoderBackend::ReuseOutputBuffer,
weak_this_, std::move(dequeued_buffer)));
auto reuse_buffer_cb =
base::BindOnce(&V4L2StatelessVideoDecoderBackend::ReuseOutputBufferThunk,
task_runner_, weak_this_, std::move(dequeued_buffer));
if (output_queue_->GetMemoryType() == V4L2_MEMORY_MMAP) {
// Keep a reference to the V4L2 buffer until the frame is reused, because
// the frame is backed up by the V4L2 buffer's memory.
surface->video_frame()->AddDestructionObserver(std::move(reuse_buffer_cb));
} else {
// Keep a reference to the V4L2 buffer until the buffer is reused. The
// reason for this is that the config store uses V4L2 buffer IDs to
// reference frames, therefore we cannot reuse the same V4L2 buffer ID for
// another decode operation until all references to that frame are gone.
// Request API does not have this limitation, so we can probably remove this
// after config store is gone.
surface->SetReleaseCallback(std::move(reuse_buffer_cb));
}
PumpOutputSurfaces();
......@@ -233,22 +259,6 @@ V4L2StatelessVideoDecoderBackend::CreateSurface() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DVLOGF(4);
// Request VideoFrame.
scoped_refptr<VideoFrame> frame = frame_pool_->GetFrame();
if (!frame) {
// We allocate the same number of output buffer slot in V4L2 device and the
// output VideoFrame. If there is free output buffer slot but no free
// VideoFrame, surface_it means the VideoFrame is not released at client
// side. Post DoDecodeWork when the pool has available frames.
DVLOGF(3) << "There is no available VideoFrame.";
frame_pool_->NotifyWhenFrameAvailable(base::BindOnce(
base::IgnoreResult(&base::SequencedTaskRunner::PostTask), task_runner_,
FROM_HERE,
base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
weak_this_)));
return nullptr;
}
// Request V4L2 input and output buffers.
V4L2WritableBufferRef input_buf = input_queue_->GetFreeBuffer();
V4L2WritableBufferRef output_buf = output_queue_->GetFreeBuffer();
......@@ -257,6 +267,35 @@ V4L2StatelessVideoDecoderBackend::CreateSurface() {
return nullptr;
}
DmabufVideoFramePool* pool = client_->GetVideoFramePool();
scoped_refptr<VideoFrame> frame;
if (!pool) {
// Get VideoFrame from the V4L2 buffer because now we allocate from V4L2
// driver via MMAP. The VideoFrame received from V4L2 buffer will remain
// until deallocating V4L2Queue. But we need to know when the buffer is not
// used by the client. So we wrap the frame here.
scoped_refptr<VideoFrame> origin_frame = output_buf.GetVideoFrame();
frame = VideoFrame::WrapVideoFrame(origin_frame, origin_frame->format(),
origin_frame->visible_rect(),
origin_frame->natural_size());
} else {
// Try to get VideoFrame from the pool.
frame = pool->GetFrame();
if (!frame) {
// We allocate the same number of output buffer slot in V4L2 device and
// the output VideoFrame. If there is free output buffer slot but no free
// VideoFrame, it means the VideoFrame is not released at client
// side. Post DoDecodeWork when the pool has available frames.
DVLOGF(3) << "There is no available VideoFrame.";
pool->NotifyWhenFrameAvailable(base::BindOnce(
base::IgnoreResult(&base::SequencedTaskRunner::PostTask),
task_runner_, FROM_HERE,
base::BindOnce(&V4L2StatelessVideoDecoderBackend::DoDecodeWork,
weak_this_)));
return nullptr;
}
}
scoped_refptr<V4L2DecodeSurface> dec_surface;
if (supports_requests_) {
V4L2RequestRef request_ref = requests_queue_->GetFreeRequest();
......@@ -313,8 +352,19 @@ void V4L2StatelessVideoDecoderBackend::DecodeSurface(
return;
}
if (!std::move(dec_surface->output_buffer())
.QueueDMABuf(dec_surface->video_frame()->DmabufFds())) {
bool result = false;
switch (output_queue_->GetMemoryType()) {
case V4L2_MEMORY_MMAP:
result = std::move(dec_surface->output_buffer()).QueueMMap();
break;
case V4L2_MEMORY_DMABUF:
result = std::move(dec_surface->output_buffer())
.QueueDMABuf(dec_surface->video_frame()->DmabufFds());
break;
default:
NOTREACHED() << "We should only use MMAP or DMABUF.";
}
if (!result) {
client_->OnBackendError();
return;
}
......
......@@ -32,7 +32,6 @@ class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
V4L2StatelessVideoDecoderBackend(
Client* const client,
scoped_refptr<V4L2Device> device,
DmabufVideoFramePool* const frame_pool,
VideoCodecProfile profile,
scoped_refptr<base::SequencedTaskRunner> task_runner);
......@@ -99,7 +98,11 @@ class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
kWaitSubFrameDecoded,
};
// Callback which is called when V4L2 surface is destroyed.
// Callback which is called when the output buffer is not used anymore.
static void ReuseOutputBufferThunk(
scoped_refptr<base::SequencedTaskRunner> task_runner,
base::Optional<base::WeakPtr<V4L2StatelessVideoDecoderBackend>> weak_this,
V4L2ReadableBufferRef buffer);
void ReuseOutputBuffer(V4L2ReadableBufferRef buffer);
// Try to advance the decoding work.
......@@ -124,9 +127,6 @@ class V4L2StatelessVideoDecoderBackend : public V4L2VideoDecoderBackend,
// Returns whether |profile| is supported by a v4l2 stateless decoder driver.
bool IsSupportedProfile(VideoCodecProfile profile);
// Video frame pool provided by the decoder.
DmabufVideoFramePool* const frame_pool_;
// Video profile we are decoding.
VideoCodecProfile profile_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment