Commit c0786251 authored by Hirokazu Honda, committed by Commit Bot

media/gpu/v4l2VEA: Replace visible_size with visible_rect

gfx::Rect is a more appropriate type than gfx::Size for representing
the area to be encoded in VideoEncodeAccelerator, because the area of
a VideoFrame to be encoded does not necessarily begin at (0, 0).

This CL changes the type to gfx::Rect while keeping the existing
V4L2VEA logic and the assumption that the area origin is (0, 0).
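
To illustrate the distinction (a minimal standalone sketch, not part of
this CL; it only assumes the gfx::Size/gfx::Rect types from
ui/gfx/geometry, and the function name and dimensions are hypothetical):

  #include "ui/gfx/geometry/rect.h"
  #include "ui/gfx/geometry/size.h"

  void Illustrate() {
    // Wrapping the configured size in a gfx::Rect keeps the current
    // assumption: the visible area still begins at the origin.
    gfx::Size input_visible_size(1280, 720);
    gfx::Rect encoder_input_visible_rect(input_visible_size);
    // encoder_input_visible_rect is now (0,0) 1280x720.

    // Unlike gfx::Size, a gfx::Rect can also describe an area that does
    // not begin at (0, 0), e.g. only a cropped region of a VideoFrame.
    gfx::Rect cropped_region(64, 36, 1152, 648);
  }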

Bug: 1033799
Test: video.EncodeAccel* on kukui
Change-Id: Ie7a0b6bbdb9a66bfd1b85c9afa5bfff80779ee81
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2010508
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: Alexandre Courbot <acourbot@chromium.org>
Cr-Commit-Position: refs/heads/master@{#735769}
parent 40af502e
@@ -188,7 +188,7 @@ bool V4L2VideoEncodeAccelerator::Initialize(const Config& config,
   TRACE_EVENT0("media,gpu", "V4L2VEA::Initialize");
   VLOGF(2) << ": " << config.AsHumanReadableString();
 
-  visible_size_ = config.input_visible_size;
+  encoder_input_visible_rect_ = gfx::Rect(config.input_visible_size);
 
   client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
   client_ = client_ptr_factory_->GetWeakPtr();
@@ -266,7 +266,7 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
 
   // TODO(hiroh): Decide the appropriate planar in some way.
   auto input_layout = VideoFrameLayout::CreateMultiPlanar(
-      config.input_format, visible_size_,
+      config.input_format, encoder_input_visible_rect_.size(),
       std::vector<ColorPlaneLayout>(
           VideoFrame::NumPlanes(config.input_format)));
   if (!input_layout) {
@@ -274,8 +274,10 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
     return;
   }
 
+  // ImageProcessor for a pixel format conversion.
   if (!CreateImageProcessor(*input_layout, *device_input_layout_,
-                            visible_size_)) {
+                            encoder_input_visible_rect_,
+                            encoder_input_visible_rect_)) {
     VLOGF(1) << "Failed to create image processor";
     return;
   }
@@ -307,25 +309,25 @@ void V4L2VideoEncodeAccelerator::InitializeTask(const Config& config,
 bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
     const VideoFrameLayout& input_layout,
     const VideoFrameLayout& output_layout,
-    const gfx::Size& visible_size) {
+    const gfx::Rect& input_visible_rect,
+    const gfx::Rect& output_visible_rect) {
   VLOGF(2);
   DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
   DCHECK_NE(input_layout.format(), output_layout.format());
 
-  // Convert from |config.input_format| to |device_input_layout_->format()|,
-  // keeping the size at |visible_size| and requiring the output buffers to
-  // be of at least |device_input_layout_->coded_size()|.
+  // Convert from |config.input_format| + |input_visible_rect| to
+  // |device_input_layout_->format()| + |output_visible_rect|, requiring the
+  // output buffers to be of at least |device_input_layout_->coded_size()|.
   // |input_storage_type| can be STORAGE_SHMEM and STORAGE_MOJO_SHARED_BUFFER.
   // However, it doesn't matter VideoFrame::STORAGE_OWNED_MEMORY is specified
   // for |input_storage_type| here, as long as VideoFrame on Process()'s data
   // can be accessed by VideoFrame::data().
-  auto input_config =
-      VideoFrameLayoutToPortConfig(input_layout, gfx::Rect(visible_size),
-                                   {VideoFrame::STORAGE_OWNED_MEMORY});
+  auto input_config = VideoFrameLayoutToPortConfig(
+      input_layout, input_visible_rect, {VideoFrame::STORAGE_OWNED_MEMORY});
   if (!input_config)
     return false;
   auto output_config = VideoFrameLayoutToPortConfig(
-      output_layout, gfx::Rect(visible_size),
+      output_layout, output_visible_rect,
       {VideoFrame::STORAGE_DMABUFS, VideoFrame::STORAGE_OWNED_MEMORY});
   if (!output_config)
     return false;
@@ -362,13 +364,11 @@ bool V4L2VideoEncodeAccelerator::CreateImageProcessor(
   free_image_processor_output_buffer_indices_.resize(kImageProcBufferCount);
   std::iota(free_image_processor_output_buffer_indices_.begin(),
             free_image_processor_output_buffer_indices_.end(), 0);
-  return AllocateImageProcessorOutputBuffers(kImageProcBufferCount,
-                                             visible_size);
+  return AllocateImageProcessorOutputBuffers(kImageProcBufferCount);
 }
 
 bool V4L2VideoEncodeAccelerator::AllocateImageProcessorOutputBuffers(
-    size_t count,
-    const gfx::Size& visible_size) {
+    size_t count) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
   DCHECK(image_processor_);
   // Allocate VideoFrames for image processor output if its mode is IMPORT.
@@ -377,14 +377,14 @@ bool V4L2VideoEncodeAccelerator::AllocateImageProcessorOutputBuffers(
   }
 
   image_processor_output_buffers_.resize(count);
-  const auto output_storage_type =
-      image_processor_->output_config().storage_type();
+  const ImageProcessor::PortConfig& output_config =
+      image_processor_->output_config();
   for (size_t i = 0; i < count; i++) {
-    switch (output_storage_type) {
+    switch (output_config.storage_type()) {
       case VideoFrame::STORAGE_OWNED_MEMORY:
         image_processor_output_buffers_[i] = VideoFrame::CreateFrameWithLayout(
-            *device_input_layout_, gfx::Rect(visible_size), visible_size,
-            base::TimeDelta(), true);
+            *device_input_layout_, output_config.visible_rect,
+            output_config.visible_rect.size(), base::TimeDelta(), true);
         if (!image_processor_output_buffers_[i]) {
           VLOG(1) << "Failed to create VideoFrame";
           return false;
@@ -393,7 +393,7 @@ bool V4L2VideoEncodeAccelerator::AllocateImageProcessorOutputBuffers(
       // TODO(crbug.com/910590): Support VideoFrame::STORAGE_DMABUFS.
       default:
         VLOGF(1) << "Unsupported output storage type of image processor: "
-                 << output_storage_type;
+                 << output_config.storage_type();
         return false;
     }
   }
@@ -640,8 +640,7 @@ void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
     return;
   }
 
-  if (frame &&
-      !ReconfigureFormatIfNeeded(frame->format(), frame->coded_size())) {
+  if (frame && !ReconfigureFormatIfNeeded(*frame)) {
     NOTIFY_ERROR(kInvalidArgumentError);
     encoder_state_ = kError;
     return;
@@ -662,18 +661,17 @@ void V4L2VideoEncodeAccelerator::EncodeTask(scoped_refptr<VideoFrame> frame,
 }
 
 bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
-    VideoPixelFormat format,
-    const gfx::Size& new_frame_size) {
+    const VideoFrame& frame) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(encoder_sequence_checker_);
 
   // We should apply the frame size change to ImageProcessor if there is.
   if (image_processor_) {
     // Stride is the same. There is no need of executing S_FMT again.
-    if (image_processor_->input_config().size == new_frame_size) {
+    if (image_processor_->input_config().size == frame.coded_size()) {
       return true;
     }
 
-    VLOGF(2) << "Call S_FMT with a new size=" << new_frame_size.ToString()
+    VLOGF(2) << "Call S_FMT with a new size=" << frame.coded_size().ToString()
             << ", the previous size ="
             << device_input_layout_->coded_size().ToString();
    if (!input_buffer_map_.empty()) {
@@ -682,22 +680,14 @@ bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
       return false;
     }
 
-    // TODO(hiroh): Decide the appropriate planar in some way.
-    auto input_layout = VideoFrameLayout::CreateMultiPlanar(
-        format, new_frame_size,
-        std::vector<ColorPlaneLayout>(VideoFrame::NumPlanes(format)));
-    if (!input_layout) {
-      VLOGF(1) << "Invalid image processor input layout";
-      return false;
-    }
-
-    if (!CreateImageProcessor(*input_layout, *device_input_layout_,
-                              visible_size_)) {
+    if (!CreateImageProcessor(frame.layout(), *device_input_layout_,
+                              frame.visible_rect(),
+                              encoder_input_visible_rect_)) {
       NOTIFY_ERROR(kPlatformFailureError);
       return false;
     }
    if (image_processor_->input_config().size.width() !=
-        new_frame_size.width()) {
+        frame.coded_size().width()) {
      NOTIFY_ERROR(kPlatformFailureError);
      return false;
    }
@@ -713,8 +703,8 @@ bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
   // whose frame size as |input_allocated_size_|. VEAClient for ARC++ might give
   // a different frame size but |input_allocated_size_| is always the same as
   // |device_input_layout_->coded_size()|.
-  if (new_frame_size != input_allocated_size_) {
-    VLOGF(2) << "Call S_FMT with a new size=" << new_frame_size.ToString()
+  if (frame.coded_size() != input_allocated_size_) {
+    VLOGF(2) << "Call S_FMT with a new size=" << frame.coded_size().ToString()
             << ", the previous size ="
             << device_input_layout_->coded_size().ToString()
             << " (the size requested to client="
@@ -724,11 +714,13 @@ bool V4L2VideoEncodeAccelerator::ReconfigureFormatIfNeeded(
       NOTIFY_ERROR(kInvalidArgumentError);
       return false;
     }
-    if (!NegotiateInputFormat(device_input_layout_->format(), new_frame_size)) {
+    if (!NegotiateInputFormat(device_input_layout_->format(),
+                              frame.coded_size())) {
       NOTIFY_ERROR(kPlatformFailureError);
       return false;
     }
-    if (device_input_layout_->coded_size().width() != new_frame_size.width()) {
+    if (device_input_layout_->coded_size().width() !=
+        frame.coded_size().width()) {
      NOTIFY_ERROR(kPlatformFailureError);
      return false;
    }
@@ -1296,11 +1288,13 @@ bool V4L2VideoEncodeAccelerator::SetOutputFormat(
   DCHECK(!input_queue_->IsStreaming());
   DCHECK(!output_queue_->IsStreaming());
 
-  DCHECK(!visible_size_.IsEmpty());
-  output_buffer_byte_size_ = GetEncodeBitstreamBufferSize(visible_size_);
+  DCHECK(!encoder_input_visible_rect_.IsEmpty());
+  output_buffer_byte_size_ =
+      GetEncodeBitstreamBufferSize(encoder_input_visible_rect_.size());
 
   base::Optional<struct v4l2_format> format = output_queue_->SetFormat(
-      output_format_fourcc_, visible_size_, output_buffer_byte_size_);
+      output_format_fourcc_, encoder_input_visible_rect_.size(),
+      output_buffer_byte_size_);
   if (!format) {
     return false;
   }
@@ -1384,14 +1378,14 @@ bool V4L2VideoEncodeAccelerator::SetFormats(VideoPixelFormat input_format,
   if (!SetOutputFormat(output_profile))
     return false;
 
-  if (!NegotiateInputFormat(input_format, visible_size_))
+  if (!NegotiateInputFormat(input_format, encoder_input_visible_rect_.size()))
     return false;
 
   struct v4l2_rect visible_rect;
-  visible_rect.left = 0;
-  visible_rect.top = 0;
-  visible_rect.width = visible_size_.width();
-  visible_rect.height = visible_size_.height();
+  visible_rect.left = encoder_input_visible_rect_.x();
+  visible_rect.top = encoder_input_visible_rect_.y();
+  visible_rect.width = encoder_input_visible_rect_.width();
+  visible_rect.height = encoder_input_visible_rect_.height();
 
   struct v4l2_selection selection_arg{};
   selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
@@ -1399,7 +1393,7 @@ bool V4L2VideoEncodeAccelerator::SetFormats(VideoPixelFormat input_format,
   selection_arg.r = visible_rect;
 
   // The width and height might be adjusted by driver.
-  // Need to read it back and set to visible_size_.
+  // Need to read it back and set to |encoder_input_visible_rect_|.
   if (device_->Ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
     DVLOGF(2) << "VIDIOC_S_SELECTION is supported";
     visible_rect = selection_arg.r;
@@ -1413,10 +1407,11 @@ bool V4L2VideoEncodeAccelerator::SetFormats(VideoPixelFormat input_format,
     visible_rect = crop.c;
   }
 
-  visible_size_.SetSize(visible_rect.width, visible_rect.height);
-  VLOGF(2) << "After adjusted by driver, visible_size_="
-           << visible_size_.ToString();
+  encoder_input_visible_rect_ =
+      gfx::Rect(visible_rect.left, visible_rect.top, visible_rect.width,
+                visible_rect.height);
+  VLOGF(2) << "After adjusted by driver, encoder_input_visible_rect_="
+           << encoder_input_visible_rect_.ToString();
 
   return true;
 }
@@ -179,11 +179,12 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
   // Other utility functions. Called on the |encoder_task_runner_|.
   //
-  // Create image processor that will process input_layout to output_layout. The
-  // visible size of processed video frames are |visible_size|.
+  // Create image processor that will process |input_layout| +
+  // |input_visible_rect| to |output_layout| + |output_visible_rect|.
   bool CreateImageProcessor(const VideoFrameLayout& input_layout,
                             const VideoFrameLayout& output_layout,
-                            const gfx::Size& visible_size);
+                            const gfx::Rect& input_visible_rect,
+                            const gfx::Rect& output_visible_rect);
 
   // Process one video frame in |image_processor_input_queue_| by
   // |image_processor_|.
   void InputImageProcessorTask();
@@ -203,8 +204,7 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
 
   // Reconfigure format of input buffers and image processor if frame size
   // given by client is different from one set in input buffers.
-  bool ReconfigureFormatIfNeeded(VideoPixelFormat format,
-                                 const gfx::Size& new_frame_size);
+  bool ReconfigureFormatIfNeeded(const VideoFrame& frame);
 
   // Try to set up the device to the input format we were Initialized() with,
   // or if the device doesn't support it, use one it can support, so that we
@@ -228,8 +228,7 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
 
   // Allocates |count| video frames with |visible_size| for image processor's
   // output buffers. Returns false if there's something wrong.
-  bool AllocateImageProcessorOutputBuffers(size_t count,
-                                           const gfx::Size& visible_size);
+  bool AllocateImageProcessorOutputBuffers(size_t count);
 
   // Recycle output buffer of image processor with |output_buffer_index|.
   void ReuseImageProcessorOutputBuffer(size_t output_buffer_index);
@@ -249,7 +248,10 @@ class MEDIA_GPU_EXPORT V4L2VideoEncodeAccelerator
   const scoped_refptr<base::SingleThreadTaskRunner> child_task_runner_;
   SEQUENCE_CHECKER(child_sequence_checker_);
 
-  gfx::Size visible_size_;
+  // Visible rectangle of VideoFrame to be fed to an encoder driver, in other
+  // words, a visible rectangle that output encoded bitstream buffers represent.
+  gfx::Rect encoder_input_visible_rect_;
 
   // Layout of device accepted input VideoFrame.
   base::Optional<VideoFrameLayout> device_input_layout_;