Commit 81df6b62 authored by Hirokazu Honda, committed by Chromium LUCI CQ

media/gpu/video_encode_accelerator_tests: Add NV12Dmabuf Cropping test case

This adds an NV12Dmabuf cropping test case, in which the visible rectangle
of the VideoFrames fed to an encoder doesn't start at (0, 0).

Bug: b:174318867, b:172210338
Test: video_encode_accelerator_tests --gtest_also_run_disabled_tests on atlas
Change-Id: I1361011344a685484bb7098e0f3507dc8014b2aa
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2563391
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Cr-Commit-Position: refs/heads/master@{#834126}
parent 823e10f9
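
For context, the geometry this change exercises is a frame whose coded area is larger than its visible area, with the visible rectangle offset from the top-left corner. A minimal illustrative sketch (not part of this change; the 640x480 / 640x360 numbers are taken from the test comment added below, and CreateCroppedNV12Frame is a hypothetical helper):

  // Illustrative only: an NV12 frame whose visible rectangle does not start
  // at (0, 0), e.g. a camera that can only capture 640x480 delivering a
  // 640x360 stream vertically centered in the coded area.
  #include "base/time/time.h"
  #include "media/base/video_frame.h"
  #include "ui/gfx/geometry/rect.h"

  scoped_refptr<media::VideoFrame> CreateCroppedNV12Frame() {
    const gfx::Size coded_size(640, 480);
    const gfx::Rect visible_rect(0, 60, 640, 360);  // Origin is (0, 60).
    const gfx::Size natural_size(640, 360);
    return media::VideoFrame::CreateFrame(media::PIXEL_FORMAT_NV12, coded_size,
                                          visible_rect, natural_size,
                                          base::TimeDelta());
  }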
@@ -5,6 +5,7 @@
 #include <math.h>
 #include <utility>

+#include "base/logging.h"
 #include "media/base/video_frame.h"
 #include "media/base/video_types.h"
 #include "media/gpu/test/video_frame_helpers.h"
@@ -36,11 +37,9 @@ double ComputeSimilarity(const VideoFrame* frame1,
                          SimilarityMetrics mode) {
   ASSERT_TRUE_OR_RETURN(frame1->IsMappable() && frame2->IsMappable(),
                         std::numeric_limits<std::size_t>::max());
-  // TODO(crbug.com/1044509): Remove these assumptions.
-  ASSERT_TRUE_OR_RETURN(frame1->visible_rect() == frame2->visible_rect(),
-                        std::numeric_limits<std::size_t>::max());
-  ASSERT_TRUE_OR_RETURN(frame1->visible_rect().origin() == gfx::Point(0, 0),
-                        std::numeric_limits<std::size_t>::max());
+  ASSERT_TRUE_OR_RETURN(
+      frame1->visible_rect().size() == frame2->visible_rect().size(),
+      std::numeric_limits<std::size_t>::max());
   // These are used, only if frames are converted to I420, for keeping converted
   // frames alive until the end of function.
   scoped_refptr<VideoFrame> converted_frame1;
@@ -67,9 +66,10 @@ double ComputeSimilarity(const VideoFrame* frame1,
   ASSERT_TRUE_OR_RETURN(metric_func, std::numeric_limits<double>::max());
   return metric_func(
-      frame1->data(0), frame1->stride(0), frame1->data(1), frame1->stride(1),
-      frame1->data(2), frame1->stride(2), frame2->data(0), frame2->stride(0),
-      frame2->data(1), frame2->stride(1), frame2->data(2), frame2->stride(2),
+      frame1->visible_data(0), frame1->stride(0), frame1->visible_data(1),
+      frame1->stride(1), frame1->visible_data(2), frame1->stride(2),
+      frame2->visible_data(0), frame2->stride(0), frame2->visible_data(1),
+      frame2->stride(1), frame2->visible_data(2), frame2->stride(2),
       frame1->visible_rect().width(), frame1->visible_rect().height());
 }

 }  // namespace
@@ -77,24 +77,22 @@ double ComputeSimilarity(const VideoFrame* frame1,

 size_t CompareFramesWithErrorDiff(const VideoFrame& frame1,
                                   const VideoFrame& frame2,
                                   uint8_t tolerance) {
-  ASSERT_TRUE_OR_RETURN(frame1.format() == frame2.format(),
-                        std::numeric_limits<std::size_t>::max());
-  // TODO(crbug.com/1044509): Remove these assumption.
-  ASSERT_TRUE_OR_RETURN(frame1.visible_rect() == frame2.visible_rect(),
-                        std::numeric_limits<std::size_t>::max());
-  ASSERT_TRUE_OR_RETURN(frame1.visible_rect().origin() == gfx::Point(0, 0),
-                        std::numeric_limits<std::size_t>::max());
   ASSERT_TRUE_OR_RETURN(frame1.IsMappable() && frame2.IsMappable(),
                         std::numeric_limits<std::size_t>::max());
+  ASSERT_TRUE_OR_RETURN(frame1.format() == frame2.format(),
+                        std::numeric_limits<std::size_t>::max());
+  ASSERT_TRUE_OR_RETURN(
+      frame1.visible_rect().size() == frame2.visible_rect().size(),
+      std::numeric_limits<std::size_t>::max());
   size_t diff_cnt = 0;
   const VideoPixelFormat format = frame1.format();
   const size_t num_planes = VideoFrame::NumPlanes(format);
   const gfx::Size& visible_size = frame1.visible_rect().size();
   for (size_t i = 0; i < num_planes; ++i) {
-    const uint8_t* data1 = frame1.data(i);
+    const uint8_t* data1 = frame1.visible_data(i);
     const int stride1 = frame1.stride(i);
-    const uint8_t* data2 = frame2.data(i);
+    const uint8_t* data2 = frame2.visible_data(i);
     const int stride2 = frame2.stride(i);
     const size_t rows = VideoFrame::Rows(i, format, visible_size.height());
     const int row_bytes = VideoFrame::RowBytes(i, format, visible_size.width());
......
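
The substantive change in the hunks above is switching from data() to visible_data(), so the comparison starts at the visible rectangle's origin rather than at the top-left of the coded plane. Conceptually, visible_data() resolves to a pointer offset like the sketch below (a simplification assuming NV12, with VisibleData as a hypothetical stand-in, not Chromium's actual implementation; it mirrors the offset lambda added to Video::Expand() later in this CL):

  // Sketch only: approximates what VideoFrame::visible_data(plane) returns
  // for an NV12 frame whose visible rectangle has an even origin.
  #include "media/base/video_frame.h"
  #include "ui/gfx/geometry/point.h"

  const uint8_t* VisibleData(const media::VideoFrame& frame, size_t plane) {
    gfx::Point origin = frame.visible_rect().origin();
    if (plane == 1)  // The NV12 UV plane is 2x2 subsampled.
      origin.SetPoint(origin.x() / 2, origin.y() / 2);
    const int bytes_per_element =
        media::VideoFrame::BytesPerElement(frame.format(), plane);
    return frame.data(plane) + frame.stride(plane) * origin.y() +
           bytes_per_element * origin.x();
  }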
@@ -31,6 +31,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "third_party/libyuv/include/libyuv/convert.h"
 #include "third_party/libyuv/include/libyuv/planar_functions.h"
+#include "third_party/libyuv/include/libyuv/scale.h"

 namespace media {
 namespace test {
@@ -48,6 +49,79 @@ Video::Video(const base::FilePath& file_path,

 Video::~Video() = default;

+std::unique_ptr<Video> Video::Expand(const gfx::Size& resolution,
+                                     const gfx::Rect& visible_rect) const {
+  LOG_ASSERT(IsLoaded()) << "The source video is not loaded";
+  LOG_ASSERT(pixel_format_ == VideoPixelFormat::PIXEL_FORMAT_NV12)
+      << "The pixel format of source video is not NV12";
+  LOG_ASSERT(visible_rect.size() == resolution_)
+      << "The resolution is different from the copied-into area of visible "
+      << "rectangle";
+  LOG_ASSERT(gfx::Rect(resolution).Contains(visible_rect))
+      << "The resolution doesn't contain visible rectangle";
+  LOG_ASSERT(visible_rect.x() % 2 == 0 && visible_rect.y() % 2 == 0)
+      << "An odd origin point is not supported";
+
+  auto new_video = std::make_unique<Video>(file_path_, metadata_file_path_);
+  new_video->frame_checksums_ = frame_checksums_;
+  new_video->thumbnail_checksums_ = thumbnail_checksums_;
+  new_video->profile_ = profile_;
+  new_video->codec_ = codec_;
+  new_video->frame_rate_ = frame_rate_;
+  new_video->num_frames_ = num_frames_;
+  new_video->num_fragments_ = num_fragments_;
+  new_video->resolution_ = resolution;
+  new_video->visible_rect_ = visible_rect;
+  new_video->pixel_format_ = pixel_format_;
+
+  const auto src_layout =
+      CreateVideoFrameLayout(PIXEL_FORMAT_NV12, resolution_, 1u /* alignment*/);
+  const auto dst_layout =
+      CreateVideoFrameLayout(PIXEL_FORMAT_NV12, resolution, 1u /* alignment*/);
+  const size_t src_frame_size =
+      src_layout->planes().back().offset + src_layout->planes().back().size;
+  const size_t dst_frame_size =
+      dst_layout->planes().back().offset + dst_layout->planes().back().size;
+  LOG_ASSERT(src_frame_size * num_frames_ == data_.size())
+      << "Unexpected data size";
+  std::vector<uint8_t> new_data(dst_frame_size * num_frames_);
+
+  auto compute_dst_visible_data_offset = [&dst_layout,
+                                          &visible_rect](size_t plane) {
+    const size_t stride = dst_layout->planes()[plane].stride;
+    const size_t bytes_per_pixel =
+        VideoFrame::BytesPerElement(dst_layout->format(), plane);
+    gfx::Point origin = visible_rect.origin();
+    LOG_ASSERT(dst_layout->format() == VideoPixelFormat::PIXEL_FORMAT_NV12)
+        << "The pixel format of destination video is not NV12";
+    if (plane == 1)
+      origin.SetPoint(origin.x() / 2, origin.y() / 2);
+    return stride * origin.y() + bytes_per_pixel * origin.x();
+  };
+  const size_t dst_y_visible_offset = compute_dst_visible_data_offset(0);
+  const size_t dst_uv_visible_offset = compute_dst_visible_data_offset(1);
+
+  for (size_t i = 0; i < num_frames_; i++) {
+    const uint8_t* src_plane = data_.data() + (i * src_frame_size);
+    uint8_t* const dst_plane = new_data.data() + (i * dst_frame_size);
+    uint8_t* const dst_y_plane_visible_data =
+        dst_plane + dst_layout->planes()[0].offset + dst_y_visible_offset;
+    uint8_t* const dst_uv_plane_visible_data =
+        dst_plane + dst_layout->planes()[1].offset + dst_uv_visible_offset;
+    // Copy the source buffer to the visible area of the destination buffer.
+    // libyuv::NV12Scale copies the source to the destination as-is when their
+    // resolutions are the same.
+    libyuv::NV12Scale(src_plane + src_layout->planes()[0].offset,
+                      src_layout->planes()[0].stride,
+                      src_plane + src_layout->planes()[1].offset,
+                      src_layout->planes()[1].stride, resolution_.width(),
+                      resolution_.height(), dst_y_plane_visible_data,
+                      dst_layout->planes()[0].stride, dst_uv_plane_visible_data,
+                      dst_layout->planes()[1].stride, visible_rect_.width(),
+                      visible_rect_.height(),
+                      libyuv::FilterMode::kFilterBilinear);
+  }
+  new_video->data_ = std::move(new_data);
+  return new_video;
+}
+
 std::unique_ptr<Video> Video::ConvertToNV12() const {
   LOG_ASSERT(IsLoaded()) << "The source video is not loaded";
   LOG_ASSERT(pixel_format_ == VideoPixelFormat::PIXEL_FORMAT_I420)
@@ -61,6 +135,7 @@ std::unique_ptr<Video> Video::ConvertToNV12() const {
   new_video->num_frames_ = num_frames_;
   new_video->num_fragments_ = num_fragments_;
   new_video->resolution_ = resolution_;
+  new_video->visible_rect_ = visible_rect_;
   new_video->pixel_format_ = PIXEL_FORMAT_NV12;

   // Convert I420 To NV12.
@@ -239,6 +314,10 @@ gfx::Size Video::Resolution() const {
   return resolution_;
 }

+gfx::Rect Video::VisibleRect() const {
+  return visible_rect_;
+}
+
 base::TimeDelta Video::GetDuration() const {
   return base::TimeDelta::FromSecondsD(static_cast<double>(num_frames_) /
                                        static_cast<double>(frame_rate_));
@@ -381,6 +460,9 @@ bool Video::LoadMetadata() {
   }
   resolution_ = gfx::Size(static_cast<uint32_t>(width->GetInt()),
                           static_cast<uint32_t>(height->GetInt()));
+  // The default visible rectangle is (0, 0, |resolution_|). Expand() needs to
+  // be called to change the visible rectangle.
+  visible_rect_ = gfx::Rect(resolution_);

   // Find optional frame checksums. These are only required when using the frame
   // validator.
......
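
A minimal usage sketch of the new Video::Expand() (hypothetical fragment, not part of this CL; it mirrors the disabled tests added at the end, assuming |video| is a loaded 640x360 NV12 media::test::Video):

  // Center the 640x360 content vertically in a 640x480 buffer; the new
  // video's visible rectangle becomes (0, 60, 640x360).
  std::unique_ptr<media::test::Video> expanded =
      video->Expand(gfx::Size(640, 480), gfx::Rect(0, 60, 640, 360));
  LOG_ASSERT(expanded->Resolution() == gfx::Size(640, 480));
  LOG_ASSERT(expanded->VisibleRect() == gfx::Rect(0, 60, 640, 360));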
@@ -16,6 +16,7 @@
 #include "base/time/time.h"
 #include "media/base/video_codecs.h"
 #include "media/base/video_types.h"
+#include "ui/gfx/geometry/rect.h"
 #include "ui/gfx/geometry/size.h"

 namespace media {
@@ -36,6 +37,11 @@ class Video {
   // Create a new Video instance by copying and converting |data_| to NV12.
   std::unique_ptr<Video> ConvertToNV12() const;

+  // Create a new Video instance by copying the content to |visible_rect_| area
+  // and expanding the resolution to |resolution_|. This is only supported for
+  // raw videos in the NV12 format.
+  std::unique_ptr<Video> Expand(const gfx::Size& resolution,
+                                const gfx::Rect& visible_rect) const;
+
   // Load the video file from disk. |max_frames| is the maximum number of
   // frames to be read from disk.
@@ -68,6 +74,8 @@ class Video {
   uint32_t NumFragments() const;
   // Get the video resolution.
   gfx::Size Resolution() const;
+  // Get the video visible rectangle.
+  gfx::Rect VisibleRect() const;
   // Get the video duration.
   base::TimeDelta GetDuration() const;
@@ -145,6 +153,7 @@ class Video {
   uint32_t num_frames_ = 0;
   uint32_t num_fragments_ = 0;
   gfx::Size resolution_;
+  gfx::Rect visible_rect_;

   DISALLOW_COPY_AND_ASSIGN(Video);
 };
......
@@ -276,13 +276,13 @@ void VideoEncoderClient::RequireBitstreamBuffers(
     coded_size = video_->Resolution();
   }

-  // TODO(crbug.com/1045825): Add support for videos with a visible rectangle
-  // not starting at (0,0).
   // Follow the behavior of the chrome capture stack; |natural_size| is the
   // dimension to be encoded.
   aligned_data_helper_ = std::make_unique<AlignedDataHelper>(
-      video_->Data(), video_->NumFrames(), video_->PixelFormat(), coded_size,
-      /*visible_rect=*/gfx::Rect(video_->Resolution()),
+      video_->Data(), video_->NumFrames(), video_->PixelFormat(),
+      /*src_coded_size=*/video_->Resolution(),
+      /*dst_coded_size=*/coded_size,
+      /*visible_rect=*/video_->VisibleRect(),
       /*natural_size=*/encoder_client_config_.output_resolution,
       encoder_client_config_.input_storage_type ==
               VideoEncodeAccelerator::Config::StorageType::kDmabuf
......
@@ -269,10 +269,9 @@ bool ConvertVideoFrame(const VideoFrame* src_frame, VideoFrame* dst_frame) {

 scoped_refptr<VideoFrame> ConvertVideoFrame(const VideoFrame* src_frame,
                                             VideoPixelFormat dst_pixel_format) {
-  gfx::Rect visible_rect = src_frame->visible_rect();
   auto dst_frame = VideoFrame::CreateFrame(
-      dst_pixel_format, visible_rect.size(), visible_rect, visible_rect.size(),
-      base::TimeDelta());
+      dst_pixel_format, src_frame->coded_size(), src_frame->visible_rect(),
+      src_frame->natural_size(), src_frame->timestamp());
   if (!dst_frame) {
     LOG(ERROR) << "Failed to convert video frame to " << dst_frame->format();
     return nullptr;
......
@@ -373,22 +373,25 @@ AlignedDataHelper::AlignedDataHelper(
     const std::vector<uint8_t>& stream,
     uint32_t num_frames,
     VideoPixelFormat pixel_format,
-    const gfx::Size& coded_size,
-    const gfx::Rect& visible_area,
+    const gfx::Size& src_coded_size,
+    const gfx::Size& dst_coded_size,
+    const gfx::Rect& visible_rect,
     const gfx::Size& natural_size,
     VideoFrame::StorageType storage_type,
     gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory)
     : num_frames_(num_frames),
      storage_type_(storage_type),
       gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
-      visible_area_(visible_area),
+      visible_rect_(visible_rect),
       natural_size_(natural_size) {
   if (storage_type_ == VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
     LOG_ASSERT(gpu_memory_buffer_factory_ != nullptr);
-    InitializeGpuMemoryBufferFrames(stream, pixel_format, coded_size);
+    InitializeGpuMemoryBufferFrames(stream, pixel_format, src_coded_size,
+                                    dst_coded_size);
   } else {
     LOG_ASSERT(storage_type == VideoFrame::STORAGE_MOJO_SHARED_BUFFER);
-    InitializeAlignedMemoryFrames(stream, pixel_format, coded_size);
+    InitializeAlignedMemoryFrames(stream, pixel_format, src_coded_size,
+                                  dst_coded_size);
   }
   LOG_ASSERT(video_frame_data_.size() == num_frames_)
       << "Failed to initialize VideoFrames";
@@ -439,7 +442,7 @@ scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
     gpu::MailboxHolder dummy_mailbox[media::VideoFrame::kMaxPlanes];
     return media::VideoFrame::WrapExternalGpuMemoryBuffer(
-        visible_area_, natural_size_, std::move(gpu_memory_buffer),
+        visible_rect_, natural_size_, std::move(gpu_memory_buffer),
         dummy_mailbox, base::DoNothing() /* mailbox_holder_release_cb_ */,
         base::TimeTicks::Now().since_origin());
   } else {
@@ -460,7 +463,7 @@ scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
     const size_t video_frame_size =
         layout_->planes().back().offset + layout_->planes().back().size;
     return MojoSharedBufferVideoFrame::Create(
-        layout_->format(), layout_->coded_size(), visible_area_, natural_size_,
+        layout_->format(), layout_->coded_size(), visible_rect_, natural_size_,
         std::move(dup_handle), video_frame_size, offsets, strides,
         base::TimeTicks::Now().since_origin());
   }
@@ -469,7 +472,8 @@ scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
 void AlignedDataHelper::InitializeAlignedMemoryFrames(
     const std::vector<uint8_t>& stream,
     const VideoPixelFormat pixel_format,
-    const gfx::Size& coded_size) {
+    const gfx::Size& src_coded_size,
+    const gfx::Size& dst_coded_size) {
   ASSERT_NE(pixel_format, PIXEL_FORMAT_UNKNOWN);

   // Calculate padding in bytes to be added after each plane required to keep
@@ -480,7 +484,7 @@ void AlignedDataHelper::InitializeAlignedMemoryFrames(
   // the VEA; each row of |src_strides| bytes in the original file needs to be
   // copied into a row of |strides_| bytes in the aligned file.
   size_t video_frame_size;
-  layout_ = GetAlignedVideoFrameLayout(pixel_format, coded_size,
+  layout_ = GetAlignedVideoFrameLayout(pixel_format, dst_coded_size,
                                        kPlatformBufferAlignment, nullptr,
                                        &video_frame_size);
   LOG_ASSERT(video_frame_size > 0UL);
@@ -488,7 +492,7 @@ void AlignedDataHelper::InitializeAlignedMemoryFrames(
   std::vector<size_t> src_plane_rows;
   size_t src_video_frame_size = 0;
   auto src_layout = GetAlignedVideoFrameLayout(
-      pixel_format, visible_area_.size(), 1u /* alignment */, &src_plane_rows,
+      pixel_format, src_coded_size, 1u /* alignment */, &src_plane_rows,
       &src_video_frame_size);
   LOG_ASSERT(stream.size() % src_video_frame_size == 0U)
       << "Stream byte size is not a product of calculated frame byte size";
@@ -520,17 +524,18 @@ void AlignedDataHelper::InitializeGpuMemoryBufferFrames(
 void AlignedDataHelper::InitializeGpuMemoryBufferFrames(
     const std::vector<uint8_t>& stream,
     const VideoPixelFormat pixel_format,
-    const gfx::Size& coded_size) {
+    const gfx::Size& src_coded_size,
+    const gfx::Size& dst_coded_size) {
 #if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
   layout_ = GetPlatformVideoFrameLayout(
-      gpu_memory_buffer_factory_, pixel_format, visible_area_.size(),
+      gpu_memory_buffer_factory_, pixel_format, dst_coded_size,
       gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE);
   ASSERT_TRUE(layout_) << "Failed getting platform VideoFrameLayout";
   std::vector<size_t> src_plane_rows;
   size_t src_video_frame_size = 0;
   auto src_layout = GetAlignedVideoFrameLayout(
-      pixel_format, visible_area_.size(), 1u /* alignment */, &src_plane_rows,
+      pixel_format, src_coded_size, 1u /* alignment */, &src_plane_rows,
      &src_video_frame_size);
   LOG_ASSERT(stream.size() % src_video_frame_size == 0U)
       << "Stream byte size is not a product of calculated frame byte size";
@@ -539,7 +544,7 @@ void AlignedDataHelper::InitializeGpuMemoryBufferFrames(
   const uint8_t* src_frame_ptr = &stream[0];
   for (size_t i = 0; i < num_frames_; i++) {
     auto memory_frame =
-        VideoFrame::CreateFrame(pixel_format, coded_size, visible_area_,
+        VideoFrame::CreateFrame(pixel_format, dst_coded_size, visible_rect_,
                                 natural_size_, base::TimeDelta());
     LOG_ASSERT(!!memory_frame) << "Failed creating VideoFrame";
     for (size_t i = 0; i < num_planes; i++) {
@@ -652,7 +657,7 @@ scoped_refptr<const VideoFrame> RawDataHelper::GetFrame(size_t index) {
   // changes made in crrev.com/c/2050895.
   scoped_refptr<const VideoFrame> video_frame =
       VideoFrame::WrapExternalYuvDataWithLayout(
-          *layout_, gfx::Rect(video_->Resolution()), video_->Resolution(),
+          *layout_, video_->VisibleRect(), video_->VisibleRect().size(),
           frame_data[0], frame_data[1], frame_data[2],
           base::TimeTicks::Now().since_origin());
   return video_frame;
......
@@ -203,8 +203,9 @@ class AlignedDataHelper {
       const std::vector<uint8_t>& stream,
       uint32_t num_frames,
       VideoPixelFormat pixel_format,
-      const gfx::Size& coded_size,
-      const gfx::Rect& visible_area,
+      const gfx::Size& src_coded_size,
+      const gfx::Size& dst_coded_size,
+      const gfx::Rect& visible_rect,
       const gfx::Size& natural_size,
       VideoFrame::StorageType storage_type,
       gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory);
@@ -234,12 +235,14 @@ class AlignedDataHelper {
   // kPlatformBufferAlignment.
   void InitializeAlignedMemoryFrames(const std::vector<uint8_t>& stream,
                                      const VideoPixelFormat pixel_format,
-                                     const gfx::Size& coded_size);
+                                     const gfx::Size& src_coded_size,
+                                     const gfx::Size& dst_coded_size);
   // Create GpuMemoryBuffer VideoFrame whose alignments is determined by
   // a GpuMemoryBuffer allocation backend (e.g. minigbm).
   void InitializeGpuMemoryBufferFrames(const std::vector<uint8_t>& stream,
                                        const VideoPixelFormat pixel_format,
-                                       const gfx::Size& coded_size);
+                                       const gfx::Size& src_coded_size,
+                                       const gfx::Size& dst_coded_size);

   // The index of VideoFrame to be read next.
   uint32_t frame_index_ = 0;
@@ -251,7 +254,7 @@ class AlignedDataHelper {
   // The layout of VideoFrames returned by GetNextFrame().
   base::Optional<VideoFrameLayout> layout_;
-  const gfx::Rect visible_area_;
+  const gfx::Rect visible_rect_;
   const gfx::Size natural_size_;

   // The frame data returned by GetNextFrame().
......
@@ -476,6 +476,72 @@ TEST_F(VideoEncoderTest, DISABLED_FlushAtEndOfStream_NV12DmabufScaling) {
   EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
   EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
 }

+// TODO(hiroh): Enable this test after the test dashboard becomes more green.
+// Encode VideoFrames with cropping the rectangle (0, 60, size).
+// Cropping is required in VideoEncodeAccelerator when zero-copy video
+// capture is enabled. One example is when 640x360 capture recording is
+// requested, a camera cannot produce the resolution and instead produce 640x480
+// frames with visible_rect=0, 60, 640x360.
+TEST_F(VideoEncoderTest,
+       DISABLED_FlushAtEndOfStream_NV12DmabufCroppingTopAndBottom) {
+  constexpr int kGrowHeight = 120;
+  const gfx::Size original_resolution = g_env->Video()->Resolution();
+  const gfx::Rect expanded_visible_rect(0, kGrowHeight / 2,
+                                        original_resolution.width(),
+                                        original_resolution.height());
+  const gfx::Size expanded_resolution(
+      original_resolution.width(), original_resolution.height() + kGrowHeight);
+  auto nv12_video = g_env->Video()->ConvertToNV12();
+  ASSERT_TRUE(nv12_video);
+  auto nv12_expanded_video =
+      nv12_video->Expand(expanded_resolution, expanded_visible_rect);
+  ASSERT_TRUE(nv12_expanded_video);
+  nv12_video.reset();
+  VideoEncoderClientConfig config(nv12_expanded_video.get(), g_env->Profile(),
+                                  g_env->NumTemporalLayers(), g_env->Bitrate());
+  config.output_resolution = original_resolution;
+  config.input_storage_type =
+      VideoEncodeAccelerator::Config::StorageType::kDmabuf;
+
+  auto encoder = CreateVideoEncoder(nv12_expanded_video.get(), config);
+  encoder->Encode();
+  EXPECT_TRUE(encoder->WaitForFlushDone());
+  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+  EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_expanded_video->NumFrames());
+  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+}
+
+// TODO(hiroh): Enable this test after the test dashboard becomes more green.
+// Encode VideoFrames with cropping the rectangle (60, 0, size).
+TEST_F(VideoEncoderTest,
+       DISABLED_FlushAtEndOfStream_NV12DmabufCroppingRightAndLeft) {
+  constexpr int kGrowWidth = 120;
+  const gfx::Size original_resolution = g_env->Video()->Resolution();
+  const gfx::Rect expanded_visible_rect(kGrowWidth / 2, 0,
+                                        original_resolution.width(),
+                                        original_resolution.height());
+  const gfx::Size expanded_resolution(original_resolution.width() + kGrowWidth,
+                                      original_resolution.height());
+  auto nv12_video = g_env->Video()->ConvertToNV12();
+  ASSERT_TRUE(nv12_video);
+  auto nv12_expanded_video =
+      nv12_video->Expand(expanded_resolution, expanded_visible_rect);
+  ASSERT_TRUE(nv12_expanded_video);
+  nv12_video.reset();
+  VideoEncoderClientConfig config(nv12_expanded_video.get(), g_env->Profile(),
+                                  g_env->NumTemporalLayers(), g_env->Bitrate());
+  config.output_resolution = original_resolution;
+  config.input_storage_type =
+      VideoEncodeAccelerator::Config::StorageType::kDmabuf;
+
+  auto encoder = CreateVideoEncoder(nv12_expanded_video.get(), config);
+  encoder->Encode();
+  EXPECT_TRUE(encoder->WaitForFlushDone());
+  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+  EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_expanded_video->NumFrames());
+  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+}
+
 }  // namespace test
 }  // namespace media
......