Commit 38163d72 authored by Hirokazu Honda, committed by Commit Bot

media/gpu/test/video_encode_accelerator_tests: Add NV12 dmabuf scaling test case

This adds a test case for encoding an NV12 Dmabuf VideoFrame with
down-scaling; the input resolution is twice the width and height
configured in the VideoEncodeAccelerator. This can happen in WebRTC
simulcast scenarios when the zero-copy capture feature is enabled.
To avoid hitting encoders' minimum-resolution limitations, the test
case is skipped if the resolution to be produced is less than
240x180.
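As a sketch of that skip condition, assuming a 2x down-scale of a hypothetical 640x360 input (the names and values here are illustrative, not part of the change):

```cpp
// Minimal sketch of the output-resolution derivation and the 240x180 skip
// check described above. |input| is a hypothetical capture resolution.
constexpr gfx::Size kMinOutputResolution(240, 180);
const gfx::Size input(640, 360);
const gfx::Size output(input.width() / 2, input.height() / 2);  // 320x180
const bool skip =
    !gfx::Rect(output).Contains(gfx::Rect(kMinOutputResolution));
// 320x180 contains 240x180, so this input would run; a 320x240 input
// (160x120 output) would be skipped.
```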

Bug: 1045825
Test: video_encode_accelerator_tests on atlas and kukui
Change-Id: I11eac935aa2c6a03bcef0dfa533c8731d593d3df
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2342535
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Cr-Commit-Position: refs/heads/master@{#827711}
parent 2940e719
@@ -70,6 +70,7 @@ VideoEncoderClientConfig::VideoEncoderClientConfig(
     size_t num_temporal_layers,
     uint32_t bitrate)
     : output_profile(output_profile),
+      output_resolution(video->Resolution()),
       num_temporal_layers(num_temporal_layers),
       bitrate(bitrate),
       framerate(video->FrameRate()),
@@ -267,11 +268,22 @@ void VideoEncoderClient::RequireBitstreamBuffers(
   ASSERT_GT(output_buffer_size, 0UL);
   DVLOGF(4);

+  gfx::Size coded_size = input_coded_size;
+  if (video_->Resolution() != encoder_client_config_.output_resolution) {
+    // Scaling case. Scaling is currently only supported when using Dmabufs.
+    EXPECT_EQ(encoder_client_config_.input_storage_type,
+              VideoEncodeAccelerator::Config::StorageType::kDmabuf);
+    coded_size = video_->Resolution();
+  }
+
   // TODO(crbug.com/1045825): Add support for videos with a visible rectangle
   // not starting at (0,0).
+  // Follow the behavior of the chrome capture stack; |natural_size| is the
+  // dimension to be encoded.
   aligned_data_helper_ = std::make_unique<AlignedDataHelper>(
-      video_->Data(), video_->NumFrames(), video_->PixelFormat(),
-      gfx::Rect(video_->Resolution()), input_coded_size,
+      video_->Data(), video_->NumFrames(), video_->PixelFormat(), coded_size,
+      /*visible_rect=*/gfx::Rect(video_->Resolution()),
+      /*natural_size=*/encoder_client_config_.output_resolution,
       encoder_client_config_.input_storage_type ==
               VideoEncodeAccelerator::Config::StorageType::kDmabuf
           ? VideoFrame::STORAGE_GPU_MEMORY_BUFFER
@@ -409,7 +421,7 @@ void VideoEncoderClient::CreateEncoderTask(const Video* video,
   video_ = video;
   const VideoEncodeAccelerator::Config config(
-      video_->PixelFormat(), video_->Resolution(),
+      video_->PixelFormat(), encoder_client_config_.output_resolution,
       encoder_client_config_.output_profile, encoder_client_config_.bitrate,
       encoder_client_config_.framerate, base::nullopt /* gop_length */,
       base::nullopt /* h264_output_level*/, false /* is_constrained_h264 */,
......
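The three geometry arguments passed to AlignedDataHelper above follow the VideoFrame convention used by the Chrome capture stack. A minimal sketch with hypothetical values, assuming a 640x360 input scaled down to 320x180:

```cpp
// Hypothetical values for the scaling case (640x360 input, 320x180 output).
const gfx::Size coded_size(640, 360);    // allocation size; per the change
                                         // above, the full input resolution
const gfx::Rect visible_rect(640, 360);  // pixels that carry actual content
const gfx::Size natural_size(320, 180);  // dimensions the frame should be
                                         // encoded at (output_resolution)
// When natural_size differs from visible_rect.size(), the consumer (here
// the encoder) is expected to down-scale the visible area to natural_size.
```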
@@ -43,6 +43,8 @@ struct VideoEncoderClientConfig {
   // The output profile to be used.
   VideoCodecProfile output_profile = VideoCodecProfile::H264PROFILE_MAIN;
+  // The resolution output by VideoEncoderClient.
+  gfx::Size output_resolution;
   // The number of temporal layers of the output stream.
   size_t num_temporal_layers = 1u;
   // The maximum number of bitstream buffer encodes that can be requested
......
@@ -285,6 +285,33 @@ scoped_refptr<VideoFrame> ConvertVideoFrame(const VideoFrame* src_frame,
   return dst_frame;
 }

+scoped_refptr<VideoFrame> ScaleVideoFrame(const VideoFrame* src_frame,
+                                          const gfx::Size& dst_resolution) {
+  if (src_frame->format() != PIXEL_FORMAT_NV12) {
+    LOG(ERROR) << src_frame->format() << " is not supported";
+    return nullptr;
+  }
+
+  auto scaled_frame = VideoFrame::CreateFrame(
+      PIXEL_FORMAT_NV12, dst_resolution, gfx::Rect(dst_resolution),
+      dst_resolution, src_frame->timestamp());
+  const int fail_scaling = libyuv::NV12Scale(
+      src_frame->visible_data(VideoFrame::kYPlane),
+      src_frame->stride(VideoFrame::kYPlane),
+      src_frame->visible_data(VideoFrame::kUVPlane),
+      src_frame->stride(VideoFrame::kUVPlane),
+      src_frame->visible_rect().width(), src_frame->visible_rect().height(),
+      scaled_frame->visible_data(VideoFrame::kYPlane),
+      scaled_frame->stride(VideoFrame::kYPlane),
+      scaled_frame->visible_data(VideoFrame::kUVPlane),
+      scaled_frame->stride(VideoFrame::kUVPlane), dst_resolution.width(),
+      dst_resolution.height(), libyuv::FilterMode::kFilterBilinear);
+  if (fail_scaling) {
+    LOG(ERROR) << "Failed scaling the source frame";
+    return nullptr;
+  }
+
+  return scaled_frame;
+}
+
 scoped_refptr<VideoFrame> CloneVideoFrame(
     gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
     const VideoFrame* const src_frame,
......
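ScaleVideoFrame() above allocates an owned, mapped NV12 frame and scales into it with bilinear filtering. A minimal usage sketch; the half-size factor and the wrapper function are illustrative, not part of the change:

```cpp
// Sketch: scale a mapped NV12 frame down to half its visible size.
// |src| is assumed to be a valid, CPU-mapped NV12 VideoFrame.
scoped_refptr<VideoFrame> HalveNV12Frame(const VideoFrame* src) {
  const gfx::Size half_size(src->visible_rect().width() / 2,
                            src->visible_rect().height() / 2);
  // Returns nullptr for non-NV12 input or when libyuv::NV12Scale() fails,
  // so callers must null-check the result.
  return ScaleVideoFrame(src, half_size);
}
```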
@@ -53,6 +53,11 @@ bool ConvertVideoFrame(const VideoFrame* src_frame, VideoFrame* dst_frame);
 scoped_refptr<VideoFrame> ConvertVideoFrame(const VideoFrame* src_frame,
                                             VideoPixelFormat dst_pixel_format);

+// Scale and copy |src_frame| to a new video frame with the specified
+// |dst_resolution|. The only supported input format is NV12.
+scoped_refptr<VideoFrame> ScaleVideoFrame(const VideoFrame* src_frame,
+                                          const gfx::Size& dst_resolution);
+
 // Copy |src_frame| into a new VideoFrame.
 // If |dst_storage_type| is STORAGE_DMABUFS, this function creates DMABUF-backed
 // VideoFrame with |dst_layout|. If |dst_storage_type| is STORAGE_OWNED_MEMORY,
......
@@ -373,14 +373,16 @@ AlignedDataHelper::AlignedDataHelper(
     const std::vector<uint8_t>& stream,
     uint32_t num_frames,
     VideoPixelFormat pixel_format,
-    const gfx::Rect& visible_area,
     const gfx::Size& coded_size,
+    const gfx::Rect& visible_area,
+    const gfx::Size& natural_size,
     VideoFrame::StorageType storage_type,
     gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory)
     : num_frames_(num_frames),
       storage_type_(storage_type),
       gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
-      visible_area_(visible_area) {
+      visible_area_(visible_area),
+      natural_size_(natural_size) {
   if (storage_type_ == VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
     LOG_ASSERT(gpu_memory_buffer_factory_ != nullptr);
     InitializeGpuMemoryBufferFrames(stream, pixel_format, coded_size);
@@ -437,7 +439,7 @@ scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
     gpu::MailboxHolder dummy_mailbox[media::VideoFrame::kMaxPlanes];
     return media::VideoFrame::WrapExternalGpuMemoryBuffer(
-        visible_area_, visible_area_.size(), std::move(gpu_memory_buffer),
+        visible_area_, natural_size_, std::move(gpu_memory_buffer),
         dummy_mailbox, base::DoNothing() /* mailbox_holder_release_cb_ */,
         base::TimeTicks::Now().since_origin());
   } else {
@@ -458,9 +460,9 @@ scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
     const size_t video_frame_size =
         layout_->planes().back().offset + layout_->planes().back().size;
     return MojoSharedBufferVideoFrame::Create(
-        layout_->format(), layout_->coded_size(), visible_area_,
-        visible_area_.size(), std::move(dup_handle), video_frame_size, offsets,
-        strides, base::TimeTicks::Now().since_origin());
+        layout_->format(), layout_->coded_size(), visible_area_, natural_size_,
+        std::move(dup_handle), video_frame_size, offsets, strides,
+        base::TimeTicks::Now().since_origin());
   }
 }
@@ -538,7 +540,7 @@ void AlignedDataHelper::InitializeGpuMemoryBufferFrames(
   for (size_t i = 0; i < num_frames_; i++) {
     auto memory_frame =
         VideoFrame::CreateFrame(pixel_format, coded_size, visible_area_,
-                                visible_area_.size(), base::TimeDelta());
+                                natural_size_, base::TimeDelta());
     LOG_ASSERT(!!memory_frame) << "Failed creating VideoFrame";
     for (size_t i = 0; i < num_planes; i++) {
       libyuv::CopyPlane(src_frame_ptr + src_layout.planes()[i].offset,
......
@@ -203,8 +203,9 @@ class AlignedDataHelper {
       const std::vector<uint8_t>& stream,
       uint32_t num_frames,
       VideoPixelFormat pixel_format,
-      const gfx::Rect& visible_area,
       const gfx::Size& coded_size,
+      const gfx::Rect& visible_area,
+      const gfx::Size& natural_size,
       VideoFrame::StorageType storage_type,
       gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory);
   ~AlignedDataHelper();
@@ -251,6 +252,7 @@ class AlignedDataHelper {
   // The layout of VideoFrames returned by GetNextFrame().
   base::Optional<VideoFrameLayout> layout_;
   const gfx::Rect visible_area_;
+  const gfx::Size natural_size_;
   // The frame data returned by GetNextFrame().
   std::vector<VideoFrameData> video_frame_data_;
......
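With the reordering above, the constructor now takes coded_size, visible_area, natural_size in that order, matching VideoFrame::CreateFrame(). A hedged call sketch; the 320x180 natural size is a hypothetical down-scaled output:

```cpp
// Sketch of a call with the new parameter order. Since gfx::Rect and
// gfx::Size are distinct types, swapping visible_area against either size
// fails to compile, which guards callers through the reorder.
auto helper = std::make_unique<AlignedDataHelper>(
    video->Data(), video->NumFrames(), video->PixelFormat(),
    /*coded_size=*/video->Resolution(),
    /*visible_area=*/gfx::Rect(video->Resolution()),
    /*natural_size=*/gfx::Size(320, 180),
    VideoFrame::STORAGE_GPU_MEMORY_BUFFER, gpu_memory_buffer_factory);
```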
@@ -126,7 +126,7 @@ class VideoEncoderTest : public ::testing::Test {
       Video* video,
       const VideoEncoderClientConfig& config) {
     std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors;
-    const gfx::Rect visible_rect(video->Resolution());
+    const gfx::Rect visible_rect(config.output_resolution);
     const VideoCodec codec =
         VideoCodecProfileToVideoCodec(config.output_profile);
     if (g_env->SaveOutputBitstream()) {
@@ -183,7 +183,7 @@ class VideoEncoderTest : public ::testing::Test {
     VideoFrameValidator::GetModelFrameCB get_model_frame_cb =
         base::BindRepeating(&VideoEncoderTest::GetModelFrame,
-                            base::Unretained(this));
+                            base::Unretained(this), visible_rect);
     // Attach a video frame writer to store individual frames to disk if
     // requested.
@@ -212,15 +212,20 @@ class VideoEncoderTest : public ::testing::Test {
     return bitstream_processors;
   }

-  scoped_refptr<const VideoFrame> GetModelFrame(size_t frame_index) {
+  scoped_refptr<const VideoFrame> GetModelFrame(const gfx::Rect& visible_rect,
+                                                size_t frame_index) {
     LOG_ASSERT(raw_data_helper_);
-    return raw_data_helper_->GetFrame(frame_index %
-                                      g_env->Video()->NumFrames());
+    auto frame =
+        raw_data_helper_->GetFrame(frame_index % g_env->Video()->NumFrames());
+    if (!frame)
+      return nullptr;
+    if (visible_rect.size() == frame->visible_rect().size())
+      return frame;
+    return ScaleVideoFrame(frame.get(), visible_rect.size());
   }

   std::unique_ptr<RawDataHelper> raw_data_helper_;
 };

 }  // namespace

 // TODO(dstaessens): Add more test scenarios:
@@ -399,6 +404,42 @@ TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12Dmabuf) {
   EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
   EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
 }

+// TODO(hiroh): Enable this test after the test dashboard becomes more green.
+// Downscaling is required in VideoEncodeAccelerator when zero-copy video
+// capture is enabled. One example is simulcast: the camera produces 360p
+// VideoFrames and there are two VideoEncodeAccelerators, one for 360p and
+// one for 180p. The VideoEncoder for 180p is fed 360p frames and thus has to
+// perform the scaling from 360p to 180p.
+TEST_F(VideoEncoderTest, DISABLED_FlushAtEndOfStream_NV12DmabufScaling) {
+  constexpr gfx::Size kMinOutputResolution(240, 180);
+  const gfx::Size output_resolution =
+      gfx::Size(g_env->Video()->Resolution().width() / 2,
+                g_env->Video()->Resolution().height() / 2);
+  if (!gfx::Rect(output_resolution)
+           .Contains(gfx::Rect(kMinOutputResolution))) {
+    GTEST_SKIP() << "Skip test if video resolution is too small, "
+                 << "output_resolution=" << output_resolution.ToString()
+                 << ", minimum output resolution="
+                 << kMinOutputResolution.ToString();
+  }
+
+  auto nv12_video = g_env->Video()->ConvertToNV12();
+  ASSERT_TRUE(nv12_video);
+  // Set 1/4 of the original bitrate because the area of |output_resolution|
+  // is 1/4 of the original resolution.
+  VideoEncoderClientConfig config(nv12_video.get(), g_env->Profile(),
+                                  g_env->NumTemporalLayers(),
+                                  g_env->Bitrate() / 4);
+  config.output_resolution = output_resolution;
+  config.input_storage_type =
+      VideoEncodeAccelerator::Config::StorageType::kDmabuf;
+
+  auto encoder = CreateVideoEncoder(nv12_video.get(), config);
+  encoder->Encode();
+  EXPECT_TRUE(encoder->WaitForFlushDone());
+  EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
+  EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
+  EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
+}
+
 }  // namespace test
 }  // namespace media
......