Commit 648a653d authored by Hirokazu Honda, committed by Commit Bot

media/gpu/video_encode_accelerator_tests: Add GpuMemoryBuffer VideoFrame input test case

GpuMemoryBuffer-based VideoFrames are the input to
VideoEncodeAccelerator in ARC++ and also in the zero-copy case.
This adds a test case where the input VideoFrames are
GpuMemoryBuffer-based and their pixel format is NV12.
Note that our production code strongly assumes the pixel format
is NV12 in the zero-copy path.

Bug: 1045825
Test: video_encode_accelerator_tests on atlas
Change-Id: Ifb894b39c40a2229209c3dd5884a16d7b033e428
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2260133
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: David Staessens <dstaessens@chromium.org>
Cr-Commit-Position: refs/heads/master@{#791751}
parent 73205e89
......@@ -5,6 +5,7 @@
#include "media/gpu/test/video.h"
#include <memory>
#include <numeric>
#include <utility>
#include "base/bind.h"
......@@ -25,7 +26,9 @@
#include "media/filters/in_memory_url_protocol.h"
#include "media/filters/vpx_video_decoder.h"
#include "media/gpu/macros.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
namespace media {
......@@ -44,6 +47,53 @@ Video::Video(const base::FilePath& file_path,
Video::~Video() = default;
std::unique_ptr<Video> Video::ConvertToNV12() const {
LOG_ASSERT(IsLoaded()) << "The source video is not loaded";
LOG_ASSERT(pixel_format_ == VideoPixelFormat::PIXEL_FORMAT_I420)
<< "The pixel format of source video is not I420";
auto new_video = std::make_unique<Video>(file_path_, metadata_file_path_);
new_video->frame_checksums_ = frame_checksums_;
new_video->thumbnail_checksums_ = thumbnail_checksums_;
new_video->profile_ = profile_;
new_video->codec_ = codec_;
new_video->frame_rate_ = frame_rate_;
new_video->num_frames_ = num_frames_;
new_video->num_fragments_ = num_fragments_;
new_video->resolution_ = resolution_;
new_video->pixel_format_ = PIXEL_FORMAT_NV12;
// Convert I420 to NV12.
const auto i420_layout = CreateVideoFrameLayout(
PIXEL_FORMAT_I420, resolution_, 1u /* alignment */);
const auto nv12_layout =
CreateVideoFrameLayout(PIXEL_FORMAT_NV12, resolution_, 1u /* alignment */);
LOG_ASSERT(i420_layout && nv12_layout) << "Failed creating VideoFrameLayout";
const size_t i420_frame_size =
i420_layout->planes().back().offset + i420_layout->planes().back().size;
const size_t nv12_frame_size =
nv12_layout->planes().back().offset + nv12_layout->planes().back().size;
LOG_ASSERT(i420_frame_size * num_frames_ == data_.size())
<< "Unexpected data size";
std::vector<uint8_t> new_data(nv12_frame_size * num_frames_);
for (size_t i = 0; i < num_frames_; i++) {
const uint8_t* src_plane = data_.data() + (i * i420_frame_size);
uint8_t* dst_plane = new_data.data() + (i * nv12_frame_size);
libyuv::I420ToNV12(src_plane + i420_layout->planes()[0].offset,
i420_layout->planes()[0].stride,
src_plane + i420_layout->planes()[1].offset,
i420_layout->planes()[1].stride,
src_plane + i420_layout->planes()[2].offset,
i420_layout->planes()[2].stride,
dst_plane + nv12_layout->planes()[0].offset,
nv12_layout->planes()[0].stride,
dst_plane + nv12_layout->planes()[1].offset,
nv12_layout->planes()[1].stride, resolution_.width(),
resolution_.height());
}
new_video->data_ = std::move(new_data);
return new_video;
}
bool Video::Load() {
// TODO(dstaessens@) Investigate reusing existing infrastructure such as
// DecoderBuffer.
......
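For reference, a minimal sketch of the plane arithmetic that ConvertToNV12() above relies on; the helper below is illustrative only and not part of this change. I420 (three planes: Y, U, V) and NV12 (two planes: Y, interleaved UV) are both 4:2:0 formats, so the conversion preserves the total byte count per frame:

#include <cstddef>

// Bytes per 4:2:0 frame at 1-byte alignment: a full-resolution Y plane plus
// two chroma planes subsampled 2x2 (rounded up for odd dimensions).
constexpr size_t Yuv420FrameSize(size_t width, size_t height) {
  const size_t chroma = ((width + 1) / 2) * ((height + 1) / 2);
  return width * height + 2 * chroma;
}

// I420 and NV12 differ only in chroma interleaving, not in total size.
static_assert(Yuv420FrameSize(320, 240) == 115200, "320x240 4:2:0 frame");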
......@@ -33,6 +33,9 @@ class Video {
const base::FilePath& metadata_file_path);
~Video();
// Create a new Video instance by copying and converting |data_| to NV12.
std::unique_ptr<Video> ConvertToNV12() const;
// Load the video file from disk.
bool Load();
// Returns true if the video file was loaded.
......
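A minimal usage sketch of the new method (illustrative; |file_path| and |metadata_file_path| are placeholders, and the LOG_ASSERTs in ConvertToNV12() fire for any non-I420 source):

auto video =
    std::make_unique<media::test::Video>(file_path, metadata_file_path);
LOG_ASSERT(video->Load());
// Deep-copies the stream, so |video| stays usable afterwards.
auto nv12_video = video->ConvertToNV12();
LOG_ASSERT(nv12_video);
LOG_ASSERT(nv12_video->PixelFormat() == media::PIXEL_FORMAT_NV12);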
......@@ -6,6 +6,7 @@
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/video_bitrate_allocation.h"
#include "media/gpu/macros.h"
#include "media/gpu/test/bitstream_helpers.h"
......@@ -47,9 +48,10 @@ constexpr size_t kDefaultEventListSize = 512;
// static
std::unique_ptr<VideoEncoder> VideoEncoder::Create(
const VideoEncoderClientConfig& config,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors) {
auto video_encoder = base::WrapUnique(new VideoEncoder());
if (!video_encoder->CreateEncoderClient(config,
if (!video_encoder->CreateEncoderClient(config, gpu_memory_buffer_factory,
std::move(bitstream_processors))) {
return nullptr;
}
......@@ -72,6 +74,7 @@ VideoEncoder::~VideoEncoder() {
bool VideoEncoder::CreateEncoderClient(
const VideoEncoderClientConfig& config,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_EQ(video_encoder_state_.load(), EncoderState::kUninitialized);
......@@ -83,8 +86,9 @@ bool VideoEncoder::CreateEncoderClient(
EventCallback event_cb =
base::BindRepeating(&VideoEncoder::NotifyEvent, base::Unretained(this));
encoder_client_ = VideoEncoderClient::Create(
event_cb, std::move(bitstream_processors), config);
encoder_client_ =
VideoEncoderClient::Create(event_cb, std::move(bitstream_processors),
gpu_memory_buffer_factory, config);
if (!encoder_client_) {
VLOGF(1) << "Failed to create video encoder client";
return false;
......
......@@ -18,6 +18,10 @@
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
namespace gpu {
class GpuMemoryBufferFactory;
} // namespace gpu
namespace media {
namespace test {
......@@ -52,6 +56,7 @@ class VideoEncoder {
// destroyed on the same sequence where they are created.
static std::unique_ptr<VideoEncoder> Create(
const VideoEncoderClientConfig& config,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors =
{});
......@@ -111,6 +116,7 @@ class VideoEncoder {
bool CreateEncoderClient(
const VideoEncoderClientConfig& config,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors);
// Notify the video encoder an event has occurred (e.g. bitstream ready).
......
......@@ -11,6 +11,7 @@
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/bind_to_current_loop.h"
#include "media/gpu/gpu_video_encode_accelerator_factory.h"
#include "media/gpu/macros.h"
......@@ -68,13 +69,15 @@ void VideoEncoderStats::Reset() {
VideoEncoderClient::VideoEncoderClient(
const VideoEncoder::EventCallback& event_cb,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors,
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
const VideoEncoderClientConfig& config)
: event_cb_(event_cb),
bitstream_processors_(std::move(bitstream_processors)),
encoder_client_config_(config),
encoder_client_thread_("VDAClientEncoderThread"),
encoder_client_state_(VideoEncoderClientState::kUninitialized),
current_stats_(encoder_client_config_.framerate) {
current_stats_(encoder_client_config_.framerate),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory) {
DETACH_FROM_SEQUENCE(encoder_client_sequence_checker_);
weak_this_ = weak_this_factory_.GetWeakPtr();
......@@ -90,9 +93,11 @@ VideoEncoderClient::~VideoEncoderClient() {
std::unique_ptr<VideoEncoderClient> VideoEncoderClient::Create(
const VideoEncoder::EventCallback& event_cb,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
const VideoEncoderClientConfig& config) {
return base::WrapUnique(new VideoEncoderClient(
event_cb, std::move(bitstream_processors), config));
return base::WrapUnique(
new VideoEncoderClient(event_cb, std::move(bitstream_processors),
gpu_memory_buffer_factory, config));
}
bool VideoEncoderClient::Initialize(const Video* video) {
......@@ -192,7 +197,12 @@ void VideoEncoderClient::RequireBitstreamBuffers(
// not starting at (0,0).
aligned_data_helper_ = std::make_unique<AlignedDataHelper>(
video_->Data(), video_->NumFrames(), video_->PixelFormat(),
gfx::Rect(video_->Resolution()), input_coded_size);
gfx::Rect(video_->Resolution()), input_coded_size,
encoder_client_config_.input_storage_type ==
VideoEncodeAccelerator::Config::StorageType::kDmabuf
? VideoFrame::STORAGE_GPU_MEMORY_BUFFER
: VideoFrame::STORAGE_MOJO_SHARED_BUFFER,
gpu_memory_buffer_factory_);
output_buffer_size_ = output_buffer_size;
......@@ -297,7 +307,10 @@ void VideoEncoderClient::CreateEncoderTask(const Video* video,
const VideoEncodeAccelerator::Config config(
video_->PixelFormat(), video_->Resolution(),
encoder_client_config_.output_profile, encoder_client_config_.bitrate,
encoder_client_config_.framerate);
encoder_client_config_.framerate, base::nullopt /* gop_length */,
base::nullopt /* h264_output_level */, false /* is_constrained_h264 */,
encoder_client_config_.input_storage_type);
encoder_ = GpuVideoEncodeAcceleratorFactory::CreateVEA(config, this,
gpu::GpuPreferences());
*success = (encoder_ != nullptr);
......
......@@ -20,7 +20,7 @@
#include "media/video/video_encode_accelerator.h"
namespace gpu {
class gpu_memory_buffer_factory;
class GpuMemoryBufferFactory;
}
namespace media {
......@@ -51,6 +51,9 @@ struct VideoEncoderClientConfig {
// frames in the video, and in which case the VideoEncoderClient loops the
// video during encoding.
size_t num_frames_to_encode = 0;
// The storage type of the input VideoFrames.
VideoEncodeAccelerator::Config::StorageType input_storage_type =
VideoEncodeAccelerator::Config::StorageType::kShmem;
};
struct VideoEncoderStats {
......@@ -89,6 +92,7 @@ class VideoEncoderClient : public VideoEncodeAccelerator::Client {
static std::unique_ptr<VideoEncoderClient> Create(
const VideoEncoder::EventCallback& event_cb,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
const VideoEncoderClientConfig& config);
// Initialize the video encode accelerator for the specified |video|.
......@@ -136,6 +140,7 @@ class VideoEncoderClient : public VideoEncodeAccelerator::Client {
VideoEncoderClient(
const VideoEncoder::EventCallback& event_cb,
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory,
const VideoEncoderClientConfig& config);
// Destroy the video encoder client.
......@@ -222,6 +227,8 @@ class VideoEncoderClient : public VideoEncodeAccelerator::Client {
VideoEncoderStats current_stats_ GUARDED_BY(stats_lock_);
mutable base::Lock stats_lock_;
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
SEQUENCE_CHECKER(test_sequence_checker_);
SEQUENCE_CHECKER(encoder_client_sequence_checker_);
......
......@@ -6,13 +6,18 @@
#include <limits>
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/ptr_util.h"
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "media/base/format_utils.h"
#include "media/base/video_decoder_config.h"
#include "media/base/video_frame_layout.h"
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/test/video.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "media/mojo/common/mojo_shared_buffer_video_frame.h"
......@@ -21,6 +26,10 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
namespace media {
namespace test {
namespace {
......@@ -322,21 +331,39 @@ struct AlignedDataHelper::VideoFrameData {
VideoFrameData() = default;
VideoFrameData(mojo::ScopedSharedBufferHandle mojo_handle)
: mojo_handle(std::move(mojo_handle)) {}
VideoFrameData(gfx::GpuMemoryBufferHandle gmb_handle)
: gmb_handle(std::move(gmb_handle)) {}
VideoFrameData(VideoFrameData&&) = default;
VideoFrameData& operator=(VideoFrameData&&) = default;
VideoFrameData(const VideoFrameData&) = delete;
VideoFrameData& operator=(const VideoFrameData&) = delete;
mojo::ScopedSharedBufferHandle mojo_handle;
gfx::GpuMemoryBufferHandle gmb_handle;
};
AlignedDataHelper::AlignedDataHelper(const std::vector<uint8_t>& stream,
uint32_t num_frames,
VideoPixelFormat pixel_format,
const gfx::Rect& visible_area,
const gfx::Size& coded_size)
: num_frames_(num_frames), visible_area_(visible_area) {
InitializeAlignedMemoryFrames(stream, pixel_format, coded_size);
AlignedDataHelper::AlignedDataHelper(
const std::vector<uint8_t>& stream,
uint32_t num_frames,
VideoPixelFormat pixel_format,
const gfx::Rect& visible_area,
const gfx::Size& coded_size,
VideoFrame::StorageType storage_type,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory)
: num_frames_(num_frames),
storage_type_(storage_type),
gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
visible_area_(visible_area) {
if (storage_type_ == VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
LOG_ASSERT(gpu_memory_buffer_factory_ != nullptr);
InitializeGpuMemoryBufferFrames(stream, pixel_format, coded_size);
} else {
LOG_ASSERT(storage_type == VideoFrame::STORAGE_MOJO_SHARED_BUFFER);
InitializeAlignedMemoryFrames(stream, pixel_format, coded_size);
}
LOG_ASSERT(video_frame_data_.size() == num_frames_)
<< "Failed to initialize VideoFrames";
}
AlignedDataHelper::~AlignedDataHelper() {}
......@@ -355,26 +382,60 @@ bool AlignedDataHelper::AtEndOfStream() const {
scoped_refptr<VideoFrame> AlignedDataHelper::GetNextFrame() {
LOG_ASSERT(!AtEndOfStream());
const auto& mojo_handle = video_frame_data_[frame_index_++].mojo_handle;
auto dup_handle =
mojo_handle->Clone(mojo::SharedBufferHandle::AccessMode::READ_WRITE);
if (!dup_handle.is_valid()) {
LOG(ERROR) << "Failed duplicating mojo handle";
return nullptr;
}
if (storage_type_ == VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
const auto& gmb_handle = video_frame_data_[frame_index_++].gmb_handle;
auto dup_handle = gmb_handle.Clone();
if (dup_handle.is_null()) {
LOG(ERROR) << "Failed duplicating GpuMemoryBufferHandle";
return nullptr;
}
std::vector<uint32_t> offsets(layout_->planes().size());
std::vector<int32_t> strides(layout_->planes().size());
for (size_t i = 0; i < layout_->planes().size(); i++) {
offsets[i] = layout_->planes()[i].offset;
strides[i] = layout_->planes()[i].stride;
base::Optional<gfx::BufferFormat> buffer_format =
VideoPixelFormatToGfxBufferFormat(layout_->format());
if (!buffer_format) {
LOG(ERROR) << "Unexpected format: " << layout_->format();
return nullptr;
}
// Create GpuMemoryBuffer from GpuMemoryBufferHandle.
gpu::GpuMemoryBufferSupport support;
auto gpu_memory_buffer = support.CreateGpuMemoryBufferImplFromHandle(
std::move(dup_handle), layout_->coded_size(), *buffer_format,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
base::DoNothing());
if (!gpu_memory_buffer) {
LOG(ERROR) << "Failed to create GpuMemoryBuffer from "
<< "GpuMemoryBufferHandle";
return nullptr;
}
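// WrapExternalGpuMemoryBuffer() requires a mailbox holder array, but these
// tests never sample the frame as a texture, so default-constructed (empty)
// mailboxes are sufficient.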
gpu::MailboxHolder dummy_mailbox[media::VideoFrame::kMaxPlanes];
return media::VideoFrame::WrapExternalGpuMemoryBuffer(
visible_area_, visible_area_.size(), std::move(gpu_memory_buffer),
dummy_mailbox, base::DoNothing() /* mailbox_holder_release_cb */,
base::TimeTicks::Now().since_origin());
} else {
const auto& mojo_handle = video_frame_data_[frame_index_++].mojo_handle;
auto dup_handle =
mojo_handle->Clone(mojo::SharedBufferHandle::AccessMode::READ_WRITE);
if (!dup_handle.is_valid()) {
LOG(ERROR) << "Failed duplicating mojo handle";
return nullptr;
}
std::vector<uint32_t> offsets(layout_->planes().size());
std::vector<int32_t> strides(layout_->planes().size());
for (size_t i = 0; i < layout_->planes().size(); i++) {
offsets[i] = layout_->planes()[i].offset;
strides[i] = layout_->planes()[i].stride;
}
const size_t video_frame_size =
layout_->planes().back().offset + layout_->planes().back().size;
return MojoSharedBufferVideoFrame::Create(
layout_->format(), layout_->coded_size(), visible_area_,
visible_area_.size(), std::move(dup_handle), video_frame_size, offsets,
strides, base::TimeTicks::Now().since_origin());
}
const size_t video_frame_size =
layout_->planes().back().offset + layout_->planes().back().size;
return MojoSharedBufferVideoFrame::Create(
layout_->format(), layout_->coded_size(), visible_area_,
visible_area_.size(), std::move(dup_handle), video_frame_size, offsets,
strides, base::TimeTicks::Now().since_origin());
}
void AlignedDataHelper::InitializeAlignedMemoryFrames(
......@@ -428,6 +489,51 @@ void AlignedDataHelper::InitializeAlignedMemoryFrames(
}
}
void AlignedDataHelper::InitializeGpuMemoryBufferFrames(
const std::vector<uint8_t>& stream,
const VideoPixelFormat pixel_format,
const gfx::Size& coded_size) {
#if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
layout_ = GetPlatformVideoFrameLayout(
gpu_memory_buffer_factory_, pixel_format, visible_area_.size(),
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE);
ASSERT_TRUE(layout_) << "Failed getting platform VideoFrameLayout";
std::vector<size_t> src_plane_rows;
size_t src_video_frame_size = 0;
auto src_layout = GetAlignedVideoFrameLayout(
pixel_format, visible_area_.size(), 1u /* alignment */, &src_plane_rows,
&src_video_frame_size);
LOG_ASSERT(stream.size() % src_video_frame_size == 0U)
<< "Stream byte size is not a product of calculated frame byte size";
const size_t num_planes = VideoFrame::NumPlanes(pixel_format);
const uint8_t* src_frame_ptr = &stream[0];
for (size_t i = 0; i < num_frames_; i++) {
auto memory_frame =
VideoFrame::CreateFrame(pixel_format, coded_size, visible_area_,
visible_area_.size(), base::TimeDelta());
LOG_ASSERT(!!memory_frame) << "Failed creating VideoFrame";
for (size_t j = 0; j < num_planes; j++) {
libyuv::CopyPlane(src_frame_ptr + src_layout.planes()[j].offset,
src_layout.planes()[j].stride, memory_frame->data(j),
memory_frame->stride(j), src_layout.planes()[j].stride,
src_plane_rows[j]);
}
src_frame_ptr += src_video_frame_size;
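// Clone the CPU-backed frame into a platform frame with |layout_| so the
// pixel data lands in a buffer that can be exported as a
// GpuMemoryBufferHandle (DMA-buf backed on Chrome OS).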
auto frame = CloneVideoFrame(
gpu_memory_buffer_factory_, memory_frame.get(), *layout_,
VideoFrame::STORAGE_GPU_MEMORY_BUFFER,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE);
LOG_ASSERT(!!frame) << "Failed creating GpuMemoryBuffer VideoFrame";
auto gmb_handle = CreateGpuMemoryBufferHandle(frame.get());
LOG_ASSERT(!gmb_handle.is_null())
<< "Failed creating GpuMemoryBufferHandle";
video_frame_data_.push_back(VideoFrameData(std::move(gmb_handle)));
}
#endif // BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
}
// static
VideoFrameLayout AlignedDataHelper::GetAlignedVideoFrameLayout(
VideoPixelFormat pixel_format,
......
......@@ -28,6 +28,10 @@
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
namespace gpu {
class GpuMemoryBufferFactory;
} // namespace gpu
namespace media {
namespace test {
class Video;
......@@ -189,14 +193,20 @@ class AlignedAllocator : public std::allocator<T> {
};
// Helper to align data and extract frames from raw video streams.
// TODO(crbug.com/1045825): Reduce the number of data copies performed.
// GetNextFrame() returns VideoFrames with the specified |storage_type|.
// MojoSharedBuffer-based VideoFrames are aligned to the specified
// |alignment|, while the layout of GpuMemoryBuffer-based VideoFrames is
// determined by the GpuMemoryBuffer allocation backend.
class AlignedDataHelper {
public:
AlignedDataHelper(const std::vector<uint8_t>& stream,
uint32_t num_frames,
VideoPixelFormat pixel_format,
const gfx::Rect& visible_area,
const gfx::Size& coded_size);
AlignedDataHelper(
const std::vector<uint8_t>& stream,
uint32_t num_frames,
VideoPixelFormat pixel_format,
const gfx::Rect& visible_area,
const gfx::Size& coded_size,
VideoFrame::StorageType storage_type,
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory);
~AlignedDataHelper();
// Compute and return the next frame to be sent to the encoder.
......@@ -224,12 +234,20 @@ class AlignedDataHelper {
void InitializeAlignedMemoryFrames(const std::vector<uint8_t>& stream,
const VideoPixelFormat pixel_format,
const gfx::Size& coded_size);
// Create GpuMemoryBuffer-based VideoFrames whose layout is determined by
// the GpuMemoryBuffer allocation backend (e.g. minigbm).
void InitializeGpuMemoryBufferFrames(const std::vector<uint8_t>& stream,
const VideoPixelFormat pixel_format,
const gfx::Size& coded_size);
// The index of VideoFrame to be read next.
uint32_t frame_index_ = 0;
// The number of frames in the video stream.
const uint32_t num_frames_;
const VideoFrame::StorageType storage_type_;
gpu::GpuMemoryBufferFactory* const gpu_memory_buffer_factory_;
// The layout of VideoFrames returned by GetNextFrame().
base::Optional<VideoFrameLayout> layout_;
const gfx::Rect visible_area_;
......
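A minimal construction sketch for the new GpuMemoryBuffer path, mirroring the call site in VideoEncoderClient::RequireBitstreamBuffers(); |gmb_factory| and |coded_size| stand in for values the caller already owns:

auto helper = std::make_unique<media::test::AlignedDataHelper>(
    video->Data(), video->NumFrames(), video->PixelFormat(),
    gfx::Rect(video->Resolution()), coded_size,
    media::VideoFrame::STORAGE_GPU_MEMORY_BUFFER, gmb_factory);
scoped_refptr<media::VideoFrame> frame = helper->GetNextFrame();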
......@@ -91,11 +91,12 @@ class VideoEncoderTest : public ::testing::Test {
public:
std::unique_ptr<VideoEncoder> CreateVideoEncoder(
Video* video,
VideoEncoderClientConfig config) {
const VideoEncoderClientConfig& config) {
LOG_ASSERT(video);
auto video_encoder =
VideoEncoder::Create(config, CreateBitstreamProcessors(video, config));
VideoEncoder::Create(config, g_env->GetGpuMemoryBufferFactory(),
CreateBitstreamProcessors(video, config));
LOG_ASSERT(video_encoder);
if (!video_encoder->Initialize(video))
......@@ -107,7 +108,7 @@ class VideoEncoderTest : public ::testing::Test {
private:
std::vector<std::unique_ptr<BitstreamProcessor>> CreateBitstreamProcessors(
Video* video,
VideoEncoderClientConfig config) {
const VideoEncoderClientConfig& config) {
std::vector<std::unique_ptr<BitstreamProcessor>> bitstream_processors;
if (!g_env->IsBitstreamValidatorEnabled()) {
return bitstream_processors;
......@@ -250,7 +251,8 @@ TEST_F(VideoEncoderTest, Initialize) {
// are triggered upon destroying.
TEST_F(VideoEncoderTest, DestroyBeforeInitialize) {
VideoEncoderClientConfig config(g_env->Video(), g_env->Profile());
auto video_encoder = VideoEncoder::Create(config);
auto video_encoder =
VideoEncoder::Create(config, g_env->GetGpuMemoryBufferFactory());
EXPECT_NE(video_encoder, nullptr);
}
......@@ -349,6 +351,23 @@ TEST_F(VideoEncoderTest, DynamicFramerateChange) {
EXPECT_EQ(encoder->GetFrameReleasedCount(), config.num_frames_to_encode);
EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}
TEST_F(VideoEncoderTest, FlushAtEndOfStream_NV12Dmabuf) {
auto nv12_video = g_env->Video()->ConvertToNV12();
ASSERT_TRUE(nv12_video);
VideoEncoderClientConfig config(nv12_video.get(), g_env->Profile());
config.input_storage_type =
VideoEncodeAccelerator::Config::StorageType::kDmabuf;
auto encoder = CreateVideoEncoder(nv12_video.get(), config);
encoder->Encode();
EXPECT_TRUE(encoder->WaitForFlushDone());
EXPECT_EQ(encoder->GetFlushDoneCount(), 1u);
EXPECT_EQ(encoder->GetFrameReleasedCount(), nv12_video->NumFrames());
EXPECT_TRUE(encoder->WaitForBitstreamProcessors());
}
} // namespace test
} // namespace media
......
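As a usage note, the new case can be run on its own with a standard gtest filter, assuming the usual invocation of the test binary on a device:

./video_encode_accelerator_tests --gtest_filter=VideoEncoderTest.FlushAtEndOfStream_NV12Dmabuf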