Commit 7935975e authored by Hirokazu Honda's avatar Hirokazu Honda Committed by Commit Bot

media/gpu/vaapiVEA: Support DMABuf-backed video frame input on Encode()

This enables VaapiVEA to encode DMABuf-backed video frames. Because the
VA surface format is NV12, only the NV12 format is acceptable.
This change is tested by crrev.com/c/1295636

BUG=chromium:895230
TEST=VEA unittest --native_input

Change-Id: Ib3c09363936cdf3099bb9ed9fc14eb24bd0e70fe
Reviewed-on: https://chromium-review.googlesource.com/c/1306944
Commit-Queue: Hirokazu Honda <hiroh@chromium.org>
Reviewed-by: default avatarMiguel Casas <mcasas@chromium.org>
Reviewed-by: default avatarPawel Osciak <posciak@chromium.org>
Cr-Commit-Position: refs/heads/master@{#606519}
parent 7b445fc9
...@@ -6,12 +6,17 @@ ...@@ -6,12 +6,17 @@
#include "base/bind_helpers.h" #include "base/bind_helpers.h"
#include "base/memory/ptr_util.h" #include "base/memory/ptr_util.h"
#include "build/build_config.h"
#include "media/gpu/format_utils.h" #include "media/gpu/format_utils.h"
#include "media/gpu/vaapi/vaapi_picture_factory.h" #include "media/gpu/vaapi/vaapi_picture_factory.h"
#include "media/gpu/vaapi/vaapi_utils.h" #include "media/gpu/vaapi/vaapi_utils.h"
#include "media/gpu/vaapi/vaapi_wrapper.h" #include "media/gpu/vaapi/vaapi_wrapper.h"
#include "media/video/picture.h" #include "media/video/picture.h"
#if defined(OS_POSIX)
#include "media/gpu/vaapi/vaapi_picture_native_pixmap.h"
#endif
#define VLOGF(level) VLOG(level) << __func__ << "(): " #define VLOGF(level) VLOG(level) << __func__ << "(): "
namespace media { namespace media {
...@@ -19,42 +24,10 @@ namespace test { ...@@ -19,42 +24,10 @@ namespace test {
namespace { namespace {
constexpr uint32_t kDummyPictureBufferId = 0;
// This is equal to GBM_FORMAT_MOD_NONE.
constexpr uint64_t kDummyGbmModifier = 0;
constexpr VAImageFormat kImageFormatNV12{.fourcc = VA_FOURCC_NV12, constexpr VAImageFormat kImageFormatNV12{.fourcc = VA_FOURCC_NV12,
.byte_order = VA_LSB_FIRST, .byte_order = VA_LSB_FIRST,
.bits_per_pixel = 12}; .bits_per_pixel = 12};
// Builds a gfx::GpuMemoryBufferHandle of type NATIVE_PIXMAP that wraps the
// DMABuf planes of |video_frame|. Each dmabuf fd is duplicated so the returned
// handle owns its own fds independently of |video_frame|'s lifetime.
// Returns an empty (null) handle on failure.
gfx::GpuMemoryBufferHandle CreateGMBHandleFromVideoFrame(
const VideoFrame* const video_frame) {
// Caller must hand in a DMABuf-backed frame; this cannot wrap shmem frames.
DCHECK(video_frame->HasDmaBufs());
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::NATIVE_PIXMAP;
const VideoFrameLayout& layout = video_frame->layout();
size_t num_planes = layout.num_planes();
const std::vector<VideoFrameLayout::Plane>& planes = layout.planes();
// Copy per-plane stride/offset into the native pixmap handle.
// NOTE(review): the third ctor argument receives the plane index |i| and the
// fourth |kDummyGbmModifier| — confirm this matches the NativePixmapPlane
// constructor's (stride, offset, size, modifier) parameter order; passing the
// index as the plane size looks suspicious.
for (size_t i = 0; i < num_planes; i++) {
handle.native_pixmap_handle.planes.emplace_back(
planes[i].stride, planes[i].offset, i, kDummyGbmModifier);
}
const auto& fds = video_frame->DmabufFds();
for (const auto& fd : fds) {
// dup() each fd (retrying on EINTR) so the handle's copies can be closed
// without affecting the frame's originals.
int dup_fd = HANDLE_EINTR(dup(fd.get()));
if (dup_fd == -1) {
VLOGF(1) << "Failed duplicating dmabuf fd";
// Return a null handle; fds already dup'ed into |handle| are released
// when |handle| (and its base::FileDescriptor entries with
// auto_close=true) goes out of scope — TODO confirm this cleanup path.
return gfx::GpuMemoryBufferHandle();
}
handle.native_pixmap_handle.fds.emplace_back(
base::FileDescriptor(dup_fd, true));
}
return handle;
}
void DeallocateBuffers(std::unique_ptr<ScopedVAImage> va_image) { void DeallocateBuffers(std::unique_ptr<ScopedVAImage> va_image) {
// Destructing ScopedVAImage releases its owned memory. // Destructing ScopedVAImage releases its owned memory.
DCHECK(va_image->IsValid()); DCHECK(va_image->IsValid());
...@@ -130,6 +103,7 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map( ...@@ -130,6 +103,7 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map(
} }
const gfx::Size& coded_size = video_frame->coded_size(); const gfx::Size& coded_size = video_frame->coded_size();
constexpr int32_t kDummyPictureBufferId = 0;
// Passing empty callbacks is ok, because given PictureBuffer doesn't have // Passing empty callbacks is ok, because given PictureBuffer doesn't have
// texture id and thus these callbacks will never called. // texture id and thus these callbacks will never called.
...@@ -141,7 +115,12 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map( ...@@ -141,7 +115,12 @@ scoped_refptr<VideoFrame> VaapiDmaBufVideoFrameMapper::Map(
return nullptr; return nullptr;
} }
auto gmb_handle = CreateGMBHandleFromVideoFrame(video_frame.get()); gfx::GpuMemoryBufferHandle gmb_handle;
#if defined(OS_POSIX)
gmb_handle =
VaapiPictureNativePixmap::CreateGpuMemoryBufferHandleFromVideoFrame(
video_frame.get());
#endif
if (gmb_handle.is_null()) { if (gmb_handle.is_null()) {
VLOGF(1) << "Failed to CreateGMBHandleFromVideoFrame."; VLOGF(1) << "Failed to CreateGMBHandleFromVideoFrame.";
return nullptr; return nullptr;
......
...@@ -72,4 +72,30 @@ unsigned VaapiPictureNativePixmap::BufferFormatToInternalFormat( ...@@ -72,4 +72,30 @@ unsigned VaapiPictureNativePixmap::BufferFormatToInternalFormat(
} }
} }
// static
// Builds a gfx::GpuMemoryBufferHandle of type NATIVE_PIXMAP that wraps the
// DMABuf planes of |video_frame|. Each dmabuf fd is duplicated, so the
// returned handle owns its fds independently of |video_frame|'s lifetime.
// Returns an empty (null) handle if duplicating any fd fails.
gfx::GpuMemoryBufferHandle
VaapiPictureNativePixmap::CreateGpuMemoryBufferHandleFromVideoFrame(
const VideoFrame* const video_frame) {
// Only DMABuf-backed frames can be wrapped as a native pixmap.
DCHECK(video_frame->HasDmaBufs());
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::NATIVE_PIXMAP;
// Record each plane's stride and offset. The third argument (plane size) is
// passed as 0 here — presumably unused by the consumer of this handle;
// TODO confirm against NativePixmapPlane's contract.
for (const auto& plane : video_frame->layout().planes()) {
handle.native_pixmap_handle.planes.emplace_back(plane.stride, plane.offset,
0);
}
const auto& fds = video_frame->DmabufFds();
for (const auto& fd : fds) {
// dup() the fd (retrying on EINTR) so closing the handle's copy does not
// invalidate the frame's original fd.
int dup_fd = HANDLE_EINTR(dup(fd.get()));
if (dup_fd == -1) {
// PLOG appends errno, which VLOGF would not.
PLOG(ERROR) << "Failed duplicating dmabuf fd";
// Return a null handle; fds already dup'ed into |handle| are closed via
// the auto_close flag on base::FileDescriptor when |handle| is
// destroyed — TODO confirm this cleanup path.
return gfx::GpuMemoryBufferHandle();
}
handle.native_pixmap_handle.fds.emplace_back(
base::FileDescriptor(dup_fd, true));
}
return handle;
}
} // namespace media } // namespace media
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "media/gpu/vaapi/vaapi_picture.h" #include "media/gpu/vaapi/vaapi_picture.h"
#include "ui/gfx/buffer_types.h" #include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/size.h" #include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
namespace gl { namespace gl {
class GLImage; class GLImage;
...@@ -23,6 +24,7 @@ class NativePixmap; ...@@ -23,6 +24,7 @@ class NativePixmap;
namespace media { namespace media {
class VideoFrame;
class VaapiWrapper; class VaapiWrapper;
// Implementation of VaapiPicture based on NativePixmaps. // Implementation of VaapiPicture based on NativePixmaps.
...@@ -39,6 +41,9 @@ class VaapiPictureNativePixmap : public VaapiPicture { ...@@ -39,6 +41,9 @@ class VaapiPictureNativePixmap : public VaapiPicture {
uint32_t texture_target); uint32_t texture_target);
~VaapiPictureNativePixmap() override; ~VaapiPictureNativePixmap() override;
static gfx::GpuMemoryBufferHandle CreateGpuMemoryBufferHandleFromVideoFrame(
const VideoFrame* const video_frame);
// VaapiPicture implementation. // VaapiPicture implementation.
bool DownloadFromSurface(const scoped_refptr<VASurface>& va_surface) override; bool DownloadFromSurface(const scoped_refptr<VASurface>& va_surface) override;
bool AllowOverlay() const override; bool AllowOverlay() const override;
......
...@@ -24,15 +24,22 @@ ...@@ -24,15 +24,22 @@
#include "base/stl_util.h" #include "base/stl_util.h"
#include "base/threading/thread_task_runner_handle.h" #include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "media/base/bind_to_current_loop.h" #include "media/base/bind_to_current_loop.h"
#include "media/base/unaligned_shared_memory.h" #include "media/base/unaligned_shared_memory.h"
#include "media/base/video_bitrate_allocation.h" #include "media/base/video_bitrate_allocation.h"
#include "media/gpu/format_utils.h"
#include "media/gpu/h264_dpb.h" #include "media/gpu/h264_dpb.h"
#include "media/gpu/vaapi/h264_encoder.h" #include "media/gpu/vaapi/h264_encoder.h"
#include "media/gpu/vaapi/vaapi_common.h" #include "media/gpu/vaapi/vaapi_common.h"
#include "media/gpu/vaapi/vaapi_picture_factory.h"
#include "media/gpu/vaapi/vp8_encoder.h" #include "media/gpu/vaapi/vp8_encoder.h"
#include "media/gpu/vp8_reference_frame_vector.h" #include "media/gpu/vp8_reference_frame_vector.h"
#if defined(OS_POSIX)
#include "media/gpu/vaapi/vaapi_picture_native_pixmap.h"
#endif
#define VLOGF(level) VLOG(level) << __func__ << "(): " #define VLOGF(level) VLOG(level) << __func__ << "(): "
#define DVLOGF(level) DVLOG(level) << __func__ << "(): " #define DVLOGF(level) DVLOG(level) << __func__ << "(): "
...@@ -51,10 +58,6 @@ namespace { ...@@ -51,10 +58,6 @@ namespace {
// if encoder requests less. // if encoder requests less.
constexpr size_t kMinNumFramesInFlight = 4; constexpr size_t kMinNumFramesInFlight = 4;
// Need 2 surfaces for each frame: one for input data and one for
// reconstructed picture, which is later used for reference.
constexpr size_t kNumSurfacesPerFrame = 2;
// Percentage of bitrate set to be targeted by the HW encoder. // Percentage of bitrate set to be targeted by the HW encoder.
constexpr unsigned int kTargetBitratePercentage = 90; constexpr unsigned int kTargetBitratePercentage = 90;
...@@ -84,6 +87,7 @@ class VaapiEncodeJob : public AcceleratedVideoEncoder::EncodeJob { ...@@ -84,6 +87,7 @@ class VaapiEncodeJob : public AcceleratedVideoEncoder::EncodeJob {
base::OnceClosure execute_cb, base::OnceClosure execute_cb,
scoped_refptr<VASurface> input_surface, scoped_refptr<VASurface> input_surface,
scoped_refptr<VASurface> reconstructed_surface, scoped_refptr<VASurface> reconstructed_surface,
std::unique_ptr<VaapiPicture> va_picture,
VABufferID coded_buffer_id); VABufferID coded_buffer_id);
VaapiEncodeJob* AsVaapiEncodeJob() override { return this; } VaapiEncodeJob* AsVaapiEncodeJob() override { return this; }
...@@ -106,6 +110,10 @@ class VaapiEncodeJob : public AcceleratedVideoEncoder::EncodeJob { ...@@ -106,6 +110,10 @@ class VaapiEncodeJob : public AcceleratedVideoEncoder::EncodeJob {
// for subsequent frames. // for subsequent frames.
const scoped_refptr<VASurface> reconstructed_surface_; const scoped_refptr<VASurface> reconstructed_surface_;
// VaapiPicture associated with |input_surface_|. This member exists only to
// keep the VaapiPicture alive for as long as |input_surface_| is alive; it is
// not otherwise used.
const std::unique_ptr<VaapiPicture> va_picture_;
// Buffer that will contain the output bitstream data for this frame. // Buffer that will contain the output bitstream data for this frame.
VABufferID coded_buffer_id_; VABufferID coded_buffer_id_;
...@@ -235,6 +243,22 @@ bool VaapiVideoEncodeAccelerator::Initialize(const Config& config, ...@@ -235,6 +243,22 @@ bool VaapiVideoEncodeAccelerator::Initialize(const Config& config,
return false; return false;
} }
if (config.storage_type.value_or(Config::StorageType::kShmem) ==
Config::StorageType::kDmabuf) {
#if !defined(USE_OZONE)
VLOGF(1) << "Native mode is only available on OZONE platform.";
return false;
#else
if (config.input_format != PIXEL_FORMAT_NV12) {
// TODO(crbug.com/894381): Support other formats.
VLOGF(1) << "Unsupported format for native input mode: "
<< VideoPixelFormatToString(config.input_format);
return false;
}
native_input_mode_ = true;
#endif // USE_OZONE
}
const SupportedProfiles& profiles = GetSupportedProfiles(); const SupportedProfiles& profiles = GetSupportedProfiles();
auto profile = find_if(profiles.begin(), profiles.end(), auto profile = find_if(profiles.begin(), profiles.end(),
[output_profile = config.output_profile]( [output_profile = config.output_profile](
...@@ -247,6 +271,12 @@ bool VaapiVideoEncodeAccelerator::Initialize(const Config& config, ...@@ -247,6 +271,12 @@ bool VaapiVideoEncodeAccelerator::Initialize(const Config& config,
return false; return false;
} }
if (native_input_mode_) {
VLOGF(2) << "DMABuf mode: VaapiVEA will accept DMABuf-backed VideoFrame on "
<< "Encode()";
vaapi_picture_factory_ = std::make_unique<VaapiPictureFactory>();
}
if (config.input_visible_size.width() > profile->max_resolution.width() || if (config.input_visible_size.width() > profile->max_resolution.width() ||
config.input_visible_size.height() > profile->max_resolution.height()) { config.input_visible_size.height() > profile->max_resolution.height()) {
VLOGF(1) << "Input size too big: " << config.input_visible_size.ToString() VLOGF(1) << "Input size too big: " << config.input_visible_size.ToString()
...@@ -308,20 +338,25 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) { ...@@ -308,20 +338,25 @@ void VaapiVideoEncodeAccelerator::InitializeTask(const Config& config) {
coded_size_ = encoder_->GetCodedSize(); coded_size_ = encoder_->GetCodedSize();
output_buffer_byte_size_ = encoder_->GetBitstreamBufferSize(); output_buffer_byte_size_ = encoder_->GetBitstreamBufferSize();
const size_t max_ref_frames = encoder_->GetMaxNumOfRefFrames(); const size_t max_ref_frames = encoder_->GetMaxNumOfRefFrames();
// Use at least kMinNumFramesInFlight if encoder requested less for // Use at least kMinNumFramesInFlight if encoder requested less for
// pipeline depth. // pipeline depth.
const size_t num_frames_in_flight = const size_t num_frames_in_flight =
std::max(kMinNumFramesInFlight, max_ref_frames); std::max(kMinNumFramesInFlight, max_ref_frames);
const size_t num_surfaces = (num_frames_in_flight + 1) * kNumSurfacesPerFrame;
DVLOGF(1) << "Frames in flight: " << num_frames_in_flight; DVLOGF(1) << "Frames in flight: " << num_frames_in_flight;
va_surface_release_cb_ = BindToCurrentLoop( va_surface_release_cb_ = BindToCurrentLoop(
base::Bind(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID, base::Bind(&VaapiVideoEncodeAccelerator::RecycleVASurfaceID,
base::Unretained(this))); base::Unretained(this)));
if (!vaapi_wrapper_->CreateSurfaces(VA_RT_FORMAT_YUV420, coded_size_, va_surfaces_per_video_frame_ =
num_surfaces, kNumSurfacesForOutputPicture +
&available_va_surface_ids_)) { (native_input_mode_ ? 0 : kNumSurfacesPerInputVideoFrame);
if (!vaapi_wrapper_->CreateSurfaces(
VA_RT_FORMAT_YUV420, coded_size_,
(num_frames_in_flight + 1) * va_surfaces_per_video_frame_,
&available_va_surface_ids_)) {
NOTIFY_ERROR(kPlatformFailureError, "Failed creating VASurfaces"); NOTIFY_ERROR(kPlatformFailureError, "Failed creating VASurfaces");
return; return;
} }
...@@ -459,8 +494,12 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob( ...@@ -459,8 +494,12 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
scoped_refptr<VideoFrame> frame, scoped_refptr<VideoFrame> frame,
bool force_keyframe) { bool force_keyframe) {
DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread()); DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
if (native_input_mode_ != frame->HasDmaBufs()) {
NOTIFY_ERROR(kPlatformFailureError, "Unexpected storage");
return nullptr;
}
if (available_va_surface_ids_.size() < kNumSurfacesPerFrame) { if (available_va_surface_ids_.size() < va_surfaces_per_video_frame_) {
DVLOGF(4) << "Not enough surfaces available"; DVLOGF(4) << "Not enough surfaces available";
return nullptr; return nullptr;
} }
...@@ -472,11 +511,48 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob( ...@@ -472,11 +511,48 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
return nullptr; return nullptr;
} }
static_assert(kNumSurfacesPerFrame == 2, "kNumSurfacesPerFrame must be 2"); VASurfaceID va_input_surface_id = VA_INVALID_ID;
std::unique_ptr<VaapiPicture> va_picture;
if (native_input_mode_) {
DCHECK(vaapi_picture_factory_);
if (frame->format() != PIXEL_FORMAT_NV12) {
NOTIFY_ERROR(kPlatformFailureError, "Unexpected format, expected NV12");
return nullptr;
}
constexpr int32_t kDummyPictureBufferId = 0;
// Passing empty callbacks is ok, because the given PictureBuffer doesn't have
// a texture id and thus these callbacks will never be called.
va_picture = vaapi_picture_factory_->Create(
vaapi_wrapper_, MakeGLContextCurrentCallback(), BindGLImageCallback(),
PictureBuffer(kDummyPictureBufferId, frame->coded_size()));
gfx::GpuMemoryBufferHandle gmb_handle;
#if defined(OS_POSIX)
gmb_handle =
VaapiPictureNativePixmap::CreateGpuMemoryBufferHandleFromVideoFrame(
frame.get());
#endif
if (gmb_handle.is_null()) {
NOTIFY_ERROR(kPlatformFailureError,
"Failed to create GMB handle from video frame");
return nullptr;
}
if (!va_picture->ImportGpuMemoryBufferHandle(
VideoPixelFormatToGfxBufferFormat(frame->format()),
std::move(gmb_handle))) {
NOTIFY_ERROR(kPlatformFailureError,
"Failed in ImportGpuMemoryBufferHandle");
return nullptr;
}
va_input_surface_id = va_picture->va_surface_id();
} else {
va_input_surface_id = available_va_surface_ids_.back();
available_va_surface_ids_.pop_back();
}
scoped_refptr<VASurface> input_surface = new VASurface( scoped_refptr<VASurface> input_surface = new VASurface(
available_va_surface_ids_.back(), coded_size_, va_input_surface_id, coded_size_, vaapi_wrapper_->va_surface_format(),
vaapi_wrapper_->va_surface_format(), va_surface_release_cb_); native_input_mode_ ? base::DoNothing() : va_surface_release_cb_);
available_va_surface_ids_.pop_back();
scoped_refptr<VASurface> reconstructed_surface = new VASurface( scoped_refptr<VASurface> reconstructed_surface = new VASurface(
available_va_surface_ids_.back(), coded_size_, available_va_surface_ids_.back(), coded_size_,
...@@ -487,11 +563,14 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob( ...@@ -487,11 +563,14 @@ scoped_refptr<VaapiEncodeJob> VaapiVideoEncodeAccelerator::CreateEncodeJob(
frame, force_keyframe, frame, force_keyframe,
base::BindOnce(&VaapiVideoEncodeAccelerator::ExecuteEncode, base::BindOnce(&VaapiVideoEncodeAccelerator::ExecuteEncode,
base::Unretained(this), input_surface->id()), base::Unretained(this), input_surface->id()),
input_surface, reconstructed_surface, coded_buffer_id); input_surface, reconstructed_surface, std::move(va_picture),
coded_buffer_id);
job->AddSetupCallback( if (!native_input_mode_) {
base::BindOnce(&VaapiVideoEncodeAccelerator::UploadFrame, job->AddSetupCallback(
base::Unretained(this), frame, input_surface->id())); base::BindOnce(&VaapiVideoEncodeAccelerator::UploadFrame,
base::Unretained(this), frame, input_surface->id()));
}
return job; return job;
} }
...@@ -701,10 +780,12 @@ VaapiEncodeJob::VaapiEncodeJob(scoped_refptr<VideoFrame> input_frame, ...@@ -701,10 +780,12 @@ VaapiEncodeJob::VaapiEncodeJob(scoped_refptr<VideoFrame> input_frame,
base::OnceClosure execute_cb, base::OnceClosure execute_cb,
scoped_refptr<VASurface> input_surface, scoped_refptr<VASurface> input_surface,
scoped_refptr<VASurface> reconstructed_surface, scoped_refptr<VASurface> reconstructed_surface,
std::unique_ptr<VaapiPicture> va_picture,
VABufferID coded_buffer_id) VABufferID coded_buffer_id)
: EncodeJob(input_frame, keyframe, std::move(execute_cb)), : EncodeJob(input_frame, keyframe, std::move(execute_cb)),
input_surface_(input_surface), input_surface_(input_surface),
reconstructed_surface_(reconstructed_surface), reconstructed_surface_(reconstructed_surface),
va_picture_(std::move(va_picture)),
coded_buffer_id_(coded_buffer_id) { coded_buffer_id_(coded_buffer_id) {
DCHECK(input_surface_); DCHECK(input_surface_);
DCHECK(reconstructed_surface_); DCHECK(reconstructed_surface_);
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
namespace media { namespace media {
class VaapiEncodeJob; class VaapiEncodeJob;
class VaapiPictureFactory;
// A VideoEncodeAccelerator implementation that uses VA-API // A VideoEncodeAccelerator implementation that uses VA-API
// (https://01.org/vaapi) for HW-accelerated video encode. // (https://01.org/vaapi) for HW-accelerated video encode.
class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
...@@ -63,6 +65,11 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator ...@@ -63,6 +65,11 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// Holds output buffers coming from the client ready to be filled. // Holds output buffers coming from the client ready to be filled.
struct BitstreamBufferRef; struct BitstreamBufferRef;
// one surface for input data.
// one surface for reconstructed picture, which is later used for reference.
static constexpr size_t kNumSurfacesPerInputVideoFrame = 1;
static constexpr size_t kNumSurfacesForOutputPicture = 1;
// //
// Tasks for each of the VEA interface calls to be executed on the // Tasks for each of the VEA interface calls to be executed on the
// encoder thread. // encoder thread.
...@@ -146,12 +153,28 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator ...@@ -146,12 +153,28 @@ class MEDIA_GPU_EXPORT VaapiVideoEncodeAccelerator
// Size in bytes required for output bitstream buffers. // Size in bytes required for output bitstream buffers.
size_t output_buffer_byte_size_; size_t output_buffer_byte_size_;
// This flag signals when the client is sending NV12 + DmaBuf-backed
// VideoFrames to encode, which allows for skipping a copy-adaptation on
// input.
bool native_input_mode_ = false;
// The number of va surfaces required for one video frame on Encode().
// In |native_input_mode_|, one surface for input data is created from DmaBufs
// of incoming VideoFrame. One surface for reconstructed picture is always
// needed, which is later used for reference.
// Therefore, |va_surfaces_per_video_frame_| is one in |native_input_mode_|,
// and two otherwise.
size_t va_surfaces_per_video_frame_;
// All of the members below must be accessed on the encoder_thread_, // All of the members below must be accessed on the encoder_thread_,
// while it is running. // while it is running.
// Encoder state. Encode tasks will only run in kEncoding state. // Encoder state. Encode tasks will only run in kEncoding state.
State state_; State state_;
// Creates VaapiPictures to wrap incoming DmaBufs in |native_input_mode_|.
std::unique_ptr<VaapiPictureFactory> vaapi_picture_factory_;
// Encoder instance managing video codec state and preparing encode jobs. // Encoder instance managing video codec state and preparing encode jobs.
std::unique_ptr<AcceleratedVideoEncoder> encoder_; std::unique_ptr<AcceleratedVideoEncoder> encoder_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment