Commit 09825450 authored by Andres Calderon Jaramillo, committed by Chromium LUCI CQ

media/platform_vf_utils: Handle null GMB factory.

This CL handles a null GpuMemoryBufferFactory in
AllocateGpuMemoryBufferHandle() so that the caller is not obligated to
pass a valid factory. This is in preparation for follow-up CLs to
remove the need for video encode accelerators to have access to a
GpuMemoryBufferFactory.

When the GpuMemoryBufferFactory is null, we just use the render node
directly to allocate a native buffer object and then we wrap it into a
GpuMemoryBufferHandle.

So, why do we still keep the option to have a non-null
GpuMemoryBufferFactory? This is because that path is still useful in the
decoding case: when a buffer is created with the GpuMemoryBufferFactory,
the corresponding NativePixmap is cached [1]; eventually, a SharedImage
needs to be created and to do so we pass the GpuMemoryBufferHandle
obtained from the factory [2]; when the SharedImage system needs to
create the GLImage for the buffer, it passes the handle to the
factory [3] which in turn uses it to look up the cached
NativePixmap [4]. This saves us from going through a gbm_bo_import()
(which also implies a thread hop to the DRM thread).

So, in conclusion, for buffers that need to be displayed later, it's
still better to use the GMB factory owned by the GpuServiceImpl. For
encoders this is not the case.

[1] https://source.chromium.org/chromium/chromium/src/+/master:gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc;l=319;drc=d81c5852498699fe3cd812e78d31c77c28e29281
[2] https://source.chromium.org/chromium/chromium/src/+/master:media/gpu/chromeos/mailbox_video_frame_converter.cc;l=355-359;drc=d81c5852498699fe3cd812e78d31c77c28e29281
[3] https://source.chromium.org/chromium/chromium/src/+/master:gpu/command_buffer/service/shared_image_backing_gl_texture.cc;l=355-356;drc=d81c5852498699fe3cd812e78d31c77c28e29281
[4] https://source.chromium.org/chromium/chromium/src/+/master:gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc;l=164-172;drc=d81c5852498699fe3cd812e78d31c77c28e29281

Bug: b:173167846
Test: video.EncodeAccel.h264_720p_nv12_dmabuf on elm
Change-Id: I7b730eb843da7503c61bffe30dce7a5ce51dec60
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2568937
Commit-Queue: Andres Calderon Jaramillo <andrescj@chromium.org>
Reviewed-by: Miguel Casas <mcasas@chromium.org>
Reviewed-by: Hirokazu Honda <hiroh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#834608}
parent f3fe0da5
......@@ -78,6 +78,7 @@ source_set("common") {
deps = [
":fourcc",
"//base",
"//build/config/linux/libdrm",
"//gpu/ipc/common:common",
"//media",
"//media/gpu:buildflags",
......@@ -88,6 +89,8 @@ source_set("common") {
"//ui/gfx:buffer_types",
"//ui/gfx:memory_buffer",
"//ui/gfx/geometry",
"//ui/gfx/linux:drm",
"//ui/gfx/linux:gbm",
"//ui/gl",
]
}
......
......@@ -4,13 +4,19 @@
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include <drm_fourcc.h>
#include <xf86drm.h>
#include <limits>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/scoped_file.h"
#include "base/no_destructor.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
......@@ -23,24 +29,127 @@
#include "media/gpu/buffer_validation.h"
#include "media/gpu/macros.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/linux/drm_util_linux.h"
#include "ui/gfx/linux/gbm_buffer.h"
#include "ui/gfx/linux/gbm_device.h"
#include "ui/gfx/linux/gbm_util.h"
#include "ui/gfx/linux/gbm_wrapper.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
#include "ui/gfx/native_pixmap.h"
namespace media {
namespace {
// GbmDeviceWrapper is a singleton that provides thread-safe access to a
// ui::GbmDevice for the purposes of creating native BOs. The ui::GbmDevice is
// initialized with the first non-vgem render node found that works starting at
// /dev/dri/renderD128. Note that we have our own FD to the render node (i.e.,
// it's not shared with other components). Therefore, there should not be any
// concurrency issues if other components in the GPU process (like the VA-API
// driver) access the render node using their own FD.
class GbmDeviceWrapper {
public:
GbmDeviceWrapper(const GbmDeviceWrapper&) = delete;
GbmDeviceWrapper& operator=(const GbmDeviceWrapper&) = delete;
static GbmDeviceWrapper* Get() {
static base::NoDestructor<GbmDeviceWrapper> gbm_device_wrapper;
return gbm_device_wrapper.get();
}
// Creates a native BO and returns it as a GpuMemoryBufferHandle. Returns
// gfx::GpuMemoryBufferHandle() on failure.
gfx::GpuMemoryBufferHandle CreateGpuMemoryBuffer(
gfx::BufferFormat format,
const gfx::Size& size,
gfx::BufferUsage buffer_usage) {
base::AutoLock lock(lock_);
if (!gbm_device_)
return gfx::GpuMemoryBufferHandle();
const int fourcc_format = ui::GetFourCCFormatFromBufferFormat(format);
if (fourcc_format == DRM_FORMAT_INVALID)
return gfx::GpuMemoryBufferHandle();
const uint32_t flags = ui::BufferUsageToGbmFlags(buffer_usage);
std::unique_ptr<ui::GbmBuffer> buffer =
gbm_device_->CreateBuffer(fourcc_format, size, flags);
if (!buffer)
return gfx::GpuMemoryBufferHandle();
gfx::NativePixmapHandle native_pixmap_handle = buffer->ExportHandle();
if (native_pixmap_handle.planes.empty())
return gfx::GpuMemoryBufferHandle();
CHECK_LT(next_gpu_memory_buffer_id_, std::numeric_limits<int>::max());
const gfx::GpuMemoryBufferId gpu_memory_buffer_id(
next_gpu_memory_buffer_id_++);
gfx::GpuMemoryBufferHandle gmb_handle;
gmb_handle.type = gfx::GpuMemoryBufferType::NATIVE_PIXMAP;
gmb_handle.id = gpu_memory_buffer_id;
gmb_handle.native_pixmap_handle = std::move(native_pixmap_handle);
return gmb_handle;
}
private:
GbmDeviceWrapper() {
constexpr char kRenderNodeFilePattern[] = "/dev/dri/renderD%d";
// This loop ends on either the first card that does not exist or the first
// one that results in the creation of a gbm device.
for (int i = 128;; i++) {
base::FilePath dev_path(FILE_PATH_LITERAL(
base::StringPrintf(kRenderNodeFilePattern, i).c_str()));
render_node_file_ =
base::File(dev_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
if (!render_node_file_.IsValid())
return;
// Skip the virtual graphics memory manager device.
drmVersionPtr version =
drmGetVersion(render_node_file_.GetPlatformFile());
if (!version)
continue;
std::string version_name(
version->name,
base::checked_cast<std::string::size_type>(version->name_len));
drmFreeVersion(version);
if (base::LowerCaseEqualsASCII(version_name, "vgem"))
continue;
gbm_device_ = ui::CreateGbmDevice(render_node_file_.GetPlatformFile());
if (gbm_device_)
return;
}
}
~GbmDeviceWrapper() = default;
friend class base::NoDestructor<GbmDeviceWrapper>;
base::Lock lock_;
base::File render_node_file_ GUARDED_BY(lock_);
std::unique_ptr<ui::GbmDevice> gbm_device_ GUARDED_BY(lock_);
int next_gpu_memory_buffer_id_ GUARDED_BY(lock_) = 0;
};
gfx::GpuMemoryBufferHandle AllocateGpuMemoryBufferHandle(
gpu::GpuMemoryBufferFactory* factory,
VideoPixelFormat pixel_format,
const gfx::Size& coded_size,
const gfx::Rect& visible_rect,
gfx::BufferUsage buffer_usage) {
DCHECK(factory);
gfx::BufferUsage buffer_usage,
base::ScopedClosureRunner& destroy_cb) {
DCHECK(factory ||
buffer_usage ==
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE);
gfx::GpuMemoryBufferHandle gmb_handle;
auto buffer_format = VideoPixelFormatToGfxBufferFormat(pixel_format);
if (!buffer_format)
return gmb_handle;
if (!factory) {
return GbmDeviceWrapper::Get()->CreateGpuMemoryBuffer(
*buffer_format, coded_size, buffer_usage);
}
int gpu_memory_buffer_id;
{
static base::NoDestructor<base::Lock> id_lock;
......@@ -50,7 +159,6 @@ gfx::GpuMemoryBufferHandle AllocateGpuMemoryBufferHandle(
gpu_memory_buffer_id = next_gpu_memory_buffer_id++;
}
// TODO(hiroh): Rename the client id to more generic one.
gmb_handle = factory->CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferId(gpu_memory_buffer_id), coded_size,
/*framebuffer_size=*/GetRectSizeFromOrigin(visible_rect), *buffer_format,
......@@ -59,6 +167,12 @@ gfx::GpuMemoryBufferHandle AllocateGpuMemoryBufferHandle(
DCHECK(gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP ||
VideoFrame::NumPlanes(pixel_format) ==
gmb_handle.native_pixmap_handle.planes.size());
if (gmb_handle.is_null())
return gmb_handle;
destroy_cb.ReplaceClosure(
base::BindOnce(&gpu::GpuMemoryBufferFactory::DestroyGpuMemoryBuffer,
base::Unretained(factory), gmb_handle.id,
gpu::kPlatformVideoFramePoolClientId));
return gmb_handle;
}
......@@ -72,17 +186,11 @@ scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
const gfx::Size& natural_size,
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage) {
DCHECK(factory);
auto gmb_handle = AllocateGpuMemoryBufferHandle(
factory, pixel_format, coded_size, visible_rect, buffer_usage);
if (gmb_handle.is_null())
return nullptr;
base::ScopedClosureRunner destroy_cb(
base::BindOnce(&gpu::GpuMemoryBufferFactory::DestroyGpuMemoryBuffer,
base::Unretained(factory), gmb_handle.id,
gpu::kPlatformVideoFramePoolClientId));
if (gmb_handle.type != gfx::NATIVE_PIXMAP)
base::ScopedClosureRunner destroy_cb((base::DoNothing()));
auto gmb_handle =
AllocateGpuMemoryBufferHandle(factory, pixel_format, coded_size,
visible_rect, buffer_usage, destroy_cb);
if (gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP)
return nullptr;
auto buffer_format = VideoPixelFormatToGfxBufferFormat(pixel_format);
......@@ -114,17 +222,11 @@ scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
const gfx::Size& natural_size,
base::TimeDelta timestamp,
gfx::BufferUsage buffer_usage) {
DCHECK(factory);
auto gmb_handle = AllocateGpuMemoryBufferHandle(
factory, pixel_format, coded_size, visible_rect, buffer_usage);
if (gmb_handle.is_null())
return nullptr;
base::ScopedClosureRunner destroy_cb(
base::BindOnce(&gpu::GpuMemoryBufferFactory::DestroyGpuMemoryBuffer,
base::Unretained(factory), gmb_handle.id,
gpu::kPlatformVideoFramePoolClientId));
if (gmb_handle.type != gfx::NATIVE_PIXMAP)
base::ScopedClosureRunner destroy_cb((base::DoNothing()));
auto gmb_handle =
AllocateGpuMemoryBufferHandle(factory, pixel_format, coded_size,
visible_rect, buffer_usage, destroy_cb);
if (gmb_handle.is_null() || gmb_handle.type != gfx::NATIVE_PIXMAP)
return nullptr;
std::vector<ColorPlaneLayout> planes;
......@@ -148,7 +250,6 @@ scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
if (!frame)
return nullptr;
// We need to have the factory drop its reference to the native pixmap.
frame->AddDestructionObserver(destroy_cb.Release());
return frame;
}
......
......@@ -23,7 +23,13 @@ namespace media {
// Create GpuMemoryBuffer-based media::VideoFrame with |buffer_usage|.
// See //media/base/video_frame.h for other parameters.
// |gpu_memory_buffer_factory| must outlive the returned VideoFrame.
// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
// GpuMemoryBuffer and it must outlive the returned VideoFrame. If it's null,
// the buffer is allocated using the render node (this is intended to be used
// only for the internals of video encoding when the usage is
// SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's safe to call this function
// concurrently from multiple threads (as long as either
// |gpu_memory_buffer_factory| is thread-safe or nullptr).
MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
......@@ -35,7 +41,13 @@ MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreateGpuMemoryBufferVideoFrame(
// Create platform dependent media::VideoFrame with |buffer_usage|.
// See //media/base/video_frame.h for other parameters.
// |gpu_memory_buffer_factory| must outlive the returned VideoFrame.
// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
// video frame's storage and it must outlive the returned VideoFrame. If it's
// null, the buffer is allocated using the render node (this is intended to be
// used only for the internals of video encoding when the usage is
// SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's safe to call this function
// concurrently from multiple threads (as long as either
// |gpu_memory_buffer_factory| is thread-safe or nullptr).
MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
......@@ -48,6 +60,12 @@ MEDIA_GPU_EXPORT scoped_refptr<VideoFrame> CreatePlatformVideoFrame(
// Get VideoFrameLayout of platform dependent video frame with |pixel_format|,
// |coded_size| and |buffer_usage|. This function is not cost-free as this
// allocates a platform dependent video frame.
// If |gpu_memory_buffer_factory| is not null, it's used to allocate the
// video frame's storage. If it's null, the storage is allocated using the
// render node (this is intended to be used only for the internals of video
// encoding when the usage is SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE). It's
// safe to call this function concurrently from multiple threads (as long as
// either |gpu_memory_buffer_factory| is thread-safe or nullptr).
MEDIA_GPU_EXPORT base::Optional<VideoFrameLayout> GetPlatformVideoFrameLayout(
gpu::GpuMemoryBufferFactory* gpu_memory_buffer_factory,
VideoPixelFormat pixel_format,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment