Commit 3da0177b authored by Ted Meyer's avatar Ted Meyer Committed by Commit Bot

Add GLImageEGLPixmap for vaapi linux support.

This new image type can hook an X11 pixmap up to an egl pixmap surface,
which lets VaapiPictureNativePixmapAngle create xlib pixmaps and bind
them as surfaces.

With this cl, the flag --enable-accelerated-video-decode will be
functional on a modern intel graphics linux system while using ANGLE.

Bug: 1103510

Change-Id: I9c2bce16729be1e8f007b47da5a5160074f78483
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2288633
Commit-Queue: Ted Meyer <tmathmeyer@chromium.org>
Reviewed-by: default avatarDale Curtis <dalecurtis@chromium.org>
Reviewed-by: default avatarAndres Calderon Jaramillo <andrescj@chromium.org>
Reviewed-by: default avatarZhenyao Mo <zmo@chromium.org>
Reviewed-by: default avatarGeoff Lang <geofflang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#798952}
parent abaf2f5e
......@@ -114,6 +114,7 @@ enum class StatusCode : StatusCodeType {
kVaapiUnsupportedFormat = 0x0000070A,
kVaapiFailedToExportImage = 0x0000070B,
kVaapiBadImageSize = 0x0000070C,
kVaapiNoTexture = 0x0000070D,
// Special codes
kGenericErrorPleaseRemove = 0x79999999,
......
......@@ -4,27 +4,47 @@
#include "media/gpu/vaapi/vaapi_picture_native_pixmap_angle.h"
#include "base/file_descriptor_posix.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "media/gpu/vaapi/va_surface.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "ui/gfx/linux/gbm_buffer.h"
#include "ui/gfx/linux/gpu_memory_buffer_support_x11.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
#include "ui/gfx/native_pixmap.h"
#include "ui/base/ui_base_features.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image_native_pixmap.h"
#include "ui/gl/gl_image_egl_pixmap.h"
#include "ui/gl/scoped_binders.h"
namespace media {
namespace {
// Creates an X11 Pixmap of |size| whose color depth is copied from the root
// window, returning 0 (an invalid XID) on any failure. The caller owns the
// returned pixmap and must release it with XFreePixmap().
inline Pixmap CreatePixmap(const gfx::Size& size) {
auto* display = gfx::GetXDisplay();
if (!display)
return 0;
int screen = DefaultScreen(display);
auto root = XRootWindow(display, screen);
// NOTE(review): XRootWindow() returns a Window ID, not a Status; comparing
// it against BadValue (an X protocol error code) looks suspicious — confirm
// this is the intended failure check.
if (root == BadValue)
return 0;
XWindowAttributes win_attr = {};
// XGetWindowAttributes() returns 0 (Status failure) on error, see:
// https://tronche.com/gui/x/xlib/introduction/errors.html#Status
if (!XGetWindowAttributes(display, root, &win_attr))
return 0;
// TODO(tmathmeyer) should we use the depth from libva instead of root window?
return XCreatePixmap(display, root, size.width(), size.height(),
win_attr.depth);
}
} // namespace
VaapiPictureNativePixmapAngle::VaapiPictureNativePixmapAngle(
scoped_refptr<VaapiWrapper> vaapi_wrapper,
const MakeGLContextCurrentCallback& make_context_current_cb,
const BindGLImageCallback& bind_image_cb,
int32_t picture_buffer_id,
const gfx::Size& visible_size,
const gfx::Size& size,
const gfx::Size& visible_size,
uint32_t service_texture_id,
uint32_t client_texture_id,
uint32_t texture_target)
......@@ -39,8 +59,9 @@ VaapiPictureNativePixmapAngle::VaapiPictureNativePixmapAngle(
texture_target) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
// Check that they're either both 0 or both not 0 (tests will set both to 0)
DCHECK(!!service_texture_id == !!client_texture_id);
// Check that they're both not 0
DCHECK(service_texture_id);
DCHECK(client_texture_id);
}
VaapiPictureNativePixmapAngle::~VaapiPictureNativePixmapAngle() {
......@@ -49,20 +70,81 @@ VaapiPictureNativePixmapAngle::~VaapiPictureNativePixmapAngle() {
gl_image_->ReleaseTexImage(texture_target_);
DCHECK_EQ(glGetError(), static_cast<GLenum>(GL_NO_ERROR));
}
if (x_pixmap_) {
if (auto* display = gfx::GetXDisplay()) {
XFreePixmap(display, x_pixmap_);
}
}
}
Status VaapiPictureNativePixmapAngle::Allocate(gfx::BufferFormat format) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
NOTIMPLEMENTED();
return StatusCode::kGenericErrorPleaseRemove;
if (!(texture_id_ || client_texture_id_))
return StatusCode::kVaapiNoTexture;
if (!make_context_current_cb_ || !make_context_current_cb_.Run())
return StatusCode::kVaapiBadContext;
DCHECK(!features::IsUsingOzonePlatform());
auto image =
base::MakeRefCounted<gl::GLImageEGLPixmap>(visible_size_, format);
if (!image)
return StatusCode::kVaapiNoImage;
x_pixmap_ = CreatePixmap(visible_size_);
if (!x_pixmap_)
return StatusCode::kVaapiNoPixmap;
if (!image->Initialize(x_pixmap_))
return StatusCode::kVaapiFailedToInitializeImage;
gl::ScopedTextureBinder texture_binder(texture_target_, texture_id_);
if (!image->BindTexImage(texture_target_))
return StatusCode::kVaapiFailedToBindTexture;
gl_image_ = image;
DCHECK(bind_image_cb_);
if (!bind_image_cb_.Run(client_texture_id_, texture_target_, gl_image_,
/*can_bind_to_sampler=*/true)) {
return StatusCode::kVaapiFailedToBindImage;
}
return OkStatus();
}
bool VaapiPictureNativePixmapAngle::ImportGpuMemoryBufferHandle(
gfx::BufferFormat format,
gfx::GpuMemoryBufferHandle gpu_memory_buffer_handlee) {
gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) {
NOTREACHED();
return false;
}
bool VaapiPictureNativePixmapAngle::DownloadFromSurface(
scoped_refptr<VASurface> va_surface) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
NOTIMPLEMENTED();
if (!make_context_current_cb_ || !make_context_current_cb_.Run())
return false;
DCHECK(texture_id_);
gl::ScopedTextureBinder texture_binder(texture_target_, texture_id_);
// GL needs to re-bind the texture after the pixmap content is updated so that
// the compositor sees the updated contents (we found this out experimentally)
gl_image_->ReleaseTexImage(texture_target_);
DCHECK(gfx::Rect(va_surface->size()).Contains(gfx::Rect(visible_size_)));
if (!vaapi_wrapper_->PutSurfaceIntoPixmap(va_surface->id(), x_pixmap_,
visible_size_)) {
return false;
}
return gl_image_->BindTexImage(texture_target_);
}
// This implementation never instantiates its own VASurface; decoded content
// reaches the GL texture via an X pixmap (vaPutSurface path) instead, so
// there is no surface ID to report.
VASurfaceID VaapiPictureNativePixmapAngle::va_surface_id() const {
return VA_INVALID_ID;
}
} // namespace media
......@@ -13,6 +13,7 @@
#include "media/gpu/vaapi/vaapi_picture_native_pixmap.h"
#include "ui/gfx/buffer_types.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"
namespace media {
......@@ -39,8 +40,14 @@ class VaapiPictureNativePixmapAngle : public VaapiPictureNativePixmap {
bool ImportGpuMemoryBufferHandle(
gfx::BufferFormat format,
gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) override;
bool DownloadFromSurface(scoped_refptr<VASurface> va_surface) override;
// This native pixmap implementation never instantiates its own VASurfaces.
VASurfaceID va_surface_id() const override;
private:
Pixmap x_pixmap_ = 0;
DISALLOW_COPY_AND_ASSIGN(VaapiPictureNativePixmapAngle);
};
......
......@@ -25,6 +25,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/format_utils.h"
......@@ -43,6 +44,7 @@
#include "media/gpu/vp8_decoder.h"
#include "media/gpu/vp9_decoder.h"
#include "media/video/picture.h"
#include "ui/base/ui_base_features.h"
#include "ui/gl/gl_image.h"
namespace media {
......@@ -193,6 +195,12 @@ bool VaapiVideoDecodeAccelerator::Initialize(const Config& config,
Client* client) {
DCHECK(task_runner_->BelongsToCurrentThread());
#if defined(USE_X11)
// TODO(crbug/1116701): implement decode acceleration when running with Ozone.
if (features::IsUsingOzonePlatform())
return false;
#endif
if (config.is_encrypted()) {
NOTREACHED() << "Encrypted streams are not supported for this VDA";
return false;
......@@ -693,30 +701,34 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
va_surface_format_ = GetVaFormatForVideoCodecProfile(profile_);
std::vector<VASurfaceID> va_surface_ids;
scoped_refptr<VaapiWrapper> vaapi_wrapper_for_picture = vaapi_wrapper_;
// The X11/ANGLE implementation can use |vaapi_wrapper_| to copy from an
// internal libva buffer into an X Pixmap without having to use a processing
// wrapper.
#if !defined(USE_X11)
// If we aren't in BufferAllocationMode::kNone, we have to allocate a
// |vpp_vaapi_wrapper_| for VaapiPicture to DownloadFromSurface() the VA's
// internal decoded frame.
if (buffer_allocation_mode_ != BufferAllocationMode::kNone &&
!vpp_vaapi_wrapper_) {
if (buffer_allocation_mode_ != BufferAllocationMode::kNone) {
if (!vpp_vaapi_wrapper_) {
vpp_vaapi_wrapper_ = VaapiWrapper::Create(
VaapiWrapper::kVideoProcess, VAProfileNone,
base::BindRepeating(&ReportToUMA, VAAPI_VPP_ERROR));
RETURN_AND_NOTIFY_ON_FAILURE(vpp_vaapi_wrapper_,
"Failed to initialize VppVaapiWrapper",
PLATFORM_FAILURE, );
// Size is irrelevant for a VPP context.
RETURN_AND_NOTIFY_ON_FAILURE(vpp_vaapi_wrapper_->CreateContext(gfx::Size()),
"Failed to create Context",
PLATFORM_FAILURE, );
RETURN_AND_NOTIFY_ON_FAILURE(
vpp_vaapi_wrapper_->CreateContext(gfx::Size()),
"Failed to create Context", PLATFORM_FAILURE, );
}
vaapi_wrapper_for_picture = vpp_vaapi_wrapper_;
}
for (size_t i = 0; i < buffers.size(); ++i) {
// If we aren't in BufferAllocationMode::kNone, this |picture| is
// only used as a copy destination. Therefore, the VaapiWrapper used and
// owned by |picture| is |vpp_vaapi_wrapper_|.
#endif // !defined(USE_X11)
for (size_t i = 0; i < buffers.size(); ++i) {
// TODO(b/139460315): Create with buffers[i] once the AMD driver issue is
// resolved.
PictureBuffer buffer = buffers[i];
......@@ -729,16 +741,15 @@ void VaapiVideoDecodeAccelerator::AssignPictureBuffers(
: gfx::Size();
std::unique_ptr<VaapiPicture> picture = vaapi_picture_factory_->Create(
(buffer_allocation_mode_ == BufferAllocationMode::kNone)
? vaapi_wrapper_
: vpp_vaapi_wrapper_,
make_context_current_cb_, bind_image_cb_, buffer, size_to_bind);
vaapi_wrapper_for_picture, make_context_current_cb_, bind_image_cb_,
buffer, size_to_bind);
RETURN_AND_NOTIFY_ON_FAILURE(picture, "Failed creating a VaapiPicture",
PLATFORM_FAILURE, );
if (output_mode_ == Config::OutputMode::ALLOCATE) {
RETURN_AND_NOTIFY_ON_STATUS(
picture->Allocate(vaapi_picture_factory_->GetBufferFormat()), );
available_picture_buffers_.push_back(buffers[i].id());
VASurfaceID va_surface_id = picture->va_surface_id();
if (va_surface_id != VA_INVALID_ID)
......@@ -1190,6 +1201,20 @@ VaapiVideoDecodeAccelerator::GetSupportedProfiles() {
VaapiVideoDecodeAccelerator::BufferAllocationMode
VaapiVideoDecodeAccelerator::DecideBufferAllocationMode() {
#if defined(USE_X11)
// The IMPORT mode is used for Android on Chrome OS, so this doesn't apply
// here.
DCHECK_NE(output_mode_, VideoDecodeAccelerator::Config::OutputMode::IMPORT);
// TODO(crbug/1116701): get video decode acceleration working with ozone.
DCHECK(!features::IsUsingOzonePlatform());
// For H.264 on older devices, another +1 is experimentally needed for
// high-to-high resolution changes.
// TODO(mcasas): Figure out why and why only H264, see crbug.com/912295 and
// http://crrev.com/c/1363807/9/media/gpu/h264_decoder.cc#1449.
if (profile_ >= H264PROFILE_MIN && profile_ <= H264PROFILE_MAX)
return BufferAllocationMode::kReduced;
return BufferAllocationMode::kSuperReduced;
#else
// TODO(crbug.com/912295): Enable a better BufferAllocationMode for IMPORT
// |output_mode_| as well.
if (output_mode_ == VideoDecodeAccelerator::Config::OutputMode::IMPORT)
......@@ -1224,6 +1249,7 @@ VaapiVideoDecodeAccelerator::DecideBufferAllocationMode() {
// GetNumReferenceFrames() + 1. Moreover, we also request the |client_| to
// allocate less than the usual |decoder_|s GetRequiredNumOfPictures().
return BufferAllocationMode::kSuperReduced;
#endif
}
bool VaapiVideoDecodeAccelerator::IsBufferAllocationModeReducedOrSuperReduced()
......
......@@ -199,9 +199,11 @@ class MEDIA_GPU_EXPORT VaapiVideoDecodeAccelerator
enum class BufferAllocationMode {
// Only using |client_|s provided PictureBuffers, none internal.
kNone,
// Using a reduced amount of |client_|s provided PictureBuffers and
// |decoder_|s GetNumReferenceFrames() internally.
kSuperReduced,
// Similar to kSuperReduced, but we have to increase slightly the amount of
// PictureBuffers allocated for the |client_|.
kReduced,
......
......@@ -419,9 +419,8 @@ bool VADisplayState::InitializeOnce() {
break;
case gl::kGLImplementationEGLANGLE:
#if defined(USE_X11)
if (!features::IsUsingOzonePlatform())
va_display_ = vaGetDisplay(gfx::GetXDisplay());
if (vaDisplayIsValid(va_display_))
break;
#endif // USE_X11
break;
// Cannot infer platform from GL, try all available displays
......@@ -447,12 +446,25 @@ bool VADisplayState::InitializeOnce() {
return false;
}
// Set VA logging level to enable error messages, unless already set
// Set VA logging level and driver name, unless already set.
constexpr char libva_log_level_env[] = "LIBVA_MESSAGING_LEVEL";
std::unique_ptr<base::Environment> env(base::Environment::Create());
if (!env->HasVar(libva_log_level_env))
env->SetVar(libva_log_level_env, "1");
#if defined(USE_X11)
if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
DCHECK(!features::IsUsingOzonePlatform());
constexpr char libva_driver_impl_env[] = "LIBVA_DRIVER_NAME";
// TODO(crbug/1116703) The libva intel-media driver has a known segfault in
// vaPutSurface, so until this is fixed, fall back to the i965 driver. There
// is discussion of the issue here:
// https://github.com/intel/media-driver/issues/818
if (!env->HasVar(libva_driver_impl_env))
env->SetVar(libva_driver_impl_env, "i965");
}
#endif // USE_X11
// The VAAPI version.
int major_version, minor_version;
VAStatus va_res = vaInitialize(va_display_, &major_version, &minor_version);
......
......@@ -252,6 +252,8 @@ component("gl") {
if (use_x11 || ozone_platform_x11) {
sources += [
"gl_image_egl_pixmap.cc",
"gl_image_egl_pixmap.h",
"gl_surface_egl_x11.cc",
"gl_surface_egl_x11.h",
"gl_surface_egl_x11_gles2.cc",
......
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/gl/gl_image_egl_pixmap.h"
#include <memory>
#include "base/logging.h"
#include "build/build_config.h"
#include "ui/gfx/x/x11.h"
#include "ui/gl/buffer_format_utils.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_surface_glx.h"
#include "ui/gl/gl_visual_picker_glx.h"
namespace gl {
namespace {

// Returns the EGLDisplay backed by the process's X11 display, or
// EGL_NO_DISPLAY when X11 is unavailable (non-X11 build, or no display
// connection could be opened).
//
// File-local helper: an anonymous namespace gives it internal linkage.
// (Namespace-scope `inline` in a .cc keeps external linkage and risks ODR
// collisions with any other TU defining the same name.)
EGLDisplay FromXDisplay() {
#if defined(USE_X11)
  if (auto* x_display = gfx::GetXDisplay())
    return eglGetDisplay(x_display);
#endif
  return EGL_NO_DISPLAY;
}

}  // namespace
// Records the image geometry and buffer format and caches the EGLDisplay for
// the X11 connection. No EGL surface exists until Initialize() succeeds;
// |surface_| stays null until then.
GLImageEGLPixmap::GLImageEGLPixmap(const gfx::Size& size,
gfx::BufferFormat format)
: surface_(nullptr),
size_(size),
format_(format),
display_(FromXDisplay()) {}
// Destroys the EGL pixmap surface if Initialize() created one. The underlying
// X pixmap is owned by whoever passed it to Initialize() (e.g.
// VaapiPictureNativePixmapAngle frees its own pixmap) and is not freed here.
GLImageEGLPixmap::~GLImageEGLPixmap() {
if (surface_)
eglDestroySurface(display_, surface_);
}
// Wraps |pixmap| in an EGL pixmap surface suitable for eglBindTexImage().
// Returns false if EGL cannot be initialized, no matching config exists, or
// surface creation fails. The caller retains ownership of |pixmap|.
bool GLImageEGLPixmap::Initialize(XID pixmap) {
  // eglInitialize() is safe to call repeatedly for the same display; it fails
  // cleanly when |display_| is EGL_NO_DISPLAY.
  if (eglInitialize(display_, nullptr, nullptr) != EGL_TRUE)
    return false;

  // NOTE(review): this hard-codes a 32-bit RGBA pixmap config regardless of
  // |format_| — confirm every caller supplies an RGBA-compatible
  // gfx::BufferFormat.
  const EGLint config_attribs[] = {EGL_BUFFER_SIZE,
                                   32,
                                   EGL_ALPHA_SIZE,
                                   8,
                                   EGL_BLUE_SIZE,
                                   8,
                                   EGL_GREEN_SIZE,
                                   8,
                                   EGL_RED_SIZE,
                                   8,
                                   EGL_SURFACE_TYPE,
                                   EGL_PIXMAP_BIT,
                                   EGL_BIND_TO_TEXTURE_RGBA,
                                   EGL_TRUE,
                                   EGL_NONE};
  // Zero-initialize so the !num_configs check is well-defined even if
  // eglChooseConfig() fails without writing it.
  EGLint num_configs = 0;
  EGLConfig config = nullptr;
  if ((eglChooseConfig(display_, config_attribs, &config, 1, &num_configs) !=
       EGL_TRUE) ||
      !num_configs) {
    return false;
  }

  // Fixed-size attribute list: a plain array avoids the heap allocation a
  // std::vector would make for five EGLints.
  const EGLint pixmap_attribs[] = {EGL_TEXTURE_FORMAT, EGL_TEXTURE_RGBA,
                                   EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
                                   EGL_NONE};
  surface_ = eglCreatePixmapSurface(display_, config, pixmap, pixmap_attribs);
  return surface_ != EGL_NO_SURFACE;
}
// Returns the pixel dimensions supplied at construction.
gfx::Size GLImageEGLPixmap::GetSize() {
return size_;
}
// Maps the gfx::BufferFormat to the matching GL internal format enum.
unsigned GLImageEGLPixmap::GetInternalFormat() {
return gl::BufferFormatToGLInternalFormat(format_);
}
// Component type of the texture data; this image is always 8 bits/channel.
unsigned GLImageEGLPixmap::GetDataType() {
return GL_UNSIGNED_BYTE;
}
// Texture-from-pixmap binds directly (eglBindTexImage); no copy is involved.
GLImageEGLPixmap::BindOrCopy GLImageEGLPixmap::ShouldBindOrCopy() {
return BIND;
}
// Binds the pixmap surface's color buffer to the currently bound texture.
// Fails when Initialize() has not created a surface, or when |target| is not
// GL_TEXTURE_2D (the only target this path supports).
bool GLImageEGLPixmap::BindTexImage(unsigned target) {
  if (!surface_ || target != GL_TEXTURE_2D)
    return false;
  return eglBindTexImage(display_, surface_, EGL_BACK_BUFFER) == EGL_TRUE;
}
// Releases a binding previously made by BindTexImage(). The DCHECKs mirror
// BindTexImage()'s preconditions — presumably callers only pair this with a
// successful bind, so failure here is a programming error, not a runtime one.
void GLImageEGLPixmap::ReleaseTexImage(unsigned target) {
DCHECK_NE(nullptr, surface_);
DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), target);
eglReleaseTexImage(display_, surface_, EGL_BACK_BUFFER);
}
// Intentionally a no-op: memory-dump reporting for GLImage is tracked by the
// bug below and has not been implemented for this image type.
void GLImageEGLPixmap::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) {
// TODO(crbug.com/514914): Implement GLImage OnMemoryDump.
}
} // namespace gl
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef UI_GL_GL_IMAGE_EGL_PIXMAP_H_
#define UI_GL_GL_IMAGE_EGL_PIXMAP_H_
#include <stdint.h>
#include "base/macros.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/x/x11_types.h"
#include "ui/gl/gl_export.h"
#include "ui/gl/gl_image.h"
typedef void* EGLSurface;
typedef void* EGLDisplay;
namespace gl {
// A GLImage that exposes the contents of an X11 Pixmap to GL by wrapping it
// in an EGL pixmap surface (eglCreatePixmapSurface + eglBindTexImage).
// Instances are created with a size/format, then Initialize()d with the
// pixmap to wrap; only the GL_TEXTURE_2D target is supported for binding.
class GL_EXPORT GLImageEGLPixmap : public GLImage {
public:
GLImageEGLPixmap(const gfx::Size& size, gfx::BufferFormat format);
// Wraps |pixmap| in an EGL surface. Must succeed before BindTexImage() is
// used; returns false on failure. Ownership of |pixmap| stays with the
// caller — this class never frees it.
bool Initialize(XID pixmap);
// Overridden from GLImage:
gfx::Size GetSize() override;
unsigned GetInternalFormat() override;
unsigned GetDataType() override;
BindOrCopy ShouldBindOrCopy() override;
bool BindTexImage(unsigned target) override;
void ReleaseTexImage(unsigned target) override;
void Flush() override {}
void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
uint64_t process_tracing_id,
const std::string& dump_name) override;
protected:
// Protected: lifetime is managed by GLImage's reference counting, so
// clients never delete this directly.
~GLImageEGLPixmap() override;
gfx::BufferFormat format() const { return format_; }
private:
// The EGL pixmap surface created by Initialize(); null until then, and
// destroyed (if set) in the destructor.
EGLSurface surface_;
const gfx::Size size_;
gfx::BufferFormat format_;
// Cached at construction from the process's X11 display; may be
// EGL_NO_DISPLAY, in which case Initialize() fails.
EGLDisplay display_;
DISALLOW_COPY_AND_ASSIGN(GLImageEGLPixmap);
};
} // namespace gl
#endif // UI_GL_GL_IMAGE_EGL_PIXMAP_H_
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment