Commit 91436df9 authored by Vasiliy Telezhnikov's avatar Vasiliy Telezhnikov Committed by Commit Bot

Remove GLRendererCopier from SkiaRenderer

This CL removes GLRendererCopier from SkiaRenderer, as we use Skia for
readback on all platforms.

Bug: 1044594
Change-Id: I51ee6f36c87a0598a5aa7dd9d9dc1c9ca1417d73
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2307651
Reviewed-by: Jonathan Backer <backer@chromium.org>
Commit-Queue: Vasiliy Telezhnikov <vasilyt@chromium.org>
Cr-Commit-Position: refs/heads/master@{#790983}
parent 396d268a
...@@ -19,9 +19,6 @@ namespace features { ...@@ -19,9 +19,6 @@ namespace features {
const base::Feature kForcePreferredIntervalForVideo{ const base::Feature kForcePreferredIntervalForVideo{
"ForcePreferredIntervalForVideo", base::FEATURE_DISABLED_BY_DEFAULT}; "ForcePreferredIntervalForVideo", base::FEATURE_DISABLED_BY_DEFAULT};
// When enabled, GL readback (CopyOutputRequest handling) is routed through
// Skia rather than the legacy GL copier path — presumably GLRendererCopier;
// see IsUsingSkiaForGLReadback() below. Enabled by default.
const base::Feature kUseSkiaForGLReadback{"UseSkiaForGLReadback",
                                          base::FEATURE_ENABLED_BY_DEFAULT};
// Use the SkiaRenderer. // Use the SkiaRenderer.
#if defined(OS_LINUX) && !(defined(OS_CHROMEOS) || BUILDFLAG(IS_CHROMECAST)) #if defined(OS_LINUX) && !(defined(OS_CHROMEOS) || BUILDFLAG(IS_CHROMECAST))
const base::Feature kUseSkiaRenderer{"UseSkiaRenderer", const base::Feature kUseSkiaRenderer{"UseSkiaRenderer",
...@@ -91,14 +88,6 @@ bool IsVizHitTestingDebugEnabled() { ...@@ -91,14 +88,6 @@ bool IsVizHitTestingDebugEnabled() {
switches::kEnableVizHitTestDebug); switches::kEnableVizHitTestDebug);
} }
// Whether readback should go through Skia instead of the legacy GL path.
bool IsUsingSkiaForGLReadback() {
  // Viz for webview requires Skia Readback; otherwise honor the feature flag.
  return IsUsingVizForWebView() ||
         base::FeatureList::IsEnabled(kUseSkiaForGLReadback);
}
bool IsUsingSkiaRenderer() { bool IsUsingSkiaRenderer() {
#if defined(OS_ANDROID) #if defined(OS_ANDROID)
// We don't support KitKat. Check for it before looking at the feature flag // We don't support KitKat. Check for it before looking at the feature flag
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
namespace features { namespace features {
VIZ_COMMON_EXPORT extern const base::Feature kForcePreferredIntervalForVideo; VIZ_COMMON_EXPORT extern const base::Feature kForcePreferredIntervalForVideo;
VIZ_COMMON_EXPORT extern const base::Feature kUseSkiaForGLReadback;
VIZ_COMMON_EXPORT extern const base::Feature kUseSkiaRenderer; VIZ_COMMON_EXPORT extern const base::Feature kUseSkiaRenderer;
VIZ_COMMON_EXPORT extern const base::Feature kRecordSkPicture; VIZ_COMMON_EXPORT extern const base::Feature kRecordSkPicture;
VIZ_COMMON_EXPORT extern const base::Feature kDisableDeJelly; VIZ_COMMON_EXPORT extern const base::Feature kDisableDeJelly;
...@@ -33,7 +32,6 @@ VIZ_COMMON_EXPORT extern const base::Feature kWebRtcLogCapturePipeline; ...@@ -33,7 +32,6 @@ VIZ_COMMON_EXPORT extern const base::Feature kWebRtcLogCapturePipeline;
VIZ_COMMON_EXPORT bool IsForcePreferredIntervalForVideoEnabled(); VIZ_COMMON_EXPORT bool IsForcePreferredIntervalForVideoEnabled();
VIZ_COMMON_EXPORT bool IsVizHitTestingDebugEnabled(); VIZ_COMMON_EXPORT bool IsVizHitTestingDebugEnabled();
VIZ_COMMON_EXPORT bool IsUsingSkiaForGLReadback();
VIZ_COMMON_EXPORT bool IsUsingSkiaRenderer(); VIZ_COMMON_EXPORT bool IsUsingSkiaRenderer();
VIZ_COMMON_EXPORT bool IsRecordingSkPicture(); VIZ_COMMON_EXPORT bool IsRecordingSkPicture();
#if defined(OS_ANDROID) #if defined(OS_ANDROID)
......
...@@ -96,8 +96,6 @@ viz_component("service") { ...@@ -96,8 +96,6 @@ viz_component("service") {
"display/texture_deleter.h", "display/texture_deleter.h",
"display_embedder/buffer_queue.cc", "display_embedder/buffer_queue.cc",
"display_embedder/buffer_queue.h", "display_embedder/buffer_queue.h",
"display_embedder/direct_context_provider.cc",
"display_embedder/direct_context_provider.h",
"display_embedder/gl_output_surface.cc", "display_embedder/gl_output_surface.cc",
"display_embedder/gl_output_surface.h", "display_embedder/gl_output_surface.h",
"display_embedder/gl_output_surface_buffer_queue.cc", "display_embedder/gl_output_surface_buffer_queue.cc",
......
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/viz/service/display_embedder/direct_context_provider.h"
#include <stdint.h>
#include <utility>
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"
#include "components/viz/common/gpu/context_lost_observer.h"
#include "components/viz/common/gpu/context_lost_reason.h"
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
#include "gpu/command_buffer/common/context_creation_attribs.h"
#include "gpu/command_buffer/service/command_buffer_direct.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/config/gpu_feature_info.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
namespace viz {
// Defaulted out of line; the delegate interface itself is declared in the
// header and implemented by the embedder.
DirectContextProviderDelegate::DirectContextProviderDelegate() = default;

DirectContextProviderDelegate::~DirectContextProviderDelegate() = default;
// Builds a complete in-process command-buffer stack (service-side decoder and
// direct command buffer, plus client-side GLES2CmdHelper, TransferBuffer and
// GLES2Implementation) on top of |gl_context|/|gl_surface|, which must already
// be current. Any initialization failure is recorded in |context_result_| and
// partially-constructed state is torn down before returning.
DirectContextProvider::DirectContextProvider(
    scoped_refptr<gl::GLContext> gl_context,
    scoped_refptr<gl::GLSurface> gl_surface,
    bool supports_alpha,
    const gpu::GpuPreferences& gpu_preferences,
    gpu::gles2::FeatureInfo* feature_info,
    std::unique_ptr<DirectContextProviderDelegate> delegate)
    : discardable_manager_(gpu_preferences),
      passthrough_discardable_manager_(gpu_preferences),
      translator_cache_(gpu_preferences),
      delegate_(std::move(delegate)) {
  DCHECK(gl_context->IsCurrent(gl_surface.get()));

  auto limits = gpu::SharedMemoryLimits::ForMailboxContext();

  // Service side: shared context-group state, a direct (same-process) command
  // buffer, and a GLES2 decoder wired to it.
  auto group = base::MakeRefCounted<gpu::gles2::ContextGroup>(
      gpu_preferences, gpu::gles2::PassthroughCommandDecoderSupported(),
      &mailbox_manager_, /*memory_tracker=*/nullptr, &translator_cache_,
      &completeness_cache_, feature_info, true, &image_manager_,
      /*image_factory=*/nullptr,
      /*progress_reporter=*/nullptr, gpu_feature_info_, &discardable_manager_,
      &passthrough_discardable_manager_, delegate_->GetSharedImageManager());
  auto command_buffer = std::make_unique<gpu::CommandBufferDirect>();
  std::unique_ptr<gpu::gles2::GLES2Decoder> decoder(
      gpu::gles2::GLES2Decoder::Create(command_buffer.get(),
                                       command_buffer->service(), &outputter_,
                                       group.get()));
  if (gpu_preferences.enable_gpu_service_logging)
    decoder->SetLogCommands(true);
  command_buffer->set_handler(decoder.get());

  // Context attributes for the decoder; request an alpha channel only when the
  // caller says the surface supports it.
  gpu::ContextCreationAttribs attribs;
  attribs.alpha_size = supports_alpha ? 8 : 0;
  attribs.buffer_preserved = false;
  attribs.bind_generates_resource = true;
  attribs.fail_if_major_perf_caveat = false;
  attribs.lose_context_when_out_of_memory = true;
  attribs.context_type = gpu::CONTEXT_TYPE_OPENGLES2;
  context_result_ =
      decoder->Initialize(gl_surface, gl_context, gl_surface->IsOffscreen(),
                          gpu::gles2::DisallowedFeatures(), attribs);
  if (context_result_ != gpu::ContextResult::kSuccess)
    return;
  auto gles2_cmd_helper =
      std::make_unique<gpu::gles2::GLES2CmdHelper>(command_buffer.get());
  context_result_ = gles2_cmd_helper->Initialize(limits.command_buffer_size);
  if (context_result_ != gpu::ContextResult::kSuccess) {
    // Destroy(true): the GL context is still current here.
    decoder->Destroy(true);
    return;
  }
  // Client side Capabilities queries return reference, service side return
  // value. Here two sides are joined together.
  capabilities_ = decoder->GetCapabilities();
  auto transfer_buffer =
      std::make_unique<gpu::TransferBuffer>(gles2_cmd_helper.get());

  // Everything below this point can fail only in ways that Destroy() knows how
  // to unwind, so transfer ownership of the locals to the members now.
  gles2_cmd_helper_ = std::move(gles2_cmd_helper);
  transfer_buffer_ = std::move(transfer_buffer);
  command_buffer_ = std::move(command_buffer);
  decoder_ = std::move(decoder);
  gl_context_ = std::move(gl_context);
  gl_surface_ = std::move(gl_surface);
  gles2_implementation_ = std::make_unique<gpu::gles2::GLES2Implementation>(
      gles2_cmd_helper_.get(), nullptr, transfer_buffer_.get(),
      attribs.bind_generates_resource, attribs.lose_context_when_out_of_memory,
      /*kSupportClientSideArrays=*/false, this);
  context_result_ = gles2_implementation_->Initialize(limits);
  if (context_result_ != gpu::ContextResult::kSuccess) {
    Destroy();
    return;
  }
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "viz::DirectContextProvider", base::ThreadTaskRunnerHandle::Get());
  // TraceEndCHROMIUM is implicit when the context is destroyed
  gles2_implementation_->TraceBeginCHROMIUM("VizCompositor",
                                            "DisplayCompositor");
}
DirectContextProvider::~DirectContextProvider() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  // |decoder_| is only non-null if construction got far enough for Destroy()
  // to have something to unwind.
  if (decoder_)
    Destroy();
}

// Tears down the command-buffer stack. Member reset order matters: the client
// side (GLES2Implementation) must go before the service side (decoder /
// command buffer) it issues commands to.
void DirectContextProvider::Destroy() {
  DCHECK(decoder_);
  // GL cleanup is only attempted if the context is not lost and can be made
  // current; otherwise objects are dropped without touching the driver.
  bool have_context = !decoder_->WasContextLost() &&
                      (gl_context_->IsCurrent(nullptr) ||
                       gl_context_->MakeCurrent(gl_surface_.get()));
  if (have_context && framebuffer_id_ != 0) {
    gles2_implementation_->DeleteFramebuffers(1, &framebuffer_id_);
    framebuffer_id_ = 0;
  }
  // The client gl interface might still be set to current global
  // interface. This will be cleaned up in ApplyContextReleased
  // with AutoCurrentContextRestore.
  gles2_implementation_.reset();
  gl_context_.reset();
  transfer_buffer_.reset();
  gles2_cmd_helper_.reset();
  decoder_->Destroy(have_context);
  decoder_.reset();
  command_buffer_.reset();
}
// Puts both the client-side (GLES2Implementation) and service-side caches into
// a known state before GLRendererCopier runs, and binds |texture_client_id| as
// color attachment 0 of an internally owned framebuffer (FBO 0 if the id is 0).
void DirectContextProvider::SetGLRendererCopierRequiredState(
    GLuint texture_client_id) {
  // Get into known state (see
  // SkiaOutputSurfaceImplOnGpu::ScopedUseContextProvider).
  gles2_implementation_->BindFramebuffer(GL_FRAMEBUFFER, 0);

  auto* group = decoder()->GetContextGroup();
  if (group->use_passthrough_cmd_decoder()) {
    // Matches state setting in
    // SkiaOutputSurfaceImplOnGpu::ScopedUseContextProvider when passthrough
    // is enabled so that client side and service side state match.
    //
    // TODO(backer): Use ANGLE API to force state reset once API is available.
    gles2_implementation_->UseProgram(0);
    gles2_implementation_->ActiveTexture(GL_TEXTURE0);
    gles2_implementation_->BindBuffer(GL_ARRAY_BUFFER, 0);
    gles2_implementation_->BindTexture(GL_TEXTURE_2D, 0);
  } else {
    // Validating decoder: restore the driver state from the decoder's cache.
    decoder_->RestoreActiveTexture();
    decoder_->RestoreProgramBindings();
    decoder_->RestoreAllAttributes();
    decoder_->RestoreGlobalState();
    decoder_->RestoreBufferBindings();
  }

  // At this point |decoder_| cached state (if any, passthrough doesn't cache)
  // is synced with GLContext state. But GLES2Implementation caches some state
  // too and we need to make sure this are in sync with |decoder_| and context
  constexpr static std::initializer_list<GLuint> caps = {
      GL_SCISSOR_TEST, GL_STENCIL_TEST, GL_BLEND};
  for (auto cap : caps) {
    // Push the client-cached enable/disable state through the cmd helper,
    // bypassing the GLES2Implementation cache.
    if (gles2_implementation_->IsEnabled(cap))
      gles2_cmd_helper_->Enable(cap);
    else
      gles2_cmd_helper_->Disable(cap);
  }

  if (texture_client_id) {
    // The framebuffer is created lazily on first use and reused afterwards;
    // Destroy() deletes it.
    if (!framebuffer_id_)
      gles2_implementation_->GenFramebuffers(1, &framebuffer_id_);
    gles2_implementation_->BindFramebuffer(GL_FRAMEBUFFER, framebuffer_id_);
    gles2_implementation_->FramebufferTexture2D(
        GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture_client_id,
        0);
    DCHECK_EQ(gles2_implementation_->CheckFramebufferStatus(GL_FRAMEBUFFER),
              static_cast<GLenum>(GL_FRAMEBUFFER_COMPLETE));
  }
}
// Convenience accessor for the decoder's texture manager.
gpu::gles2::TextureManager* DirectContextProvider::texture_manager() {
  auto* context_group = decoder_->GetContextGroup();
  return context_group->texture_manager();
}
// ContextProvider's ref-counting overrides simply forward to the
// RefCountedThreadSafe base class.
void DirectContextProvider::AddRef() const {
  base::RefCountedThreadSafe<DirectContextProvider>::AddRef();
}

void DirectContextProvider::Release() const {
  base::RefCountedThreadSafe<DirectContextProvider>::Release();
}

// All initialization happened in the constructor; this just reports its
// outcome.
gpu::ContextResult DirectContextProvider::BindToCurrentThread() {
  return context_result_;
}
gpu::gles2::GLES2Interface* DirectContextProvider::ContextGL() {
  return gles2_implementation_.get();
}

// GLES2Implementation implements ContextSupport as well.
gpu::ContextSupport* DirectContextProvider::ContextSupport() {
  return gles2_implementation_.get();
}

// This provider is GLES2-only; no GrContext is exposed.
class GrContext* DirectContextProvider::GrContext() {
  NOTREACHED();
  return nullptr;
}

gpu::SharedImageInterface* DirectContextProvider::SharedImageInterface() {
  return delegate_->GetSharedImageInterface();
}

// Not supported: callers of this provider are not expected to use cache
// control or external locking.
ContextCacheController* DirectContextProvider::CacheController() {
  NOTREACHED();
  return nullptr;
}

base::Lock* DirectContextProvider::GetLock() {
  NOTREACHED();
  return nullptr;
}
// |capabilities_| was snapshotted from the decoder during construction.
const gpu::Capabilities& DirectContextProvider::ContextCapabilities() const {
  return capabilities_;
}

const gpu::GpuFeatureInfo& DirectContextProvider::GetGpuFeatureInfo() const {
  return gpu_feature_info_;
}

void DirectContextProvider::AddObserver(ContextLostObserver* obs) {
  observers_.AddObserver(obs);
}

void DirectContextProvider::RemoveObserver(ContextLostObserver* obs) {
  observers_.RemoveObserver(obs);
}

// Notifies registered ContextLostObservers; invoked from MarkContextLost().
void DirectContextProvider::OnContextLost() {
  // TODO(https://crbug.com/927460): Instrument this with a context loss UMA
  // stat shared with SkiaRenderer.
  for (auto& observer : observers_)
    observer.OnContextLost();
}

// base::trace_event::MemoryDumpProvider implementation; only registered after
// successful initialization, hence the DCHECK.
bool DirectContextProvider::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  DCHECK_EQ(context_result_, gpu::ContextResult::kSuccess);
  gles2_implementation_->OnMemoryDump(args, pmd);
  gles2_cmd_helper_->OnMemoryDump(args, pmd);
  return true;
}

void DirectContextProvider::SetGpuControlClient(gpu::GpuControlClient*) {
  // The client is not currently called, so don't store it.
}

const gpu::Capabilities& DirectContextProvider::GetCapabilities() const {
  return capabilities_;
}
// GpuControl implementation. Only the subset needed by this provider is
// functional; the rest is NOTREACHED() because no caller exercises it.
int32_t DirectContextProvider::CreateImage(ClientBuffer buffer,
                                           size_t width,
                                           size_t height) {
  NOTREACHED();
  return -1;
}

void DirectContextProvider::DestroyImage(int32_t id) {
  NOTREACHED();
}

// Runs |callback| when the query completes; forwarded straight to the
// in-process decoder.
void DirectContextProvider::SignalQuery(uint32_t query,
                                        base::OnceClosure callback) {
  decoder_->SetQueryCallback(query, std::move(callback));
}

void DirectContextProvider::CreateGpuFence(uint32_t gpu_fence_id,
                                           ClientGpuFence source) {
  NOTREACHED();
}

void DirectContextProvider::GetGpuFence(
    uint32_t gpu_fence_id,
    base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
  NOTREACHED();
}

void DirectContextProvider::SetLock(base::Lock*) {
  NOTREACHED();
}

// Single-process, single-thread: work is visible as soon as it is issued.
void DirectContextProvider::EnsureWorkVisible() {
}

// Sync-token identity and release bookkeeping are supplied by the delegate.
gpu::CommandBufferNamespace DirectContextProvider::GetNamespaceID() const {
  return delegate_->GetNamespaceID();
}

gpu::CommandBufferId DirectContextProvider::GetCommandBufferID() const {
  return delegate_->GetCommandBufferID();
}

void DirectContextProvider::FlushPendingWork() {
  NOTREACHED();
}

uint64_t DirectContextProvider::GenerateFenceSyncRelease() {
  return delegate_->GenerateFenceSyncRelease();
}

bool DirectContextProvider::IsFenceSyncReleased(uint64_t release) {
  NOTREACHED();
  return false;
}

void DirectContextProvider::SignalSyncToken(const gpu::SyncToken& sync_token,
                                            base::OnceClosure callback) {
  delegate_->SignalSyncToken(sync_token, std::move(callback));
}

void DirectContextProvider::WaitSyncToken(const gpu::SyncToken& sync_token) {
  NOTREACHED();
}

// Unverified sync tokens are never waited on here.
bool DirectContextProvider::CanWaitUnverifiedSyncToken(
    const gpu::SyncToken& sync_token) {
  return false;
}

void DirectContextProvider::SetDisplayTransform(
    gfx::OverlayTransform transform) {
  NOTREACHED();
}
// Allocates one client-side texture id from the share group's texture id
// namespace. Pair with DeleteClientTextureId().
GLuint DirectContextProvider::GenClientTextureId() {
  GLuint texture_id = 0;
  gles2_implementation_->share_group()
      ->GetIdHandler(gpu::gles2::SharedIdNamespaces::kTextures)
      ->MakeIds(gles2_implementation_.get(), 0, 1, &texture_id);
  return texture_id;
}
// Releases an id previously returned by GenClientTextureId().
void DirectContextProvider::DeleteClientTextureId(GLuint client_id) {
  gles2_implementation_->DeleteTextures(1, &client_id);
}

// Marks the context lost on the service side and notifies observers exactly
// once; subsequent calls are no-ops because WasContextLost() becomes true.
void DirectContextProvider::MarkContextLost() {
  if (!decoder_->WasContextLost()) {
    decoder_->MarkContextLost(gpu::error::kUnknown);
    command_buffer_->service()->SetParseError(gpu::error::kLostContext);
    OnContextLost();
  }
}

// Call a glFinish() to complete any pending queries.
void DirectContextProvider::FinishQueries() {
  if (decoder_->HasPendingQueries())
    gles2_implementation_->Finish();
}
} // namespace viz
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_DIRECT_CONTEXT_PROVIDER_H_
#define COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_DIRECT_CONTEXT_PROVIDER_H_
#include <stdint.h>
#include <memory>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
#include "base/trace_event/memory_dump_provider.h"
#include "components/viz/common/gpu/context_provider.h"
#include "components/viz/service/viz_service_export.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/passthrough_discardable_manager.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
class GrContext;
namespace gpu {
class CommandBufferDirect;
class DecoderContext;
class SharedImageInterface;
class TransferBuffer;
struct GpuPreferences;
namespace gles2 {
class GLES2CmdHelper;
class GLES2Implementation;
class GLES2Interface;
class TextureManager;
} // namespace gles2
} // namespace gpu
namespace viz {
class ContextLostObserver;
// DirectContextProvider bridges between GPU client code and GPU service
// code. Often we cannot include client and service headers in the same
// translation unit due to collisions between client and service GL
// defines. DirectContextProviderDelegate satisfies DirectContextProvider
// dependencies that require GPU service code that cannot be included by
// DirectContextProvider.
class VIZ_SERVICE_EXPORT DirectContextProviderDelegate {
 public:
  DirectContextProviderDelegate();
  virtual ~DirectContextProviderDelegate();

  // Service-side shared image manager used when building the ContextGroup.
  virtual gpu::SharedImageManager* GetSharedImageManager() = 0;

  // Client-side interface returned by
  // DirectContextProvider::SharedImageInterface().
  virtual gpu::SharedImageInterface* GetSharedImageInterface() = 0;

  // Identity of the command buffer for sync-token purposes (see GpuControl).
  virtual gpu::CommandBufferNamespace GetNamespaceID() const = 0;
  virtual gpu::CommandBufferId GetCommandBufferID() const = 0;

  // Generate the release ID for a new SyncToken (see
  // GpuControl::GenerateFenceSyncRelease).
  virtual uint64_t GenerateFenceSyncRelease() = 0;

  // Runs |callback| when |sync_token| is released.
  virtual void SignalSyncToken(const gpu::SyncToken& sync_token,
                               base::OnceClosure callback) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(DirectContextProviderDelegate);
};
// DirectContextProvider provides a GLES2Interface by running cross process code
// (e.g. GLES2Implementation and GLES2Decoder) within a single thread. It is
// suitable for easily porting code relying on GLES2Interface, but is less
// efficient than calling native GL because it serializes/deserializes command
// streams, validates command streams, and has unnecessary copies through shared
// memory (e.g. glReadPixels frame buffer). Parts of GLES2Interface are
// NOTIMPLEMENTED().
class VIZ_SERVICE_EXPORT DirectContextProvider
    : public base::RefCountedThreadSafe<DirectContextProvider>,
      public ContextProvider,
      public gpu::GpuControl,
      public base::trace_event::MemoryDumpProvider {
 public:
  // |gl_context| must be current on |gl_surface| at construction time. The
  // result of initialization is reported via BindToCurrentThread().
  DirectContextProvider(scoped_refptr<gl::GLContext> gl_context,
                        scoped_refptr<gl::GLSurface> gl_surface,
                        bool supports_alpha,
                        const gpu::GpuPreferences& gpu_preferences,
                        gpu::gles2::FeatureInfo* feature_info,
                        std::unique_ptr<DirectContextProviderDelegate> client);

  // The in-process service-side decoder backing ContextGL().
  gpu::DecoderContext* decoder() { return decoder_.get(); }

  // Set required state, including texture_client_id as color attachment 0
  // of a currently bound framebuffer. If texture_client_id == 0, set FBO0 as
  // current.
  void SetGLRendererCopierRequiredState(GLuint texture_client_id);

  gpu::gles2::TextureManager* texture_manager();

  // Allocate / free a client-side texture id in the share group's namespace.
  GLuint GenClientTextureId();
  void DeleteClientTextureId(GLuint client_id);

  // Marks the context lost and notifies ContextLostObservers (once).
  void MarkContextLost();

  // Call a glFinish() to complete any pending queries.
  void FinishQueries();

  // ContextProvider implementation.
  void AddRef() const override;
  void Release() const override;
  gpu::ContextResult BindToCurrentThread() override;
  gpu::gles2::GLES2Interface* ContextGL() override;
  gpu::ContextSupport* ContextSupport() override;
  class GrContext* GrContext() override;
  gpu::SharedImageInterface* SharedImageInterface() override;
  ContextCacheController* CacheController() override;
  base::Lock* GetLock() override;
  const gpu::Capabilities& ContextCapabilities() const override;
  const gpu::GpuFeatureInfo& GetGpuFeatureInfo() const override;
  void AddObserver(ContextLostObserver* obs) override;
  void RemoveObserver(ContextLostObserver* obs) override;

  // GpuControl implementation. Several entry points are NOTREACHED() in the
  // .cc because no caller exercises them.
  void SetGpuControlClient(gpu::GpuControlClient*) override;
  const gpu::Capabilities& GetCapabilities() const override;
  int32_t CreateImage(ClientBuffer buffer,
                      size_t width,
                      size_t height) override;
  void DestroyImage(int32_t id) override;
  void SignalQuery(uint32_t query, base::OnceClosure callback) override;
  void CreateGpuFence(uint32_t gpu_fence_id, ClientGpuFence source) override;
  void GetGpuFence(uint32_t gpu_fence_id,
                   base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
                       callback) override;
  void SetLock(base::Lock*) override;
  void EnsureWorkVisible() override;
  gpu::CommandBufferNamespace GetNamespaceID() const override;
  gpu::CommandBufferId GetCommandBufferID() const override;
  void FlushPendingWork() override;
  uint64_t GenerateFenceSyncRelease() override;
  bool IsFenceSyncReleased(uint64_t release) override;
  void SignalSyncToken(const gpu::SyncToken& sync_token,
                       base::OnceClosure callback) override;
  void WaitSyncToken(const gpu::SyncToken& sync_token) override;
  bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
  void SetDisplayTransform(gfx::OverlayTransform transform) override;

 private:
  friend class base::RefCountedThreadSafe<DirectContextProvider>;
  ~DirectContextProvider() override;

  // Tears down the command-buffer stack (client side before service side).
  void Destroy();
  void OnContextLost();

  // base::trace_event::MemoryDumpProvider implementation.
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;

  // Service-side shared state handed to the ContextGroup.
  gpu::gles2::MailboxManagerImpl mailbox_manager_;
  gpu::gles2::TraceOutputter outputter_;
  gpu::gles2::ImageManager image_manager_;
  gpu::ServiceDiscardableManager discardable_manager_;
  gpu::PassthroughDiscardableManager passthrough_discardable_manager_;
  gpu::gles2::ShaderTranslatorCache translator_cache_;
  gpu::gles2::FramebufferCompletenessCache completeness_cache_;
  gpu::GpuFeatureInfo gpu_feature_info_;
  // Snapshotted from the decoder during construction.
  gpu::Capabilities capabilities_;
  gpu::ContextResult context_result_ = gpu::ContextResult::kSuccess;
  std::unique_ptr<DirectContextProviderDelegate> delegate_;

  // Only non-null if BindToCurrentThread() == ContextResult::kSuccess.
  std::unique_ptr<gpu::CommandBufferDirect> command_buffer_;
  std::unique_ptr<gpu::gles2::GLES2CmdHelper> gles2_cmd_helper_;
  std::unique_ptr<gpu::gles2::GLES2Decoder> decoder_;
  std::unique_ptr<gpu::TransferBuffer> transfer_buffer_;
  scoped_refptr<gl::GLContext> gl_context_;
  scoped_refptr<gl::GLSurface> gl_surface_;
  std::unique_ptr<gpu::gles2::GLES2Implementation> gles2_implementation_;

  // Lazily created by SetGLRendererCopierRequiredState(); 0 means "none yet".
  GLuint framebuffer_id_ = 0;

  THREAD_CHECKER(thread_checker_);

  base::ObserverList<ContextLostObserver>::Unchecked observers_;
};
} // namespace viz
#endif // COMPONENTS_VIZ_SERVICE_DISPLAY_EMBEDDER_DIRECT_CONTEXT_PROVIDER_H_
...@@ -51,7 +51,6 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue : public SkiaOutputDevice { ...@@ -51,7 +51,6 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue : public SkiaOutputDevice {
SkSurface* BeginPaint( SkSurface* BeginPaint(
std::vector<GrBackendSemaphore>* end_semaphores) override; std::vector<GrBackendSemaphore>* end_semaphores) override;
void EndPaint() override; void EndPaint() override;
bool supports_alpha() { return true; }
bool IsPrimaryPlaneOverlay() const override; bool IsPrimaryPlaneOverlay() const override;
void SchedulePrimaryPlane( void SchedulePrimaryPlane(
......
...@@ -42,10 +42,6 @@ class SkiaOutputDeviceGL final : public SkiaOutputDevice { ...@@ -42,10 +42,6 @@ class SkiaOutputDeviceGL final : public SkiaOutputDevice {
DidSwapBufferCompleteCallback did_swap_buffer_complete_callback); DidSwapBufferCompleteCallback did_swap_buffer_complete_callback);
~SkiaOutputDeviceGL() override; ~SkiaOutputDeviceGL() override;
bool supports_alpha() {
return supports_alpha_;
}
// SkiaOutputDevice implementation: // SkiaOutputDevice implementation:
bool Reshape(const gfx::Size& size, bool Reshape(const gfx::Size& size,
float device_scale_factor, float device_scale_factor,
......
...@@ -33,8 +33,6 @@ class SkiaOutputDeviceWebView : public SkiaOutputDevice { ...@@ -33,8 +33,6 @@ class SkiaOutputDeviceWebView : public SkiaOutputDevice {
SkiaOutputDeviceWebView& operator=(const SkiaOutputDeviceWebView&) = delete; SkiaOutputDeviceWebView& operator=(const SkiaOutputDeviceWebView&) = delete;
~SkiaOutputDeviceWebView() override; ~SkiaOutputDeviceWebView() override;
bool supports_alpha() const { return supports_alpha_; }
// SkiaOutputDevice implementation: // SkiaOutputDevice implementation:
bool Reshape(const gfx::Size& size, bool Reshape(const gfx::Size& size,
float device_scale_factor, float device_scale_factor,
......
...@@ -8,23 +8,17 @@ ...@@ -8,23 +8,17 @@
#include "base/bind.h" #include "base/bind.h"
#include "base/bind_helpers.h" #include "base/bind_helpers.h"
#include "base/callback_helpers.h" #include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/optional.h" #include "base/optional.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h" #include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event.h"
#include "components/viz/common/features.h"
#include "components/viz/common/frame_sinks/copy_output_request.h" #include "components/viz/common/frame_sinks/copy_output_request.h"
#include "components/viz/common/frame_sinks/copy_output_util.h" #include "components/viz/common/frame_sinks/copy_output_util.h"
#include "components/viz/common/resources/resource_format_utils.h" #include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/skia_helper.h" #include "components/viz/common/skia_helper.h"
#include "components/viz/common/viz_utils.h" #include "components/viz/common/viz_utils.h"
#include "components/viz/service/display/dc_layer_overlay.h" #include "components/viz/service/display/dc_layer_overlay.h"
#include "components/viz/service/display/gl_renderer_copier.h"
#include "components/viz/service/display/output_surface_frame.h" #include "components/viz/service/display/output_surface_frame.h"
#include "components/viz/service/display/overlay_candidate.h" #include "components/viz/service/display/overlay_candidate.h"
#include "components/viz/service/display/texture_deleter.h"
#include "components/viz/service/display_embedder/direct_context_provider.h"
#include "components/viz/service/display_embedder/image_context_impl.h" #include "components/viz/service/display_embedder/image_context_impl.h"
#include "components/viz/service/display_embedder/output_presenter_gl.h" #include "components/viz/service/display_embedder/output_presenter_gl.h"
#include "components/viz/service/display_embedder/skia_output_device.h" #include "components/viz/service/display_embedder/skia_output_device.h"
...@@ -33,20 +27,13 @@ ...@@ -33,20 +27,13 @@
#include "components/viz/service/display_embedder/skia_output_device_offscreen.h" #include "components/viz/service/display_embedder/skia_output_device_offscreen.h"
#include "components/viz/service/display_embedder/skia_output_device_webview.h" #include "components/viz/service/display_embedder/skia_output_device_webview.h"
#include "components/viz/service/display_embedder/skia_output_surface_dependency.h" #include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/common/swap_buffers_complete_params.h" #include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
#include "gpu/command_buffer/service/gr_shader_cache.h" #include "gpu/command_buffer/service/gr_shader_cache.h"
#include "gpu/command_buffer/service/mailbox_manager.h" #include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h" #include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/scheduler.h" #include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h" #include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h" #include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/texture_base.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/config/gpu_preferences.h" #include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/common/command_buffer_id.h" #include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_client_ids.h" #include "gpu/ipc/common/gpu_client_ids.h"
...@@ -54,18 +41,10 @@ ...@@ -54,18 +41,10 @@
#include "gpu/ipc/common/gpu_surface_lookup.h" #include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/vulkan/buildflags.h" #include "gpu/vulkan/buildflags.h"
#include "skia/buildflags.h" #include "skia/buildflags.h"
#include "skia/ext/image_operations.h"
#include "third_party/skia/include/core/SkDeferredDisplayList.h" #include "third_party/skia/include/core/SkDeferredDisplayList.h"
#include "third_party/skia/include/core/SkPixelRef.h"
#include "ui/gfx/color_space.h" #include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/rect_conversions.h" #include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/skia_util.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_surface.h" #include "ui/gl/gl_surface.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/init/gl_factory.h"
#if BUILDFLAG(ENABLE_VULKAN) #if BUILDFLAG(ENABLE_VULKAN)
#include "components/viz/service/display_embedder/skia_output_device_vulkan.h" #include "components/viz/service/display_embedder/skia_output_device_vulkan.h"
...@@ -101,9 +80,6 @@ namespace viz { ...@@ -101,9 +80,6 @@ namespace viz {
namespace { namespace {
constexpr base::TimeDelta kReadbackPollingInterval =
base::TimeDelta::FromMilliseconds(2);
template <typename... Args> template <typename... Args>
void PostAsyncTaskRepeatedly( void PostAsyncTaskRepeatedly(
base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu, base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu,
...@@ -318,55 +294,6 @@ void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::EndAccess() { ...@@ -318,55 +294,6 @@ void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::EndAccess() {
image_contexts_.clear(); image_contexts_.clear();
} }
// Skia gr_context() and |context_provider_| share an underlying GLContext.
// Each of them caches some GL state. Interleaving usage could make cached
// state inconsistent with GL state. Using a ScopedUseContextProvider whenever
// |context_provider_| could be accessed (e.g. processing completed queries),
// will keep cached state consistent with driver GL state.
// RAII scope: on entry, makes the context current and forces driver and
// client-side GL caches into a known state; on exit, tells Skia its cached GL
// state may have been clobbered. Check valid() before using the scope.
class SkiaOutputSurfaceImplOnGpu::ScopedUseContextProvider {
 public:
  ScopedUseContextProvider(SkiaOutputSurfaceImplOnGpu* impl_on_gpu,
                           GLuint texture_client_id)
      : impl_on_gpu_(impl_on_gpu) {
    if (!impl_on_gpu_->MakeCurrent(true /* need_fbo0 */)) {
      // Context can't be made current; the scope is unusable.
      valid_ = false;
      return;
    }

    // GLRendererCopier uses context_provider_->ContextGL(), which caches GL
    // state and removes state setting calls that it considers redundant. To get
    // to a known GL state, we first set driver GL state and then make client
    // side consistent with that.
    auto* api = impl_on_gpu_->api_;
    api->glBindFramebufferEXTFn(GL_FRAMEBUFFER, 0);
    auto* group = impl_on_gpu->context_provider_->decoder()->GetContextGroup();
    if (group->use_passthrough_cmd_decoder()) {
      // Passthrough decoding is not hooked into GLStateRestorer and we must
      // manually reset the context into a known state after Skia is finished.
      api->glUseProgramFn(0);
      api->glActiveTextureFn(GL_TEXTURE0);
      api->glBindBufferFn(GL_ARRAY_BUFFER, 0);
      api->glBindTextureFn(GL_TEXTURE_2D, 0);
    }
    impl_on_gpu_->context_provider_->SetGLRendererCopierRequiredState(
        texture_client_id);
  }

  ~ScopedUseContextProvider() {
    // Skia's cached GL state is stale after the copier ran; invalidate it.
    if (valid_)
      impl_on_gpu_->gr_context()->resetContext();
  }

  bool valid() { return valid_; }

 private:
  SkiaOutputSurfaceImplOnGpu* const impl_on_gpu_;
  bool valid_ = true;

  DISALLOW_COPY_AND_ASSIGN(ScopedUseContextProvider);
};
namespace { namespace {
base::AtomicSequenceNumber g_next_command_buffer_id; base::AtomicSequenceNumber g_next_command_buffer_id;
...@@ -387,330 +314,6 @@ CreateSharedImageRepresentationFactory(SkiaOutputSurfaceDependency* deps, ...@@ -387,330 +314,6 @@ CreateSharedImageRepresentationFactory(SkiaOutputSurfaceDependency* deps,
return std::make_unique<gpu::SharedImageRepresentationFactory>( return std::make_unique<gpu::SharedImageRepresentationFactory>(
deps->GetSharedImageManager(), memory_tracker); deps->GetSharedImageManager(), memory_tracker);
} }
class ScopedSurfaceToTexture {
public:
ScopedSurfaceToTexture(scoped_refptr<DirectContextProvider> context_provider,
SkSurface* surface)
: context_provider_(context_provider),
client_id_(context_provider->GenClientTextureId()) {
GrBackendTexture skia_texture =
surface->getBackendTexture(SkSurface::kFlushRead_BackendHandleAccess);
GrGLTextureInfo gl_texture_info;
skia_texture.getGLTextureInfo(&gl_texture_info);
auto* group = context_provider->decoder()->GetContextGroup();
if (group->use_passthrough_cmd_decoder()) {
group->passthrough_resources()->texture_id_map.SetIDMapping(
client_id_, gl_texture_info.fID);
auto texture = base::MakeRefCounted<gpu::gles2::TexturePassthrough>(
gl_texture_info.fID, gl_texture_info.fTarget, GL_RGBA,
surface->width(), surface->height(),
/*depth=*/1, /*border=*/0,
/*format=*/GL_RGBA, /*type=*/GL_UNSIGNED_BYTE);
group->passthrough_resources()->texture_object_map.SetIDMapping(
client_id_, texture);
} else {
auto* texture_manager = context_provider_->texture_manager();
texture_ref_ =
texture_manager->CreateTexture(client_id_, gl_texture_info.fID);
texture_manager->SetTarget(texture_ref_.get(), gl_texture_info.fTarget);
texture_manager->SetLevelInfo(
texture_ref_.get(), gl_texture_info.fTarget,
/*level=*/0,
/*internal_format=*/GL_RGBA, surface->width(), surface->height(),
/*depth=*/1, /*border=*/0,
/*format=*/GL_RGBA, /*type=*/GL_UNSIGNED_BYTE,
/*cleared_rect=*/gfx::Rect(surface->width(), surface->height()));
}
}
~ScopedSurfaceToTexture() {
auto* group = context_provider_->decoder()->GetContextGroup();
// Skia owns the texture. It will delete it when it is done.
if (group->use_passthrough_cmd_decoder()) {
group->passthrough_resources()
->texture_object_map.GetServiceIDOrInvalid(client_id_)
->MarkContextLost();
} else {
texture_ref_->ForceContextLost();
}
context_provider_->DeleteClientTextureId(client_id());
}
GLuint client_id() { return client_id_; }
private:
scoped_refptr<DirectContextProvider> context_provider_;
const GLuint client_id_;
// This is only used with validating gles cmd decoder
scoped_refptr<gpu::gles2::TextureRef> texture_ref_;
DISALLOW_COPY_AND_ASSIGN(ScopedSurfaceToTexture);
};
// This SingleThreadTaskRunner runs tasks on the GPU main thread, where
// DirectContextProvider can safely service calls. It wraps all posted tasks to
// ensure that |impl_on_gpu_->context_provider_| is made current and in a known
// state when the task is run. If |impl_on_gpu| is destructed, pending tasks are
// no-oped when they are run.
class ContextCurrentTaskRunner : public base::SingleThreadTaskRunner {
public:
explicit ContextCurrentTaskRunner(
base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu)
: real_task_runner_(base::ThreadTaskRunnerHandle::Get()),
impl_on_gpu_(impl_on_gpu) {}
bool PostDelayedTask(const base::Location& from_here,
base::OnceClosure task,
base::TimeDelta delay) override {
return real_task_runner_->PostDelayedTask(
from_here, WrapClosure(std::move(task)), delay);
}
bool PostNonNestableDelayedTask(const base::Location& from_here,
base::OnceClosure task,
base::TimeDelta delay) override {
return real_task_runner_->PostNonNestableDelayedTask(
from_here, WrapClosure(std::move(task)), delay);
}
bool RunsTasksInCurrentSequence() const override {
return real_task_runner_->RunsTasksInCurrentSequence();
}
private:
base::OnceClosure WrapClosure(base::OnceClosure task) {
return base::BindOnce(
[](base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu,
base::OnceClosure task) {
if (!impl_on_gpu)
return;
SkiaOutputSurfaceImplOnGpu::ScopedUseContextProvider scoped_use(
impl_on_gpu.get(), /*texture_client_id=*/0);
if (!scoped_use.valid())
return;
std::move(task).Run();
},
impl_on_gpu_, std::move(task));
}
~ContextCurrentTaskRunner() override = default;
scoped_refptr<base::SingleThreadTaskRunner> real_task_runner_;
base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu_;
DISALLOW_COPY_AND_ASSIGN(ContextCurrentTaskRunner);
};
class DirectContextProviderDelegateImpl : public DirectContextProviderDelegate,
public gpu::SharedImageInterface {
public:
DirectContextProviderDelegateImpl(
const gpu::GpuPreferences& gpu_preferences,
const gpu::GpuDriverBugWorkarounds& workarounds,
const gpu::GpuFeatureInfo& gpu_feature_info,
gpu::SharedContextState* context_state,
gpu::MailboxManager* mailbox_manager,
gpu::SharedImageManager* shared_image_manager,
gpu::MemoryTracker* memory_tracker,
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state)
: shared_image_manager_(shared_image_manager),
shared_image_factory_(gpu_preferences,
workarounds,
gpu_feature_info,
context_state,
mailbox_manager,
shared_image_manager,
nullptr /* image_factory */,
memory_tracker,
true /* is_using_skia_renderer */),
sync_point_client_state_(sync_point_client_state) {}
~DirectContextProviderDelegateImpl() override {
sync_point_client_state_->Destroy();
}
// SharedImageInterface implementation:
gpu::Mailbox CreateSharedImage(ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage,
gpu::SurfaceHandle surface_handle) override {
auto mailbox = gpu::Mailbox::GenerateForSharedImage();
if (shared_image_factory_.CreateSharedImage(
mailbox, format, size, color_space, surface_origin, alpha_type,
surface_handle, usage))
return mailbox;
return gpu::Mailbox();
}
gpu::Mailbox CreateSharedImage(
ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage,
base::span<const uint8_t> pixel_data) override {
auto mailbox = gpu::Mailbox::GenerateForSharedImage();
if (shared_image_factory_.CreateSharedImage(mailbox, format, size,
color_space, surface_origin,
alpha_type, usage, pixel_data))
return mailbox;
return gpu::Mailbox();
}
gpu::Mailbox CreateSharedImage(
gfx::GpuMemoryBuffer* gpu_memory_buffer,
gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
const gfx::ColorSpace& color_space,
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override {
// We do not support creating GMB backed SharedImages.
NOTIMPLEMENTED();
return gpu::Mailbox();
}
void UpdateSharedImage(const gpu::SyncToken& sync_token,
std::unique_ptr<gfx::GpuFence> acquire_fence,
const gpu::Mailbox& mailbox) override {
NOTREACHED();
}
void UpdateSharedImage(const gpu::SyncToken& sync_token,
const gpu::Mailbox& mailbox) override {
DCHECK(!ShouldWait(sync_token))
<< "Cannot UpdateSharedImage with SyncToken from different "
"command buffer.";
shared_image_factory_.UpdateSharedImage(mailbox);
}
void DestroySharedImage(const gpu::SyncToken& sync_token,
const gpu::Mailbox& mailbox) override {
DCHECK(!ShouldWait(sync_token))
<< "Cannot DestroySharedImage with SyncToken from different "
"command buffer.";
shared_image_factory_.DestroySharedImage(mailbox);
}
SwapChainMailboxes CreateSwapChain(ResourceFormat format,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
GrSurfaceOrigin surface_origin,
SkAlphaType alpha_type,
uint32_t usage) override {
NOTREACHED();
return {};
}
void PresentSwapChain(const gpu::SyncToken& sync_token,
const gpu::Mailbox& mailbox) override {
NOTREACHED();
}
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
zx::channel token,
gfx::BufferFormat format,
gfx::BufferUsage usage) override {
NOTREACHED();
}
void ReleaseSysmemBufferCollection(
gfx::SysmemBufferCollectionId id) override {
NOTREACHED();
}
#endif // defined(OS_FUCHSIA)
gpu::SyncToken GenUnverifiedSyncToken() override {
return gpu::SyncToken(sync_point_client_state_->namespace_id(),
sync_point_client_state_->command_buffer_id(),
GenerateFenceSyncRelease());
}
gpu::SyncToken GenVerifiedSyncToken() override {
gpu::SyncToken sync_token = GenUnverifiedSyncToken();
sync_token.SetVerifyFlush();
return sync_token;
}
void WaitSyncToken(const gpu::SyncToken& sync_token) override {
NOTREACHED();
}
void Flush() override {
// No need to flush in this implementation.
}
scoped_refptr<gfx::NativePixmap> GetNativePixmap(
const gpu::Mailbox& mailbox) override {
DCHECK(shared_image_manager_->is_thread_safe());
return shared_image_manager_->GetNativePixmap(mailbox);
}
// DirectContextProviderDelegate implementation.
gpu::SharedImageManager* GetSharedImageManager() override {
return shared_image_manager_;
}
gpu::SharedImageInterface* GetSharedImageInterface() override { return this; }
gpu::CommandBufferNamespace GetNamespaceID() const override {
return sync_point_client_state_->namespace_id();
}
gpu::CommandBufferId GetCommandBufferID() const override {
return sync_point_client_state_->command_buffer_id();
}
uint64_t GenerateFenceSyncRelease() override {
uint64_t release = ++sync_fence_release_;
// Release fence immediately because the relevant GPU calls were already
// issued.
sync_point_client_state_->ReleaseFenceSync(release);
return release;
}
void SignalSyncToken(const gpu::SyncToken& sync_token,
base::OnceClosure callback) override {
base::RepeatingClosure maybe_pass_callback =
base::AdaptCallbackForRepeating(std::move(callback));
if (!sync_point_client_state_->Wait(sync_token, maybe_pass_callback)) {
maybe_pass_callback.Run();
}
}
private:
bool ShouldWait(const gpu::SyncToken& sync_token) {
// Don't wait on an invalid SyncToken.
if (!sync_token.HasData())
return false;
// Don't wait on SyncTokens our own sync tokens because we've already issued
// the relevant calls to the GPU.
return sync_point_client_state_->namespace_id() !=
sync_token.namespace_id() ||
sync_point_client_state_->command_buffer_id() !=
sync_token.command_buffer_id();
}
gpu::SharedImageManager* const shared_image_manager_;
gpu::SharedImageFactory shared_image_factory_;
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;
uint64_t sync_fence_release_ = 0;
DISALLOW_COPY_AND_ASSIGN(DirectContextProviderDelegateImpl);
};
} // namespace } // namespace
// Offscreen surfaces for render passes. It can only be accessed on GPU // Offscreen surfaces for render passes. It can only be accessed on GPU
...@@ -835,14 +438,12 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu( ...@@ -835,14 +438,12 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
vulkan_context_provider_(dependency_->GetVulkanContextProvider()), vulkan_context_provider_(dependency_->GetVulkanContextProvider()),
dawn_context_provider_(dependency_->GetDawnContextProvider()), dawn_context_provider_(dependency_->GetDawnContextProvider()),
renderer_settings_(renderer_settings), renderer_settings_(renderer_settings),
sequence_id_(sequence_id),
did_swap_buffer_complete_callback_( did_swap_buffer_complete_callback_(
std::move(did_swap_buffer_complete_callback)), std::move(did_swap_buffer_complete_callback)),
context_lost_callback_(std::move(context_lost_callback)), context_lost_callback_(std::move(context_lost_callback)),
gpu_vsync_callback_(std::move(gpu_vsync_callback)), gpu_vsync_callback_(std::move(gpu_vsync_callback)),
gpu_preferences_(dependency_->GetGpuPreferences()), gpu_preferences_(dependency_->GetGpuPreferences()),
display_context_(std::make_unique<DisplayContext>(deps, this)), display_context_(std::make_unique<DisplayContext>(deps, this)) {
copier_active_url_(GURL("chrome://gpu/SkiaRendererGLRendererCopier")) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
weak_ptr_ = weak_ptr_factory_.GetWeakPtr(); weak_ptr_ = weak_ptr_factory_.GetWeakPtr();
...@@ -866,18 +467,6 @@ SkiaOutputSurfaceImplOnGpu::~SkiaOutputSurfaceImplOnGpu() { ...@@ -866,18 +467,6 @@ SkiaOutputSurfaceImplOnGpu::~SkiaOutputSurfaceImplOnGpu() {
} }
} }
if (copier_) {
context_provider_->FinishQueries();
copier_ = nullptr;
texture_deleter_ = nullptr;
context_provider_ = nullptr;
// Destroying context_provider_ will ReleaseCurrent. MakeCurrent again for
// the rest of this dtor.
MakeCurrent(false /* need_fbo0 */);
}
sync_point_client_state_->Destroy(); sync_point_client_state_->Destroy();
} }
...@@ -1142,41 +731,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -1142,41 +731,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
return false; return false;
} }
if (use_gl_renderer_copier_)
gpu::ContextUrl::SetActiveUrl(copier_active_url_);
// Lazy initialize GLRendererCopier before draw because
// DirectContextProvider ctor the backbuffer.
if (use_gl_renderer_copier_ && !copier_) {
auto client = std::make_unique<DirectContextProviderDelegateImpl>(
gpu_preferences_, dependency_->GetGpuDriverBugWorkarounds(),
dependency_->GetGpuFeatureInfo(), context_state_.get(),
dependency_->GetMailboxManager(), dependency_->GetSharedImageManager(),
memory_tracker_.get(),
CreateSyncPointClientState(dependency_, sequence_id_));
context_provider_ = base::MakeRefCounted<DirectContextProvider>(
context_state_->context(), gl_surface_, supports_alpha_,
gpu_preferences_, feature_info_.get(), std::move(client));
auto result = context_provider_->BindToCurrentThread();
if (result != gpu::ContextResult::kSuccess) {
DLOG(ERROR) << "Couldn't initialize GLRendererCopier";
context_provider_ = nullptr;
return false;
}
context_current_task_runner_ =
base::MakeRefCounted<ContextCurrentTaskRunner>(weak_ptr_);
texture_deleter_ =
std::make_unique<TextureDeleter>(context_current_task_runner_);
copier_ = std::make_unique<GLRendererCopier>(context_provider_,
texture_deleter_.get());
copier_->set_async_gl_task_runner(context_current_task_runner_);
// DirectContextProvider changed GL state. Reset Skia state tracking
// for potential draw below.
gr_context()->resetContext();
}
bool from_fbo0 = !id; bool from_fbo0 = !id;
DCHECK(scoped_output_device_paint_ || !from_fbo0); DCHECK(scoped_output_device_paint_ || !from_fbo0);
...@@ -1202,41 +756,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -1202,41 +756,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
surface->flush(); surface->flush();
} }
if (use_gl_renderer_copier_) {
surface->flush();
GLuint gl_id = 0;
GLenum internal_format = supports_alpha_ ? GL_RGBA : GL_RGB;
bool flipped = from_fbo0 && capabilities().output_surface_origin ==
gfx::SurfaceOrigin::kBottomLeft;
// readback_offset is in window co-ordinate space and must take into account
// flipping.
if (flipped) {
geometry.readback_offset.set_y(
size_.height() -
(geometry.readback_offset.y() + geometry.result_selection.height()));
}
base::Optional<ScopedSurfaceToTexture> texture_mapper;
if (!from_fbo0 || dependency_->IsOffscreen() ||
gl_surface_->IsSurfaceless()) {
texture_mapper.emplace(context_provider_.get(), surface);
gl_id = texture_mapper.value().client_id();
internal_format = GL_RGBA;
}
gfx::Size surface_size(surface->width(), surface->height());
ScopedUseContextProvider use_context_provider(this, gl_id);
copier_->CopyFromTextureOrFramebuffer(std::move(request), geometry,
internal_format, gl_id, surface_size,
flipped, color_space);
if (decoder()->HasMoreIdleWork() || decoder()->HasPendingQueries())
ScheduleDelayedWork();
return true;
}
base::Optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use; base::Optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
if (dependency_->GetGrShaderCache()) { if (dependency_->GetGrShaderCache()) {
cache_use.emplace(dependency_->GetGrShaderCache(), cache_use.emplace(dependency_->GetGrShaderCache(),
...@@ -1312,37 +831,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -1312,37 +831,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
return true; return true;
} }
gpu::DecoderContext* SkiaOutputSurfaceImplOnGpu::decoder() {
return context_provider_->decoder();
}
void SkiaOutputSurfaceImplOnGpu::ScheduleDelayedWork() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (delayed_work_pending_)
return;
delayed_work_pending_ = true;
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&SkiaOutputSurfaceImplOnGpu::PerformDelayedWork,
weak_ptr_factory_.GetWeakPtr()),
kReadbackPollingInterval);
}
void SkiaOutputSurfaceImplOnGpu::PerformDelayedWork() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
gpu::ContextUrl::SetActiveUrl(copier_active_url_);
ScopedUseContextProvider use_context_provider(this, /*texture_client_id=*/0);
delayed_work_pending_ = false;
if (MakeCurrent(true /* need_fbo0 */)) {
decoder()->PerformIdleWork();
decoder()->ProcessPendingQueries(false);
if (decoder()->HasMoreIdleWork() || decoder()->HasPendingQueries()) {
ScheduleDelayedWork();
}
}
}
void SkiaOutputSurfaceImplOnGpu::BeginAccessImages( void SkiaOutputSurfaceImplOnGpu::BeginAccessImages(
const std::vector<ImageContextImpl*>& image_contexts, const std::vector<ImageContextImpl*>& image_contexts,
std::vector<GrBackendSemaphore>* begin_semaphores, std::vector<GrBackendSemaphore>* begin_semaphores,
...@@ -1479,8 +967,7 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() { ...@@ -1479,8 +967,7 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() {
if (!InitializeForGL()) if (!InitializeForGL())
return false; return false;
} }
use_gl_renderer_copier_ = !is_using_vulkan() && !is_using_dawn() &&
!features::IsUsingSkiaForGLReadback();
max_resource_cache_bytes_ = max_resource_cache_bytes_ =
context_state_->gr_context()->getResourceCacheLimit(); context_state_->gr_context()->getResourceCacheLimit();
if (context_state_) if (context_state_)
...@@ -1490,11 +977,6 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() { ...@@ -1490,11 +977,6 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() {
} }
bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() { bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
auto* context = context_state_->real_context();
auto* current_gl = context->GetCurrentGL();
api_ = current_gl->Api;
gl_version_info_ = context->GetVersionInfo();
gl::GLSurfaceFormat format; gl::GLSurfaceFormat format;
if (PreferRGB565ResourcesForDisplay() && if (PreferRGB565ResourcesForDisplay() &&
!renderer_settings_.requires_alpha_channel) { !renderer_settings_.requires_alpha_channel) {
...@@ -1510,7 +992,6 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() { ...@@ -1510,7 +992,6 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
context_state_, gfx::SurfaceOrigin::kTopLeft, context_state_, gfx::SurfaceOrigin::kTopLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else { } else {
gl_surface_ = gl_surface_ =
dependency_->CreateGLSurface(weak_ptr_factory_.GetWeakPtr(), format); dependency_->CreateGLSurface(weak_ptr_factory_.GetWeakPtr(), format);
...@@ -1520,31 +1001,22 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() { ...@@ -1520,31 +1001,22 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
if (MakeCurrent(true /* need_fbo0 */)) { if (MakeCurrent(true /* need_fbo0 */)) {
if (gl_surface_->IsSurfaceless()) { if (gl_surface_->IsSurfaceless()) {
std::unique_ptr<SkiaOutputDeviceBufferQueue> onscreen_device = output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
std::make_unique<SkiaOutputDeviceBufferQueue>( std::make_unique<OutputPresenterGL>(gl_surface_, dependency_,
std::make_unique<OutputPresenterGL>(gl_surface_, dependency_, memory_tracker_.get()),
memory_tracker_.get()), dependency_, memory_tracker_.get(),
dependency_, memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
} else { } else {
if (dependency_->NeedsSupportForExternalStencil()) { if (dependency_->NeedsSupportForExternalStencil()) {
std::unique_ptr<SkiaOutputDeviceWebView> onscreen_device = output_device_ = std::make_unique<SkiaOutputDeviceWebView>(
std::make_unique<SkiaOutputDeviceWebView>( context_state_.get(), gl_surface_, memory_tracker_.get(),
context_state_.get(), gl_surface_, memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
} else { } else {
std::unique_ptr<SkiaOutputDeviceGL> onscreen_device = output_device_ = std::make_unique<SkiaOutputDeviceGL>(
std::make_unique<SkiaOutputDeviceGL>( dependency_->GetMailboxManager(), context_state_.get(),
dependency_->GetMailboxManager(), context_state_.get(), gl_surface_, feature_info_, memory_tracker_.get(),
gl_surface_, feature_info_, memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
GetDidSwapBuffersCompleteCallback());
supports_alpha_ = onscreen_device->supports_alpha();
output_device_ = std::move(onscreen_device);
} }
} }
} else { } else {
...@@ -1565,11 +1037,9 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() { ...@@ -1565,11 +1037,9 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
context_state_, gfx::SurfaceOrigin::kBottomLeft, context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else { } else {
#if defined(USE_X11) #if defined(USE_X11)
if (!features::IsUsingOzonePlatform()) { if (!features::IsUsingOzonePlatform()) {
supports_alpha_ = true;
if (!gpu_preferences_.disable_vulkan_surface) { if (!gpu_preferences_.disable_vulkan_surface) {
output_device_ = SkiaOutputDeviceVulkan::Create( output_device_ = SkiaOutputDeviceVulkan::Create(
vulkan_context_provider_, dependency_->GetSurfaceHandle(), vulkan_context_provider_, dependency_->GetSurfaceHandle(),
...@@ -1626,7 +1096,6 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() { ...@@ -1626,7 +1096,6 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
context_state_, gfx::SurfaceOrigin::kBottomLeft, context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
supports_alpha_ = renderer_settings_.requires_alpha_channel;
} else { } else {
#if defined(USE_X11) #if defined(USE_X11)
// TODO(sgilhuly): Set up a Vulkan swapchain so that Linux can also use // TODO(sgilhuly): Set up a Vulkan swapchain so that Linux can also use
...@@ -1851,8 +1320,6 @@ void SkiaOutputSurfaceImplOnGpu::MarkContextLost(ContextLostReason reason) { ...@@ -1851,8 +1320,6 @@ void SkiaOutputSurfaceImplOnGpu::MarkContextLost(ContextLostReason reason) {
context_state_->MarkContextLost(); context_state_->MarkContextLost();
if (context_lost_callback_) { if (context_lost_callback_) {
PostTaskToClientThread(std::move(context_lost_callback_)); PostTaskToClientThread(std::move(context_lost_callback_));
if (context_provider_)
context_provider_->MarkContextLost();
} }
} }
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include "gpu/command_buffer/common/sync_token.h" #include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/sync_point_manager.h" #include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/in_process_command_buffer.h"
#include "gpu/ipc/service/context_url.h" #include "gpu/ipc/service/context_url.h"
#include "gpu/ipc/service/display_context.h" #include "gpu/ipc/service/display_context.h"
#include "gpu/ipc/service/image_transport_surface_delegate.h" #include "gpu/ipc/service/image_transport_surface_delegate.h"
...@@ -41,11 +40,11 @@ class ColorSpace; ...@@ -41,11 +40,11 @@ class ColorSpace;
} }
namespace gl { namespace gl {
class GLApi;
class GLSurface; class GLSurface;
} }
namespace gpu { namespace gpu {
class SharedImageRepresentationFactory;
class SyncPointClientState; class SyncPointClientState;
} }
...@@ -58,10 +57,7 @@ class PlatformWindowSurface; ...@@ -58,10 +57,7 @@ class PlatformWindowSurface;
namespace viz { namespace viz {
class DawnContextProvider; class DawnContextProvider;
class DirectContextProvider;
class GLRendererCopier;
class ImageContextImpl; class ImageContextImpl;
class TextureDeleter;
class VulkanContextProvider; class VulkanContextProvider;
namespace copy_output { namespace copy_output {
...@@ -74,8 +70,6 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -74,8 +70,6 @@ class SkiaOutputSurfaceImplOnGpu
: public gpu::ImageTransportSurfaceDelegate, : public gpu::ImageTransportSurfaceDelegate,
public gpu::SharedContextState::ContextLostObserver { public gpu::SharedContextState::ContextLostObserver {
public: public:
class ScopedUseContextProvider;
using DidSwapBufferCompleteCallback = using DidSwapBufferCompleteCallback =
base::RepeatingCallback<void(gpu::SwapBuffersCompleteParams, base::RepeatingCallback<void(gpu::SwapBuffersCompleteParams,
const gfx::Size& pixel_size)>; const gfx::Size& pixel_size)>;
...@@ -167,7 +161,6 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -167,7 +161,6 @@ class SkiaOutputSurfaceImplOnGpu
void EndAccessImages(const base::flat_set<ImageContextImpl*>& image_contexts); void EndAccessImages(const base::flat_set<ImageContextImpl*>& image_contexts);
sk_sp<GrContextThreadSafeProxy> GetGrContextThreadSafeProxy(); sk_sp<GrContextThreadSafeProxy> GetGrContextThreadSafeProxy();
const gl::GLVersionInfo* gl_version_info() const { return gl_version_info_; }
size_t max_resource_cache_bytes() const { return max_resource_cache_bytes_; } size_t max_resource_cache_bytes() const { return max_resource_cache_bytes_; }
void ReleaseImageContexts( void ReleaseImageContexts(
std::vector<std::unique_ptr<ExternalUseClient::ImageContext>> std::vector<std::unique_ptr<ExternalUseClient::ImageContext>>
...@@ -244,10 +237,6 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -244,10 +237,6 @@ class SkiaOutputSurfaceImplOnGpu
OutputSurfaceFrame* frame = nullptr); OutputSurfaceFrame* frame = nullptr);
GrContext* gr_context() { return context_state_->gr_context(); } GrContext* gr_context() { return context_state_->gr_context(); }
gpu::DecoderContext* decoder();
void ScheduleDelayedWork();
void PerformDelayedWork();
bool is_using_vulkan() const { bool is_using_vulkan() const {
return !!vulkan_context_provider_ && return !!vulkan_context_provider_ &&
...@@ -296,16 +285,12 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -296,16 +285,12 @@ class SkiaOutputSurfaceImplOnGpu
VulkanContextProvider* const vulkan_context_provider_; VulkanContextProvider* const vulkan_context_provider_;
DawnContextProvider* const dawn_context_provider_; DawnContextProvider* const dawn_context_provider_;
const RendererSettings renderer_settings_; const RendererSettings renderer_settings_;
// This is only used to lazily create DirectContextProviderDelegate for
// readback using GLRendererCopier.
// TODO(samans): Remove |sequence_id| once readback always uses Skia.
const gpu::SequenceId sequence_id_;
// Should only be run on the client thread with PostTaskToClientThread(). // Should only be run on the client thread with PostTaskToClientThread().
DidSwapBufferCompleteCallback did_swap_buffer_complete_callback_; DidSwapBufferCompleteCallback did_swap_buffer_complete_callback_;
BufferPresentedCallback buffer_presented_callback_; BufferPresentedCallback buffer_presented_callback_;
ContextLostCallback context_lost_callback_; ContextLostCallback context_lost_callback_;
GpuVSyncCallback gpu_vsync_callback_; GpuVSyncCallback gpu_vsync_callback_;
bool use_gl_renderer_copier_ = false;
#if defined(USE_OZONE) #if defined(USE_OZONE)
// This should outlive gl_surface_ and vulkan_surface_. // This should outlive gl_surface_ and vulkan_surface_.
...@@ -317,7 +302,6 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -317,7 +302,6 @@ class SkiaOutputSurfaceImplOnGpu
gfx::ColorSpace color_space_; gfx::ColorSpace color_space_;
scoped_refptr<gl::GLSurface> gl_surface_; scoped_refptr<gl::GLSurface> gl_surface_;
scoped_refptr<gpu::SharedContextState> context_state_; scoped_refptr<gpu::SharedContextState> context_state_;
const gl::GLVersionInfo* gl_version_info_ = nullptr;
size_t max_resource_cache_bytes_ = 0u; size_t max_resource_cache_bytes_ = 0u;
std::unique_ptr<DisplayContext> display_context_; std::unique_ptr<DisplayContext> display_context_;
...@@ -350,21 +334,9 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -350,21 +334,9 @@ class SkiaOutputSurfaceImplOnGpu
base::flat_map<RenderPassId, OffscreenSurface> offscreen_surfaces_; base::flat_map<RenderPassId, OffscreenSurface> offscreen_surfaces_;
scoped_refptr<base::SingleThreadTaskRunner> context_current_task_runner_;
scoped_refptr<DirectContextProvider> context_provider_;
std::unique_ptr<TextureDeleter> texture_deleter_;
std::unique_ptr<GLRendererCopier> copier_;
bool delayed_work_pending_ = false;
gl::GLApi* api_ = nullptr;
bool supports_alpha_ = false;
// Micro-optimization to get to issuing GPU SwapBuffers as soon as possible. // Micro-optimization to get to issuing GPU SwapBuffers as soon as possible.
std::vector<sk_sp<SkDeferredDisplayList>> destroy_after_swap_; std::vector<sk_sp<SkDeferredDisplayList>> destroy_after_swap_;
const gpu::ContextUrl copier_active_url_;
int num_readbacks_pending_ = 0; int num_readbacks_pending_ = 0;
bool readback_poll_pending_ = false; bool readback_poll_pending_ = false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment