Commit 28f296ff authored by Brian Ho's avatar Brian Ho Committed by Commit Bot

Reland "viz: RGBA_TEXTURE copy requests in SkiaRenderer"

The original CL was reverted [1] because it broke a few tests
running on SkiaRenderer Vk (namely Pixel_Video_Context_Loss_MP4).
This was because we only ensure the GL context is current in the
ImplOnGpu::InitializeForGL path, but SharedImageFactory always
creates a GL backing factory which in turn invokes a few GL API
calls in its constructor.

This CL fixes the issue by always making the GL context current
regardless of Skia backend.

[1] https://bugs.chromium.org/p/chromium/issues/detail?id=1111312

Bug: 1111312, 1046788, 971257, 1098435
Change-Id: Ifa5d91dc6d96d2ecc4144be95221cd188095f0e7
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2333991
Commit-Queue: Brian Ho <hob@chromium.org>
Reviewed-by: default avatarJonathan Backer <backer@chromium.org>
Reviewed-by: default avatarKhushal <khushalsagar@chromium.org>
Cr-Commit-Position: refs/heads/master@{#795118}
parent 1ec951ff
......@@ -420,12 +420,7 @@ ReadbackTestConfig const kTestConfigs[] = {
ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap},
// TODO(crbug.com/1046788): The skia readback path doesn't support
// RGBA_TEXTURE readback requests yet. Don't run these tests on platforms
// that have UseSkiaForGLReadback enabled by default.
//
// ReadbackTestConfig{TestRendererType::kSkiaGL,
// TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap},
#if defined(ENABLE_CC_VULKAN_TESTS)
ReadbackTestConfig{TestRendererType::kSkiaVk, TestReadBackType::kBitmap},
......@@ -445,12 +440,7 @@ ReadbackTestConfig const kMaybeVulkanTestConfigs[] = {
ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap},
// TODO(crbug.com/1046788): The skia readback path doesn't support
// RGBA_TEXTURE readback requests yet. Don't run these tests on platforms
// that have UseSkiaForGLReadback enabled by default.
//
// ReadbackTestConfig{TestRendererType::kSkiaGL,
// TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap},
#if defined(ENABLE_CC_VULKAN_TESTS) && !defined(THREAD_SANITIZER) && \
!defined(MEMORY_SANITIZER)
......
......@@ -9,6 +9,7 @@
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/optional.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "components/viz/common/frame_sinks/copy_output_request.h"
......@@ -115,45 +116,6 @@ struct ReadPixelsContext {
base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu;
};
// gpu::MemoryTracker that forwards every allocation change to a parent
// Observer, tagging each report with a fixed CommandBufferId and the SKIA
// peak-memory allocation source. (This CL deletes the class in favor of
// SharedContextState's own MemoryTracker.)
class SharedImageSubMemoryTracker : public gpu::MemoryTracker {
public:
SharedImageSubMemoryTracker(gpu::CommandBufferId command_buffer_id,
uint64_t client_tracing_id,
Observer* observer)
: command_buffer_id_(command_buffer_id),
client_tracing_id_(client_tracing_id),
observer_(observer) {}
SharedImageSubMemoryTracker(const SharedImageSubMemoryTracker&) = delete;
SharedImageSubMemoryTracker& operator=(const SharedImageSubMemoryTracker&) =
delete;
// All tracked memory must have been released before destruction.
~SharedImageSubMemoryTracker() override { DCHECK(!size_); }
// MemoryTracker implementation:
void TrackMemoryAllocatedChange(int64_t delta) override {
// |delta| may be negative, but must never underflow the tracked total.
DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
DCHECK(observer_);
observer_->OnMemoryAllocatedChange(
command_buffer_id_, old_size, size_,
gpu::GpuPeakMemoryAllocationSource::SKIA);
}
uint64_t GetSize() const override { return size_; }
uint64_t ClientTracingId() const override { return client_tracing_id_; }
int ClientId() const override {
// The channel id is encoded in the command-buffer id.
return gpu::ChannelIdFromCommandBufferId(command_buffer_id_);
}
uint64_t ContextGroupTracingId() const override {
return command_buffer_id_.GetUnsafeValue();
}
private:
gpu::CommandBufferId command_buffer_id_;
const uint64_t client_tracing_id_;
MemoryTracker::Observer* const observer_;
// Running total of bytes currently allocated under this tracker.
uint64_t size_ = 0;
};
class CopyOutputResultYUV : public CopyOutputResult {
public:
CopyOutputResultYUV(const gfx::Rect& rect,
......@@ -427,14 +389,9 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
feature_info_(std::move(feature_info)),
sync_point_client_state_(
CreateSyncPointClientState(dependency_, sequence_id)),
memory_tracker_(std::make_unique<SharedImageSubMemoryTracker>(
sync_point_client_state_->command_buffer_id(),
base::trace_event::MemoryDumpManager::GetInstance()
->GetTracingProcessId(),
dependency_->GetSharedContextState()->memory_tracker())),
memory_tracker_(dependency_->GetSharedContextState()->memory_tracker()),
shared_image_representation_factory_(
CreateSharedImageRepresentationFactory(dependency_,
memory_tracker_.get())),
CreateSharedImageRepresentationFactory(dependency_, memory_tracker_)),
vulkan_context_provider_(dependency_->GetVulkanContextProvider()),
dawn_context_provider_(dependency_->GetDawnContextProvider()),
renderer_settings_(renderer_settings),
......@@ -714,6 +671,15 @@ void SkiaOutputSurfaceImplOnGpu::RemoveRenderPassResource(
// |image_contexts| will go out of scope and be destroyed now.
}
// Trampoline for a ReleaseCallback that is invoked on the main thread but
// whose work must run on the impl (GPU) thread: re-posts |callback| with the
// final |sync_token| / |is_lost| state to |impl_task_runner|. Bound together
// with base::ThreadTaskRunnerHandle::Get() at CopyOutput time (see the
// RGBA_TEXTURE path below).
static void PostTaskFromMainToImplThread(
scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
ReleaseCallback callback,
const gpu::SyncToken& sync_token,
bool is_lost) {
impl_task_runner->PostTask(
FROM_HERE, base::BindOnce(std::move(callback), sync_token, is_lost));
}
bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
RenderPassId id,
copy_output::RenderPassGeometry geometry,
......@@ -765,10 +731,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
gpu::kInProcessCommandBufferClientId);
}
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
// For downscaling, use the GOOD quality setting (appropriate for
// thumbnailing); and, for upscaling, use the BEST quality.
bool is_downscale_in_both_dimensions =
......@@ -807,6 +769,9 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
std::make_unique<ReadPixelsContext>(std::move(request),
geometry.result_selection,
color_space, weak_ptr_);
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
surface->asyncRescaleAndReadPixelsYUV420(
kRec709_SkYUVColorSpace, SkColorSpace::MakeSRGB(), src_rect,
{geometry.result_selection.width(), geometry.result_selection.height()},
......@@ -824,16 +789,97 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
std::make_unique<ReadPixelsContext>(std::move(request),
geometry.result_selection,
color_space, weak_ptr_);
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
surface->asyncRescaleAndReadPixels(
dst_info, src_rect, SkSurface::RescaleGamma::kSrc, filter_quality,
&OnRGBAReadbackDone, context.release());
} else if (request->result_format() ==
CopyOutputRequest::ResultFormat::RGBA_TEXTURE) {
gpu::Mailbox mailbox = gpu::Mailbox::GenerateForSharedImage();
constexpr auto kUsage = gpu::SHARED_IMAGE_USAGE_GLES2 |
gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT |
gpu::SHARED_IMAGE_USAGE_RASTER |
gpu::SHARED_IMAGE_USAGE_DISPLAY;
bool result = shared_image_factory_->CreateSharedImage(
mailbox, ResourceFormat::RGBA_8888,
gfx::Size(geometry.result_bounds.width(),
geometry.result_bounds.height()),
color_space, kBottomLeft_GrSurfaceOrigin, kUnpremul_SkAlphaType,
gpu::kNullSurfaceHandle, kUsage);
if (!result) {
DLOG(ERROR) << "Failed to create shared image.";
return false;
}
auto representation = dependency_->GetSharedImageManager()->ProduceSkia(
mailbox, context_state_->memory_type_tracker(), context_state_);
shared_image_factory_->DestroySharedImage(mailbox);
SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
representation->SetCleared();
auto scoped_write = representation->BeginScopedWriteAccess(
0 /* final_msaa_count */, surface_props, &begin_semaphores,
&end_semaphores,
gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
SkSurface* dest_surface = scoped_write->surface();
dest_surface->wait(begin_semaphores.size(), begin_semaphores.data());
SkCanvas* dest_canvas = dest_surface->getCanvas();
if (request->is_scaled()) {
dest_canvas->scale(request->scale_from().x() / request->scale_to().x(),
request->scale_from().y() / request->scale_to().y());
}
SkPaint paint;
paint.setFilterQuality(filter_quality);
sk_sp<SkImage> image = surface->makeImageSnapshot(src_rect);
dest_canvas->clipRect(
SkRect::MakeXYWH(0, 0, src_rect.width(), src_rect.height()));
surface->draw(dest_canvas, -src_rect.x(), -src_rect.y(), &paint);
GrFlushInfo flush_info;
flush_info.fNumSemaphores = end_semaphores.size();
flush_info.fSignalSemaphores = end_semaphores.data();
gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
&flush_info);
auto flush_result = dest_surface->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
if (flush_result != GrSemaphoresSubmitted::kYes &&
!(begin_semaphores.empty() && end_semaphores.empty())) {
// TODO(penghuang): handle vulkan device lost.
DLOG(ERROR) << "dest_surface->flush() failed.";
return false;
}
auto release_callback = base::BindOnce(
&SkiaOutputSurfaceImplOnGpu::DestroySharedImageOnImplThread,
weak_ptr_factory_.GetWeakPtr(), std::move(representation),
context_state_);
auto main_callback = SingleReleaseCallback::Create(base::BindOnce(
&PostTaskFromMainToImplThread, base::ThreadTaskRunnerHandle::Get(),
std::move(release_callback)));
request->SendResult(std::make_unique<CopyOutputTextureResult>(
geometry.result_bounds, mailbox, gpu::SyncToken(), color_space,
std::move(main_callback)));
} else {
NOTIMPLEMENTED(); // ResultFormat::RGBA_TEXTURE
NOTREACHED();
}
ScheduleCheckReadbackCompletion();
return true;
}
// Destroys the Skia representation backing an RGBA_TEXTURE copy result once
// the consumer releases it. Runs on the impl (GPU) thread (posted via
// PostTaskFromMainToImplThread). |context_state| is bound into the callback
// to keep the SharedContextState alive until the representation is gone;
// |sync_token| and |is_lost| are unused here.
void SkiaOutputSurfaceImplOnGpu::DestroySharedImageOnImplThread(
std::unique_ptr<gpu::SharedImageRepresentationSkia> representation,
scoped_refptr<gpu::SharedContextState> context_state,
const gpu::SyncToken& sync_token,
bool is_lost) {
// NOTE(review): uses the |context_state_| member rather than the
// |context_state| parameter — equivalent while both reference the same
// object, but worth confirming that is always the case.
context_state_->MakeCurrent(nullptr);
// Dropping the representation releases the shared image access.
representation.reset();
}
void SkiaOutputSurfaceImplOnGpu::BeginAccessImages(
const std::vector<ImageContextImpl*>& image_contexts,
std::vector<GrBackendSemaphore>* begin_semaphores,
......@@ -932,7 +978,7 @@ void SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting(
DCHECK(dependency_->IsOffscreen());
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, capabilities.output_surface_origin,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
}
......@@ -969,6 +1015,23 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() {
return false;
}
// Even with Vulkan/Dawn compositing, the SharedImageFactory constructor
// always initializes a GL-backed SharedImage factory to fall back on.
// Creating the SharedImageBackingFactoryGLTexture invokes GL API calls, so
// we need to ensure there is a current GL context.
if (!context_state_->MakeCurrent(nullptr, true /* need_gl */)) {
LOG(ERROR) << "Failed to make current during initialization.";
return false;
}
context_state_->set_need_context_state_reset(true);
shared_image_factory_ = std::make_unique<gpu::SharedImageFactory>(
dependency_->GetGpuPreferences(),
dependency_->GetGpuDriverBugWorkarounds(),
dependency_->GetGpuFeatureInfo(),
dependency_->GetSharedContextState().get(),
dependency_->GetMailboxManager(), dependency_->GetSharedImageManager(),
dependency_->GetGpuImageFactory(), memory_tracker_,
true /* enable_wrapped_sk_image */),
max_resource_cache_bytes_ =
context_state_->gr_context()->getResourceCacheLimit();
if (context_state_)
......@@ -991,7 +1054,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kTopLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
gl_surface_ =
......@@ -1004,20 +1067,19 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
if (gl_surface_->IsSurfaceless()) {
output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
std::make_unique<OutputPresenterGL>(gl_surface_, dependency_,
memory_tracker_.get()),
dependency_, memory_tracker_.get(),
GetDidSwapBuffersCompleteCallback());
memory_tracker_),
dependency_, memory_tracker_, GetDidSwapBuffersCompleteCallback());
} else {
if (dependency_->NeedsSupportForExternalStencil()) {
output_device_ = std::make_unique<SkiaOutputDeviceWebView>(
context_state_.get(), gl_surface_, memory_tracker_.get(),
context_state_.get(), gl_surface_, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
output_device_ = std::make_unique<SkiaOutputDeviceGL>(
dependency_->GetMailboxManager(),
shared_image_representation_factory_.get(), context_state_.get(),
gl_surface_, feature_info_, memory_tracker_.get(),
gl_surface_, feature_info_, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
}
}
......@@ -1037,7 +1099,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
if (dependency_->IsOffscreen()) {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
#if defined(USE_X11)
......@@ -1045,22 +1107,22 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
if (!gpu_preferences_.disable_vulkan_surface) {
output_device_ = SkiaOutputDeviceVulkan::Create(
vulkan_context_provider_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
memory_tracker_, GetDidSwapBuffersCompleteCallback());
}
if (!output_device_) {
output_device_ = std::make_unique<SkiaOutputDeviceX11>(
context_state_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
context_state_, dependency_->GetSurfaceHandle(), memory_tracker_,
GetDidSwapBuffersCompleteCallback());
}
}
#endif
if (!output_device_) {
#if defined(OS_FUCHSIA)
auto output_presenter = OutputPresenterFuchsia::Create(
window_surface_.get(), dependency_, memory_tracker_.get());
window_surface_.get(), dependency_, memory_tracker_);
#else
auto output_presenter =
OutputPresenterGL::Create(dependency_, memory_tracker_.get());
OutputPresenterGL::Create(dependency_, memory_tracker_);
if (output_presenter) {
// TODO(https://crbug.com/1012401): don't depend on GL.
gl_surface_ = output_presenter->gl_surface();
......@@ -1068,12 +1130,12 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
#endif
if (output_presenter) {
output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
std::move(output_presenter), dependency_, memory_tracker_.get(),
std::move(output_presenter), dependency_, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
auto output_device = SkiaOutputDeviceVulkan::Create(
vulkan_context_provider_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
memory_tracker_, GetDidSwapBuffersCompleteCallback());
#if defined(OS_WIN)
gpu::SurfaceHandle child_surface =
output_device ? output_device->GetChildSurfaceHandle()
......@@ -1096,7 +1158,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
if (dependency_->IsOffscreen()) {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(),
renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
#if defined(USE_X11)
......@@ -1104,8 +1166,8 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
// SkiaOutputDeviceDawn.
if (!features::IsUsingOzonePlatform()) {
output_device_ = std::make_unique<SkiaOutputDeviceX11>(
context_state_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback());
context_state_, dependency_->GetSurfaceHandle(), memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
return false;
}
......@@ -1113,7 +1175,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
std::unique_ptr<SkiaOutputDeviceDawn> output_device =
std::make_unique<SkiaOutputDeviceDawn>(
dawn_context_provider_, dependency_->GetSurfaceHandle(),
gfx::SurfaceOrigin::kTopLeft, memory_tracker_.get(),
gfx::SurfaceOrigin::kTopLeft, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
const gpu::SurfaceHandle child_surface_handle =
output_device->GetChildSurfaceHandle();
......
......@@ -26,6 +26,7 @@
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/service/context_url.h"
#include "gpu/ipc/service/display_context.h"
......@@ -45,6 +46,7 @@ class GLSurface;
namespace gpu {
class SharedImageRepresentationFactory;
class SharedImageFactory;
class SyncPointClientState;
}
......@@ -204,7 +206,7 @@ class SkiaOutputSurfaceImplOnGpu
num_readbacks_pending_--;
}
gpu::MemoryTracker* GetMemoryTracker() { return memory_tracker_.get(); }
gpu::MemoryTracker* GetMemoryTracker() { return memory_tracker_; }
private:
class OffscreenSurface;
......@@ -226,6 +228,12 @@ class SkiaOutputSurfaceImplOnGpu
bool MakeCurrent(bool need_fbo0);
void MarkContextLost(ContextLostReason reason);
void DestroySharedImageOnImplThread(
std::unique_ptr<gpu::SharedImageRepresentationSkia> representation,
scoped_refptr<gpu::SharedContextState> context_state,
const gpu::SyncToken& sync_token,
bool is_lost);
void PullTextureUpdates(std::vector<gpu::SyncToken> sync_token);
void ReleaseFenceSyncAndPushTextureUpdates(uint64_t sync_fence_release);
......@@ -277,9 +285,10 @@ class SkiaOutputSurfaceImplOnGpu
SkiaOutputSurfaceDependency* const dependency_;
scoped_refptr<gpu::gles2::FeatureInfo> feature_info_;
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;
std::unique_ptr<gpu::MemoryTracker> memory_tracker_;
gpu::MemoryTracker* memory_tracker_;
std::unique_ptr<gpu::SharedImageRepresentationFactory>
shared_image_representation_factory_;
std::unique_ptr<gpu::SharedImageFactory> shared_image_factory_;
VulkanContextProvider* const vulkan_context_provider_;
DawnContextProvider* const dawn_context_provider_;
const RendererSettings renderer_settings_;
......
......@@ -78,15 +78,15 @@ void SharedContextState::compileError(const char* shader, const char* errors) {
}
}
SharedContextState::MemoryTracker::MemoryTracker(
SharedContextState::MemoryTrackerObserver::MemoryTrackerObserver(
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor)
: peak_memory_monitor_(peak_memory_monitor) {}
SharedContextState::MemoryTracker::~MemoryTracker() {
SharedContextState::MemoryTrackerObserver::~MemoryTrackerObserver() {
DCHECK(!size_);
}
void SharedContextState::MemoryTracker::OnMemoryAllocatedChange(
void SharedContextState::MemoryTrackerObserver::OnMemoryAllocatedChange(
CommandBufferId id,
uint64_t old_size,
uint64_t new_size,
......@@ -100,6 +100,45 @@ void SharedContextState::MemoryTracker::OnMemoryAllocatedChange(
}
}
// Monotonic source of synthetic CommandBufferIds: a SharedContextState has
// no real command buffer, so each MemoryTracker mints its own id (offset by
// 1 so ids start at 1, never 0).
base::AtomicSequenceNumber g_next_command_buffer_id;
// Tracks SharedImage memory allocated through this SharedContextState and
// reports every change to |observer| under the SKIA allocation source.
SharedContextState::MemoryTracker::MemoryTracker(Observer* observer)
: command_buffer_id_(gpu::CommandBufferId::FromUnsafeValue(
g_next_command_buffer_id.GetNext() + 1)),
client_tracing_id_(base::trace_event::MemoryDumpManager::GetInstance()
->GetTracingProcessId()),
observer_(observer) {}
// All tracked allocations must have been released before destruction.
SharedContextState::MemoryTracker::~MemoryTracker() {
DCHECK(!size_);
}
// Applies |delta| (possibly negative, but never enough to underflow) to the
// running total and forwards the old/new sizes to the observer.
void SharedContextState::MemoryTracker::TrackMemoryAllocatedChange(
int64_t delta) {
DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
DCHECK(observer_);
observer_->OnMemoryAllocatedChange(command_buffer_id_, old_size, size_,
gpu::GpuPeakMemoryAllocationSource::SKIA);
}
uint64_t SharedContextState::MemoryTracker::GetSize() const {
return size_;
}
uint64_t SharedContextState::MemoryTracker::ClientTracingId() const {
return client_tracing_id_;
}
// The channel id is encoded in the (synthetic) command-buffer id.
int SharedContextState::MemoryTracker::ClientId() const {
return gpu::ChannelIdFromCommandBufferId(command_buffer_id_);
}
uint64_t SharedContextState::MemoryTracker::ContextGroupTracingId() const {
return command_buffer_id_.GetUnsafeValue();
}
SharedContextState::SharedContextState(
scoped_refptr<gl::GLShareGroup> share_group,
scoped_refptr<gl::GLSurface> surface,
......@@ -114,7 +153,9 @@ SharedContextState::SharedContextState(
: use_virtualized_gl_contexts_(use_virtualized_gl_contexts),
context_lost_callback_(std::move(context_lost_callback)),
gr_context_type_(gr_context_type),
memory_tracker_(peak_memory_monitor),
memory_tracker_observer_(peak_memory_monitor),
memory_tracker_(&memory_tracker_observer_),
memory_type_tracker_(&memory_tracker_),
vk_context_provider_(vulkan_context_provider),
metal_context_provider_(metal_context_provider),
dawn_context_provider_(dawn_context_provider),
......@@ -172,7 +213,6 @@ SharedContextState::SharedContextState(
// Initialize the scratch buffer to some small initial size.
scratch_deserialization_buffer_.resize(
kInitialScratchDeserializationBufferSize);
}
SharedContextState::~SharedContextState() {
......@@ -195,8 +235,8 @@ SharedContextState::~SharedContextState() {
DCHECK(!owned_gr_context_ || owned_gr_context_->unique());
// GPU memory allocations except skia_gr_cache_size_ tracked by this
// memory_tracker_ should have been released.
DCHECK_EQ(skia_gr_cache_size_, memory_tracker_.GetMemoryUsage());
// memory_tracker_observer_ should have been released.
DCHECK_EQ(skia_gr_cache_size_, memory_tracker_observer_.GetMemoryUsage());
// gr_context_ and all resources owned by it will be released soon, so set it
// to null, and UpdateSkiaOwnedMemorySize() will update skia memory usage to
// 0, to ensure that PeakGpuMemoryMonitor sees 0 allocated memory.
......@@ -511,6 +551,9 @@ bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
return context_->IsCurrent(surface);
}
// TODO(https://crbug.com/1110357): Account for memory tracked by
// memory_tracker_ and memory_type_tracker_ (e.g. SharedImages allocated in
// SkiaOutputSurfaceImplOnGpu::CopyOutput).
bool SharedContextState::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) {
......@@ -574,13 +617,13 @@ void SharedContextState::PurgeMemory(
uint64_t SharedContextState::GetMemoryUsage() {
UpdateSkiaOwnedMemorySize();
return memory_tracker_.GetMemoryUsage();
return memory_tracker_observer_.GetMemoryUsage();
}
void SharedContextState::UpdateSkiaOwnedMemorySize() {
if (!gr_context_) {
memory_tracker_.OnMemoryAllocatedChange(CommandBufferId(),
skia_gr_cache_size_, 0u);
memory_tracker_observer_.OnMemoryAllocatedChange(CommandBufferId(),
skia_gr_cache_size_, 0u);
skia_gr_cache_size_ = 0u;
return;
}
......@@ -589,7 +632,7 @@ void SharedContextState::UpdateSkiaOwnedMemorySize() {
// Skia does not have a CommandBufferId. PeakMemoryMonitor currently does not
// use CommandBufferId to identify source, so use zero here to prevent
// confusion.
memory_tracker_.OnMemoryAllocatedChange(
memory_tracker_observer_.OnMemoryAllocatedChange(
CommandBufferId(), skia_gr_cache_size_, static_cast<uint64_t>(new_size));
skia_gr_cache_size_ = static_cast<uint64_t>(new_size);
}
......
......@@ -24,6 +24,7 @@
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_peak_memory.h"
#include "gpu/vulkan/buildflags.h"
#include "third_party/skia/include/core/SkSurface.h"
......@@ -153,7 +154,13 @@ class GPU_GLES2_EXPORT SharedContextState
bool support_vulkan_external_object() const {
return support_vulkan_external_object_;
}
gpu::MemoryTracker::Observer* memory_tracker() { return &memory_tracker_; }
gpu::MemoryTracker::Observer* memory_tracker_observer() {
return &memory_tracker_observer_;
}
gpu::MemoryTracker* memory_tracker() { return &memory_tracker_; }
gpu::MemoryTypeTracker* memory_type_tracker() {
return &memory_type_tracker_;
}
ExternalSemaphorePool* external_semaphore_pool() {
#if BUILDFLAG(ENABLE_VULKAN)
return external_semaphore_pool_.get();
......@@ -213,13 +220,14 @@ class GPU_GLES2_EXPORT SharedContextState
// Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a
// shared image, and forward information to both histograms and task manager.
class GPU_GLES2_EXPORT MemoryTracker : public gpu::MemoryTracker::Observer {
class GPU_GLES2_EXPORT MemoryTrackerObserver
: public gpu::MemoryTracker::Observer {
public:
explicit MemoryTracker(
explicit MemoryTrackerObserver(
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor);
MemoryTracker(MemoryTracker&) = delete;
MemoryTracker& operator=(MemoryTracker&) = delete;
~MemoryTracker() override;
MemoryTrackerObserver(MemoryTrackerObserver&) = delete;
MemoryTrackerObserver& operator=(MemoryTrackerObserver&) = delete;
~MemoryTrackerObserver() override;
// gpu::MemoryTracker::Observer implementation:
void OnMemoryAllocatedChange(
......@@ -237,6 +245,29 @@ class GPU_GLES2_EXPORT SharedContextState
base::WeakPtr<gpu::MemoryTracker::Observer> const peak_memory_monitor_;
};
// MemoryTracker implementation used to track SharedImages owned by
// SkiaOutputSurfaceImpl. Each instance mints a synthetic CommandBufferId
// (there is no real command buffer) and reports all allocation changes to
// |observer|.
class MemoryTracker : public gpu::MemoryTracker {
public:
explicit MemoryTracker(gpu::MemoryTracker::Observer* observer);
MemoryTracker(const MemoryTracker&) = delete;
MemoryTracker& operator=(const MemoryTracker&) = delete;
~MemoryTracker() override;
// MemoryTracker implementation:
void TrackMemoryAllocatedChange(int64_t delta) override;
uint64_t GetSize() const override;
uint64_t ClientTracingId() const override;
int ClientId() const override;
uint64_t ContextGroupTracingId() const override;
private:
// Synthetic per-instance id used to identify this tracker to observers.
gpu::CommandBufferId command_buffer_id_;
const uint64_t client_tracing_id_;
gpu::MemoryTracker::Observer* const observer_;
// Bytes currently tracked; must be zero at destruction.
uint64_t size_ = 0;
};
~SharedContextState() override;
// gpu::GLContextVirtualDelegate implementation.
......@@ -263,7 +294,9 @@ class GPU_GLES2_EXPORT SharedContextState
bool support_vulkan_external_object_ = false;
ContextLostCallback context_lost_callback_;
GrContextType gr_context_type_ = GrContextType::kGL;
MemoryTrackerObserver memory_tracker_observer_;
MemoryTracker memory_tracker_;
gpu::MemoryTypeTracker memory_type_tracker_;
viz::VulkanContextProvider* const vk_context_provider_;
viz::MetalContextProvider* const metal_context_provider_;
viz::DawnContextProvider* const dawn_context_provider_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment