Commit 4a52a132 authored by Brian Ho, committed by Commit Bot

viz: RGBA_TEXTURE copy requests in SkiaRenderer

Currently, SkiaRenderer does not support RGBA_TEXTURE
CopyOutputRequests, which are used in Chrome OS for screen rotation
animations [1] (among other things). As part of a larger effort to
enable SkiaRenderer on Chrome OS, this CL implements RGBA_TEXTURE
support in SkiaOutputSurfaceImplOnGpu::CopyOutput:

1. Refactor SharedImageFactory ownership out of
   DirectContextProviderDelegateImpl (used only for the
   use_gl_renderer_copier bypass) and directly into
   SkiaOutputSurfaceImplOnGpu.
2. Create a SharedImage and its Skia representation in CopyOutput and
   blit the request rect from the source into the output SkSurface.
3. Send the SharedImage's mailbox back through the CopyOutputResult
   with a release callback that destroys the SharedImage.
4. Enable SKIA_GL tests in cc_unittests.

[1] https://source.chromium.org/chromium/chromium/src/+/master:ash/rotator/screen_rotation_animator.cc;l=214;drc=b15cb5fd43aff2d181355401d99e47e2e44aa61f?originalUrl=https:%2F%2Fcs.chromium.org%2F
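
For reference, the sketch below shows how a client might issue one of these
RGBA_TEXTURE requests and release the mailbox-backed result. It is not part
of this CL; the MakeTextureCopyRequest() helper and the wiring of the request
to a layer are assumptions for illustration only, while CopyOutputRequest,
CopyOutputResult, and SingleReleaseCallback are the existing viz interfaces
this CL's CopyOutput path feeds into.

  // Hypothetical consumer-side sketch, not part of this CL. The helper name
  // and how the request is attached to a layer are assumptions.
  #include <memory>

  #include "base/bind.h"
  #include "components/viz/common/frame_sinks/copy_output_request.h"
  #include "components/viz/common/frame_sinks/copy_output_result.h"
  #include "components/viz/common/resources/single_release_callback.h"
  #include "gpu/command_buffer/common/mailbox.h"
  #include "gpu/command_buffer/common/sync_token.h"

  std::unique_ptr<viz::CopyOutputRequest> MakeTextureCopyRequest() {
    return std::make_unique<viz::CopyOutputRequest>(
        viz::CopyOutputRequest::ResultFormat::RGBA_TEXTURE,
        base::BindOnce([](std::unique_ptr<viz::CopyOutputResult> result) {
          if (result->IsEmpty())
            return;
          // Mailbox of the SharedImage created in
          // SkiaOutputSurfaceImplOnGpu::CopyOutput.
          gpu::Mailbox mailbox = result->GetTextureResult()->mailbox;
          // Taking texture ownership also hands over the release callback
          // that, per this CL, destroys the SharedImage on the GPU thread.
          std::unique_ptr<viz::SingleReleaseCallback> release =
              result->TakeTextureOwnership();
          // ... import |mailbox| into a GL/Skia context and draw with it ...
          release->Run(gpu::SyncToken(), /*is_lost=*/false);
        }));
  }

The returned request would typically be handed to a cc::Layer via
RequestCopyOfOutput(), e.g. by ash's ScreenRotationAnimator referenced in [1].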

Bug: 1046788, 971257, 1098435
Change-Id: I62d41390828ed0c79a1dc29c508efc748924ce06
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2302733
Reviewed-by: Khushal <khushalsagar@chromium.org>
Reviewed-by: Jonathan Backer <backer@chromium.org>
Commit-Queue: Brian Ho <hob@chromium.org>
Cr-Commit-Position: refs/heads/master@{#792971}
parent b1df5cff
...@@ -420,12 +420,7 @@ ReadbackTestConfig const kTestConfigs[] = { ...@@ -420,12 +420,7 @@ ReadbackTestConfig const kTestConfigs[] = {
ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture}, ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap},
// TODO(crbug.com/1046788): The skia readback path doesn't support ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kTexture},
// RGBA_TEXTURE readback requests yet. Don't run these tests on platforms
// that have UseSkiaForGLReadback enabled by default.
//
// ReadbackTestConfig{TestRendererType::kSkiaGL,
// TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap},
#if defined(ENABLE_CC_VULKAN_TESTS) #if defined(ENABLE_CC_VULKAN_TESTS)
ReadbackTestConfig{TestRendererType::kSkiaVk, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kSkiaVk, TestReadBackType::kBitmap},
...@@ -445,12 +440,7 @@ ReadbackTestConfig const kMaybeVulkanTestConfigs[] = { ...@@ -445,12 +440,7 @@ ReadbackTestConfig const kMaybeVulkanTestConfigs[] = {
ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kSoftware, TestReadBackType::kBitmap},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture}, ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kGL, TestReadBackType::kBitmap},
// TODO(crbug.com/1046788): The skia readback path doesn't support ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kTexture},
// RGBA_TEXTURE readback requests yet. Don't run these tests on platforms
// that have UseSkiaForGLReadback enabled by default.
//
// ReadbackTestConfig{TestRendererType::kSkiaGL,
// TestReadBackType::kTexture},
ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap}, ReadbackTestConfig{TestRendererType::kSkiaGL, TestReadBackType::kBitmap},
#if defined(ENABLE_CC_VULKAN_TESTS) && !defined(THREAD_SANITIZER) && \ #if defined(ENABLE_CC_VULKAN_TESTS) && !defined(THREAD_SANITIZER) && \
!defined(MEMORY_SANITIZER) !defined(MEMORY_SANITIZER)
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include "base/bind_helpers.h" #include "base/bind_helpers.h"
#include "base/callback_helpers.h" #include "base/callback_helpers.h"
#include "base/optional.h" #include "base/optional.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h" #include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h" #include "base/trace_event/trace_event.h"
#include "components/viz/common/frame_sinks/copy_output_request.h" #include "components/viz/common/frame_sinks/copy_output_request.h"
...@@ -115,45 +116,6 @@ struct ReadPixelsContext { ...@@ -115,45 +116,6 @@ struct ReadPixelsContext {
base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu; base::WeakPtr<SkiaOutputSurfaceImplOnGpu> impl_on_gpu;
}; };
class SharedImageSubMemoryTracker : public gpu::MemoryTracker {
public:
SharedImageSubMemoryTracker(gpu::CommandBufferId command_buffer_id,
uint64_t client_tracing_id,
Observer* observer)
: command_buffer_id_(command_buffer_id),
client_tracing_id_(client_tracing_id),
observer_(observer) {}
SharedImageSubMemoryTracker(const SharedImageSubMemoryTracker&) = delete;
SharedImageSubMemoryTracker& operator=(const SharedImageSubMemoryTracker&) =
delete;
~SharedImageSubMemoryTracker() override { DCHECK(!size_); }
// MemoryTracker implementation:
void TrackMemoryAllocatedChange(int64_t delta) override {
DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
DCHECK(observer_);
observer_->OnMemoryAllocatedChange(
command_buffer_id_, old_size, size_,
gpu::GpuPeakMemoryAllocationSource::SKIA);
}
uint64_t GetSize() const override { return size_; }
uint64_t ClientTracingId() const override { return client_tracing_id_; }
int ClientId() const override {
return gpu::ChannelIdFromCommandBufferId(command_buffer_id_);
}
uint64_t ContextGroupTracingId() const override {
return command_buffer_id_.GetUnsafeValue();
}
private:
gpu::CommandBufferId command_buffer_id_;
const uint64_t client_tracing_id_;
MemoryTracker::Observer* const observer_;
uint64_t size_ = 0;
};
class CopyOutputResultYUV : public CopyOutputResult { class CopyOutputResultYUV : public CopyOutputResult {
public: public:
CopyOutputResultYUV(const gfx::Rect& rect, CopyOutputResultYUV(const gfx::Rect& rect,
...@@ -427,14 +389,9 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu( ...@@ -427,14 +389,9 @@ SkiaOutputSurfaceImplOnGpu::SkiaOutputSurfaceImplOnGpu(
feature_info_(std::move(feature_info)), feature_info_(std::move(feature_info)),
sync_point_client_state_( sync_point_client_state_(
CreateSyncPointClientState(dependency_, sequence_id)), CreateSyncPointClientState(dependency_, sequence_id)),
memory_tracker_(std::make_unique<SharedImageSubMemoryTracker>( memory_tracker_(dependency_->GetSharedContextState()->memory_tracker()),
sync_point_client_state_->command_buffer_id(),
base::trace_event::MemoryDumpManager::GetInstance()
->GetTracingProcessId(),
dependency_->GetSharedContextState()->memory_tracker())),
shared_image_representation_factory_( shared_image_representation_factory_(
CreateSharedImageRepresentationFactory(dependency_, CreateSharedImageRepresentationFactory(dependency_, memory_tracker_)),
memory_tracker_.get())),
vulkan_context_provider_(dependency_->GetVulkanContextProvider()), vulkan_context_provider_(dependency_->GetVulkanContextProvider()),
dawn_context_provider_(dependency_->GetDawnContextProvider()), dawn_context_provider_(dependency_->GetDawnContextProvider()),
renderer_settings_(renderer_settings), renderer_settings_(renderer_settings),
...@@ -712,6 +669,15 @@ void SkiaOutputSurfaceImplOnGpu::RemoveRenderPassResource( ...@@ -712,6 +669,15 @@ void SkiaOutputSurfaceImplOnGpu::RemoveRenderPassResource(
// |image_contexts| will go out of scope and be destroyed now. // |image_contexts| will go out of scope and be destroyed now.
} }
static void PostTaskFromMainToImplThread(
scoped_refptr<base::SingleThreadTaskRunner> impl_task_runner,
ReleaseCallback callback,
const gpu::SyncToken& sync_token,
bool is_lost) {
impl_task_runner->PostTask(
FROM_HERE, base::BindOnce(std::move(callback), sync_token, is_lost));
}
bool SkiaOutputSurfaceImplOnGpu::CopyOutput( bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
RenderPassId id, RenderPassId id,
copy_output::RenderPassGeometry geometry, copy_output::RenderPassGeometry geometry,
...@@ -763,10 +729,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -763,10 +729,6 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
gpu::kInProcessCommandBufferClientId); gpu::kInProcessCommandBufferClientId);
} }
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
// For downscaling, use the GOOD quality setting (appropriate for // For downscaling, use the GOOD quality setting (appropriate for
// thumbnailing); and, for upscaling, use the BEST quality. // thumbnailing); and, for upscaling, use the BEST quality.
bool is_downscale_in_both_dimensions = bool is_downscale_in_both_dimensions =
...@@ -805,6 +767,9 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -805,6 +767,9 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
std::make_unique<ReadPixelsContext>(std::move(request), std::make_unique<ReadPixelsContext>(std::move(request),
geometry.result_selection, geometry.result_selection,
color_space, weak_ptr_); color_space, weak_ptr_);
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
surface->asyncRescaleAndReadPixelsYUV420( surface->asyncRescaleAndReadPixelsYUV420(
kRec709_SkYUVColorSpace, SkColorSpace::MakeSRGB(), src_rect, kRec709_SkYUVColorSpace, SkColorSpace::MakeSRGB(), src_rect,
{geometry.result_selection.width(), geometry.result_selection.height()}, {geometry.result_selection.width(), geometry.result_selection.height()},
...@@ -822,16 +787,97 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput( ...@@ -822,16 +787,97 @@ bool SkiaOutputSurfaceImplOnGpu::CopyOutput(
std::make_unique<ReadPixelsContext>(std::move(request), std::make_unique<ReadPixelsContext>(std::move(request),
geometry.result_selection, geometry.result_selection,
color_space, weak_ptr_); color_space, weak_ptr_);
// Skia readback could be synchronous. Increment counter in case
// ReadbackCompleted is called immediately.
num_readbacks_pending_++;
surface->asyncRescaleAndReadPixels( surface->asyncRescaleAndReadPixels(
dst_info, src_rect, SkSurface::RescaleGamma::kSrc, filter_quality, dst_info, src_rect, SkSurface::RescaleGamma::kSrc, filter_quality,
&OnRGBAReadbackDone, context.release()); &OnRGBAReadbackDone, context.release());
} else if (request->result_format() ==
CopyOutputRequest::ResultFormat::RGBA_TEXTURE) {
gpu::Mailbox mailbox = gpu::Mailbox::GenerateForSharedImage();
constexpr auto kUsage = gpu::SHARED_IMAGE_USAGE_GLES2 |
gpu::SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT |
gpu::SHARED_IMAGE_USAGE_RASTER |
gpu::SHARED_IMAGE_USAGE_DISPLAY;
bool result = shared_image_factory_->CreateSharedImage(
mailbox, ResourceFormat::RGBA_8888,
gfx::Size(geometry.result_bounds.width(),
geometry.result_bounds.height()),
color_space, kBottomLeft_GrSurfaceOrigin, kUnpremul_SkAlphaType,
gpu::kNullSurfaceHandle, kUsage);
if (!result) {
DLOG(ERROR) << "Failed to create shared image.";
return false;
}
auto representation = dependency_->GetSharedImageManager()->ProduceSkia(
mailbox, context_state_->memory_type_tracker(), context_state_);
shared_image_factory_->DestroySharedImage(mailbox);
SkSurfaceProps surface_props{0, kUnknown_SkPixelGeometry};
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
representation->SetCleared();
auto scoped_write = representation->BeginScopedWriteAccess(
0 /* final_msaa_count */, surface_props, &begin_semaphores,
&end_semaphores,
gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
SkSurface* dest_surface = scoped_write->surface();
dest_surface->wait(begin_semaphores.size(), begin_semaphores.data());
SkCanvas* dest_canvas = dest_surface->getCanvas();
if (request->is_scaled()) {
dest_canvas->scale(request->scale_from().x() / request->scale_to().x(),
request->scale_from().y() / request->scale_to().y());
}
SkPaint paint;
paint.setFilterQuality(filter_quality);
sk_sp<SkImage> image = surface->makeImageSnapshot(src_rect);
dest_canvas->clipRect(
SkRect::MakeXYWH(0, 0, src_rect.width(), src_rect.height()));
surface->draw(dest_canvas, -src_rect.x(), -src_rect.y(), &paint);
GrFlushInfo flush_info;
flush_info.fNumSemaphores = end_semaphores.size();
flush_info.fSignalSemaphores = end_semaphores.data();
gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
&flush_info);
auto flush_result = dest_surface->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
if (flush_result != GrSemaphoresSubmitted::kYes &&
!(begin_semaphores.empty() && end_semaphores.empty())) {
// TODO(penghuang): handle vulkan device lost.
DLOG(ERROR) << "dest_surface->flush() failed.";
return false;
}
auto release_callback = base::BindOnce(
&SkiaOutputSurfaceImplOnGpu::DestroySharedImageOnImplThread,
weak_ptr_factory_.GetWeakPtr(), std::move(representation),
context_state_);
auto main_callback = SingleReleaseCallback::Create(base::BindOnce(
&PostTaskFromMainToImplThread, base::ThreadTaskRunnerHandle::Get(),
std::move(release_callback)));
request->SendResult(std::make_unique<CopyOutputTextureResult>(
geometry.result_bounds, mailbox, gpu::SyncToken(), color_space,
std::move(main_callback)));
} else { } else {
NOTIMPLEMENTED(); // ResultFormat::RGBA_TEXTURE NOTREACHED();
} }
ScheduleCheckReadbackCompletion(); ScheduleCheckReadbackCompletion();
return true; return true;
} }
void SkiaOutputSurfaceImplOnGpu::DestroySharedImageOnImplThread(
std::unique_ptr<gpu::SharedImageRepresentationSkia> representation,
scoped_refptr<gpu::SharedContextState> context_state,
const gpu::SyncToken& sync_token,
bool is_lost) {
context_state_->MakeCurrent(nullptr);
representation.reset();
}
void SkiaOutputSurfaceImplOnGpu::BeginAccessImages( void SkiaOutputSurfaceImplOnGpu::BeginAccessImages(
const std::vector<ImageContextImpl*>& image_contexts, const std::vector<ImageContextImpl*>& image_contexts,
std::vector<GrBackendSemaphore>* begin_semaphores, std::vector<GrBackendSemaphore>* begin_semaphores,
...@@ -930,7 +976,7 @@ void SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting( ...@@ -930,7 +976,7 @@ void SkiaOutputSurfaceImplOnGpu::SetCapabilitiesForTesting(
DCHECK(dependency_->IsOffscreen()); DCHECK(dependency_->IsOffscreen());
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>( output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, capabilities.output_surface_origin, context_state_, capabilities.output_surface_origin,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} }
...@@ -967,6 +1013,14 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() { ...@@ -967,6 +1013,14 @@ bool SkiaOutputSurfaceImplOnGpu::Initialize() {
return false; return false;
} }
shared_image_factory_ = std::make_unique<gpu::SharedImageFactory>(
dependency_->GetGpuPreferences(),
dependency_->GetGpuDriverBugWorkarounds(),
dependency_->GetGpuFeatureInfo(),
dependency_->GetSharedContextState().get(),
dependency_->GetMailboxManager(), dependency_->GetSharedImageManager(),
dependency_->GetGpuImageFactory(), memory_tracker_,
true /* enable_wrapped_sk_image */);
max_resource_cache_bytes_ = max_resource_cache_bytes_ =
context_state_->gr_context()->getResourceCacheLimit(); context_state_->gr_context()->getResourceCacheLimit();
if (context_state_) if (context_state_)
...@@ -987,10 +1041,17 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() { ...@@ -987,10 +1041,17 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
if (!gl_surface_) if (!gl_surface_)
return false; return false;
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>( if (MakeCurrent(false /* need_fbo0 */)) {
context_state_, gfx::SurfaceOrigin::kTopLeft, output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), context_state_, gfx::SurfaceOrigin::kTopLeft,
GetDidSwapBuffersCompleteCallback()); renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback());
} else {
gl_surface_ = nullptr;
context_state_ = nullptr;
LOG(ERROR) << "Failed to make current during initialization.";
return false;
}
} else { } else {
gl_surface_ = gl_surface_ =
dependency_->CreateGLSurface(weak_ptr_factory_.GetWeakPtr(), format); dependency_->CreateGLSurface(weak_ptr_factory_.GetWeakPtr(), format);
...@@ -1002,19 +1063,18 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() { ...@@ -1002,19 +1063,18 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForGL() {
if (gl_surface_->IsSurfaceless()) { if (gl_surface_->IsSurfaceless()) {
output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>( output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
std::make_unique<OutputPresenterGL>(gl_surface_, dependency_, std::make_unique<OutputPresenterGL>(gl_surface_, dependency_,
memory_tracker_.get()), memory_tracker_),
dependency_, memory_tracker_.get(), dependency_, memory_tracker_, GetDidSwapBuffersCompleteCallback());
GetDidSwapBuffersCompleteCallback());
} else { } else {
if (dependency_->NeedsSupportForExternalStencil()) { if (dependency_->NeedsSupportForExternalStencil()) {
output_device_ = std::make_unique<SkiaOutputDeviceWebView>( output_device_ = std::make_unique<SkiaOutputDeviceWebView>(
context_state_.get(), gl_surface_, memory_tracker_.get(), context_state_.get(), gl_surface_, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} else { } else {
output_device_ = std::make_unique<SkiaOutputDeviceGL>( output_device_ = std::make_unique<SkiaOutputDeviceGL>(
dependency_->GetMailboxManager(), context_state_.get(), dependency_->GetMailboxManager(), context_state_.get(),
gl_surface_, feature_info_, memory_tracker_.get(), gl_surface_, feature_info_, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} }
} }
...@@ -1034,7 +1094,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() { ...@@ -1034,7 +1094,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
if (dependency_->IsOffscreen()) { if (dependency_->IsOffscreen()) {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>( output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft, context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} else { } else {
#if defined(USE_X11) #if defined(USE_X11)
...@@ -1042,22 +1102,22 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() { ...@@ -1042,22 +1102,22 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
if (!gpu_preferences_.disable_vulkan_surface) { if (!gpu_preferences_.disable_vulkan_surface) {
output_device_ = SkiaOutputDeviceVulkan::Create( output_device_ = SkiaOutputDeviceVulkan::Create(
vulkan_context_provider_, dependency_->GetSurfaceHandle(), vulkan_context_provider_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback()); memory_tracker_, GetDidSwapBuffersCompleteCallback());
} }
if (!output_device_) { if (!output_device_) {
output_device_ = std::make_unique<SkiaOutputDeviceX11>( output_device_ = std::make_unique<SkiaOutputDeviceX11>(
context_state_, dependency_->GetSurfaceHandle(), context_state_, dependency_->GetSurfaceHandle(), memory_tracker_,
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} }
} }
#endif #endif
if (!output_device_) { if (!output_device_) {
#if defined(OS_FUCHSIA) #if defined(OS_FUCHSIA)
auto output_presenter = OutputPresenterFuchsia::Create( auto output_presenter = OutputPresenterFuchsia::Create(
window_surface_.get(), dependency_, memory_tracker_.get()); window_surface_.get(), dependency_, memory_tracker_);
#else #else
auto output_presenter = auto output_presenter =
OutputPresenterGL::Create(dependency_, memory_tracker_.get()); OutputPresenterGL::Create(dependency_, memory_tracker_);
if (output_presenter) { if (output_presenter) {
// TODO(https://crbug.com/1012401): don't depend on GL. // TODO(https://crbug.com/1012401): don't depend on GL.
gl_surface_ = output_presenter->gl_surface(); gl_surface_ = output_presenter->gl_surface();
...@@ -1065,12 +1125,12 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() { ...@@ -1065,12 +1125,12 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForVulkan() {
#endif #endif
if (output_presenter) { if (output_presenter) {
output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>( output_device_ = std::make_unique<SkiaOutputDeviceBufferQueue>(
std::move(output_presenter), dependency_, memory_tracker_.get(), std::move(output_presenter), dependency_, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} else { } else {
auto output_device = SkiaOutputDeviceVulkan::Create( auto output_device = SkiaOutputDeviceVulkan::Create(
vulkan_context_provider_, dependency_->GetSurfaceHandle(), vulkan_context_provider_, dependency_->GetSurfaceHandle(),
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback()); memory_tracker_, GetDidSwapBuffersCompleteCallback());
#if defined(OS_WIN) #if defined(OS_WIN)
gpu::SurfaceHandle child_surface = gpu::SurfaceHandle child_surface =
output_device ? output_device->GetChildSurfaceHandle() output_device ? output_device->GetChildSurfaceHandle()
...@@ -1093,7 +1153,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() { ...@@ -1093,7 +1153,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
if (dependency_->IsOffscreen()) { if (dependency_->IsOffscreen()) {
output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>( output_device_ = std::make_unique<SkiaOutputDeviceOffscreen>(
context_state_, gfx::SurfaceOrigin::kBottomLeft, context_state_, gfx::SurfaceOrigin::kBottomLeft,
renderer_settings_.requires_alpha_channel, memory_tracker_.get(), renderer_settings_.requires_alpha_channel, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} else { } else {
#if defined(USE_X11) #if defined(USE_X11)
...@@ -1101,8 +1161,8 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() { ...@@ -1101,8 +1161,8 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
// SkiaOutputDeviceDawn. // SkiaOutputDeviceDawn.
if (!features::IsUsingOzonePlatform()) { if (!features::IsUsingOzonePlatform()) {
output_device_ = std::make_unique<SkiaOutputDeviceX11>( output_device_ = std::make_unique<SkiaOutputDeviceX11>(
context_state_, dependency_->GetSurfaceHandle(), context_state_, dependency_->GetSurfaceHandle(), memory_tracker_,
memory_tracker_.get(), GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
} else { } else {
return false; return false;
} }
...@@ -1110,7 +1170,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() { ...@@ -1110,7 +1170,7 @@ bool SkiaOutputSurfaceImplOnGpu::InitializeForDawn() {
std::unique_ptr<SkiaOutputDeviceDawn> output_device = std::unique_ptr<SkiaOutputDeviceDawn> output_device =
std::make_unique<SkiaOutputDeviceDawn>( std::make_unique<SkiaOutputDeviceDawn>(
dawn_context_provider_, dependency_->GetSurfaceHandle(), dawn_context_provider_, dependency_->GetSurfaceHandle(),
gfx::SurfaceOrigin::kTopLeft, memory_tracker_.get(), gfx::SurfaceOrigin::kTopLeft, memory_tracker_,
GetDidSwapBuffersCompleteCallback()); GetDidSwapBuffersCompleteCallback());
const gpu::SurfaceHandle child_surface_handle = const gpu::SurfaceHandle child_surface_handle =
output_device->GetChildSurfaceHandle(); output_device->GetChildSurfaceHandle();
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include "gpu/command_buffer/common/mailbox.h" #include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h" #include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/shared_context_state.h" #include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/sync_point_manager.h" #include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/ipc/service/context_url.h" #include "gpu/ipc/service/context_url.h"
#include "gpu/ipc/service/display_context.h" #include "gpu/ipc/service/display_context.h"
...@@ -45,6 +46,7 @@ class GLSurface; ...@@ -45,6 +46,7 @@ class GLSurface;
namespace gpu { namespace gpu {
class SharedImageRepresentationFactory; class SharedImageRepresentationFactory;
class SharedImageFactory;
class SyncPointClientState; class SyncPointClientState;
} }
...@@ -204,7 +206,7 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -204,7 +206,7 @@ class SkiaOutputSurfaceImplOnGpu
num_readbacks_pending_--; num_readbacks_pending_--;
} }
gpu::MemoryTracker* GetMemoryTracker() { return memory_tracker_.get(); } gpu::MemoryTracker* GetMemoryTracker() { return memory_tracker_; }
private: private:
class OffscreenSurface; class OffscreenSurface;
...@@ -226,6 +228,12 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -226,6 +228,12 @@ class SkiaOutputSurfaceImplOnGpu
bool MakeCurrent(bool need_fbo0); bool MakeCurrent(bool need_fbo0);
void MarkContextLost(ContextLostReason reason); void MarkContextLost(ContextLostReason reason);
void DestroySharedImageOnImplThread(
std::unique_ptr<gpu::SharedImageRepresentationSkia> representation,
scoped_refptr<gpu::SharedContextState> context_state,
const gpu::SyncToken& sync_token,
bool is_lost);
void PullTextureUpdates(std::vector<gpu::SyncToken> sync_token); void PullTextureUpdates(std::vector<gpu::SyncToken> sync_token);
void ReleaseFenceSyncAndPushTextureUpdates(uint64_t sync_fence_release); void ReleaseFenceSyncAndPushTextureUpdates(uint64_t sync_fence_release);
...@@ -277,9 +285,10 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -277,9 +285,10 @@ class SkiaOutputSurfaceImplOnGpu
SkiaOutputSurfaceDependency* const dependency_; SkiaOutputSurfaceDependency* const dependency_;
scoped_refptr<gpu::gles2::FeatureInfo> feature_info_; scoped_refptr<gpu::gles2::FeatureInfo> feature_info_;
scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_; scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;
std::unique_ptr<gpu::MemoryTracker> memory_tracker_; gpu::MemoryTracker* memory_tracker_;
std::unique_ptr<gpu::SharedImageRepresentationFactory> std::unique_ptr<gpu::SharedImageRepresentationFactory>
shared_image_representation_factory_; shared_image_representation_factory_;
std::unique_ptr<gpu::SharedImageFactory> shared_image_factory_;
VulkanContextProvider* const vulkan_context_provider_; VulkanContextProvider* const vulkan_context_provider_;
DawnContextProvider* const dawn_context_provider_; DawnContextProvider* const dawn_context_provider_;
const RendererSettings renderer_settings_; const RendererSettings renderer_settings_;
......
...@@ -78,15 +78,15 @@ void SharedContextState::compileError(const char* shader, const char* errors) { ...@@ -78,15 +78,15 @@ void SharedContextState::compileError(const char* shader, const char* errors) {
} }
} }
SharedContextState::MemoryTracker::MemoryTracker( SharedContextState::MemoryTrackerObserver::MemoryTrackerObserver(
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor) base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor)
: peak_memory_monitor_(peak_memory_monitor) {} : peak_memory_monitor_(peak_memory_monitor) {}
SharedContextState::MemoryTracker::~MemoryTracker() { SharedContextState::MemoryTrackerObserver::~MemoryTrackerObserver() {
DCHECK(!size_); DCHECK(!size_);
} }
void SharedContextState::MemoryTracker::OnMemoryAllocatedChange( void SharedContextState::MemoryTrackerObserver::OnMemoryAllocatedChange(
CommandBufferId id, CommandBufferId id,
uint64_t old_size, uint64_t old_size,
uint64_t new_size, uint64_t new_size,
...@@ -100,6 +100,45 @@ void SharedContextState::MemoryTracker::OnMemoryAllocatedChange( ...@@ -100,6 +100,45 @@ void SharedContextState::MemoryTracker::OnMemoryAllocatedChange(
} }
} }
base::AtomicSequenceNumber g_next_command_buffer_id;
SharedContextState::MemoryTracker::MemoryTracker(Observer* observer)
: command_buffer_id_(gpu::CommandBufferId::FromUnsafeValue(
g_next_command_buffer_id.GetNext() + 1)),
client_tracing_id_(base::trace_event::MemoryDumpManager::GetInstance()
->GetTracingProcessId()),
observer_(observer) {}
SharedContextState::MemoryTracker::~MemoryTracker() {
DCHECK(!size_);
}
void SharedContextState::MemoryTracker::TrackMemoryAllocatedChange(
int64_t delta) {
DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
uint64_t old_size = size_;
size_ += delta;
DCHECK(observer_);
observer_->OnMemoryAllocatedChange(command_buffer_id_, old_size, size_,
gpu::GpuPeakMemoryAllocationSource::SKIA);
}
uint64_t SharedContextState::MemoryTracker::GetSize() const {
return size_;
}
uint64_t SharedContextState::MemoryTracker::ClientTracingId() const {
return client_tracing_id_;
}
int SharedContextState::MemoryTracker::ClientId() const {
return gpu::ChannelIdFromCommandBufferId(command_buffer_id_);
}
uint64_t SharedContextState::MemoryTracker::ContextGroupTracingId() const {
return command_buffer_id_.GetUnsafeValue();
}
SharedContextState::SharedContextState( SharedContextState::SharedContextState(
scoped_refptr<gl::GLShareGroup> share_group, scoped_refptr<gl::GLShareGroup> share_group,
scoped_refptr<gl::GLSurface> surface, scoped_refptr<gl::GLSurface> surface,
...@@ -114,7 +153,9 @@ SharedContextState::SharedContextState( ...@@ -114,7 +153,9 @@ SharedContextState::SharedContextState(
: use_virtualized_gl_contexts_(use_virtualized_gl_contexts), : use_virtualized_gl_contexts_(use_virtualized_gl_contexts),
context_lost_callback_(std::move(context_lost_callback)), context_lost_callback_(std::move(context_lost_callback)),
gr_context_type_(gr_context_type), gr_context_type_(gr_context_type),
memory_tracker_(peak_memory_monitor), memory_tracker_observer_(peak_memory_monitor),
memory_tracker_(&memory_tracker_observer_),
memory_type_tracker_(&memory_tracker_),
vk_context_provider_(vulkan_context_provider), vk_context_provider_(vulkan_context_provider),
metal_context_provider_(metal_context_provider), metal_context_provider_(metal_context_provider),
dawn_context_provider_(dawn_context_provider), dawn_context_provider_(dawn_context_provider),
...@@ -172,7 +213,6 @@ SharedContextState::SharedContextState( ...@@ -172,7 +213,6 @@ SharedContextState::SharedContextState(
// Initialize the scratch buffer to some small initial size. // Initialize the scratch buffer to some small initial size.
scratch_deserialization_buffer_.resize( scratch_deserialization_buffer_.resize(
kInitialScratchDeserializationBufferSize); kInitialScratchDeserializationBufferSize);
} }
SharedContextState::~SharedContextState() { SharedContextState::~SharedContextState() {
...@@ -195,8 +235,8 @@ SharedContextState::~SharedContextState() { ...@@ -195,8 +235,8 @@ SharedContextState::~SharedContextState() {
DCHECK(!owned_gr_context_ || owned_gr_context_->unique()); DCHECK(!owned_gr_context_ || owned_gr_context_->unique());
// GPU memory allocations except skia_gr_cache_size_ tracked by this // GPU memory allocations except skia_gr_cache_size_ tracked by this
// memory_tracker_ should have been released. // memory_tracker_observer_ should have been released.
DCHECK_EQ(skia_gr_cache_size_, memory_tracker_.GetMemoryUsage()); DCHECK_EQ(skia_gr_cache_size_, memory_tracker_observer_.GetMemoryUsage());
// gr_context_ and all resources owned by it will be released soon, so set it // gr_context_ and all resources owned by it will be released soon, so set it
// to null, and UpdateSkiaOwnedMemorySize() will update skia memory usage to // to null, and UpdateSkiaOwnedMemorySize() will update skia memory usage to
// 0, to ensure that PeakGpuMemoryMonitor sees 0 allocated memory. // 0, to ensure that PeakGpuMemoryMonitor sees 0 allocated memory.
...@@ -511,6 +551,9 @@ bool SharedContextState::IsCurrent(gl::GLSurface* surface) { ...@@ -511,6 +551,9 @@ bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
return context_->IsCurrent(surface); return context_->IsCurrent(surface);
} }
// TODO(https://crbug.com/1110357): Account for memory tracked by
// memory_tracker_ and memory_type_tracker_ (e.g. SharedImages allocated in
// SkiaOutputSurfaceImplOnGpu::CopyOutput).
bool SharedContextState::OnMemoryDump( bool SharedContextState::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args, const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd) { base::trace_event::ProcessMemoryDump* pmd) {
...@@ -574,13 +617,13 @@ void SharedContextState::PurgeMemory( ...@@ -574,13 +617,13 @@ void SharedContextState::PurgeMemory(
uint64_t SharedContextState::GetMemoryUsage() { uint64_t SharedContextState::GetMemoryUsage() {
UpdateSkiaOwnedMemorySize(); UpdateSkiaOwnedMemorySize();
return memory_tracker_.GetMemoryUsage(); return memory_tracker_observer_.GetMemoryUsage();
} }
void SharedContextState::UpdateSkiaOwnedMemorySize() { void SharedContextState::UpdateSkiaOwnedMemorySize() {
if (!gr_context_) { if (!gr_context_) {
memory_tracker_.OnMemoryAllocatedChange(CommandBufferId(), memory_tracker_observer_.OnMemoryAllocatedChange(CommandBufferId(),
skia_gr_cache_size_, 0u); skia_gr_cache_size_, 0u);
skia_gr_cache_size_ = 0u; skia_gr_cache_size_ = 0u;
return; return;
} }
...@@ -589,7 +632,7 @@ void SharedContextState::UpdateSkiaOwnedMemorySize() { ...@@ -589,7 +632,7 @@ void SharedContextState::UpdateSkiaOwnedMemorySize() {
// Skia does not have a CommandBufferId. PeakMemoryMonitor currently does not // Skia does not have a CommandBufferId. PeakMemoryMonitor currently does not
// use CommandBufferId to identify source, so use zero here to separate // use CommandBufferId to identify source, so use zero here to separate
// prevent confusion. // prevent confusion.
memory_tracker_.OnMemoryAllocatedChange( memory_tracker_observer_.OnMemoryAllocatedChange(
CommandBufferId(), skia_gr_cache_size_, static_cast<uint64_t>(new_size)); CommandBufferId(), skia_gr_cache_size_, static_cast<uint64_t>(new_size));
skia_gr_cache_size_ = static_cast<uint64_t>(new_size); skia_gr_cache_size_ = static_cast<uint64_t>(new_size);
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "gpu/command_buffer/service/memory_tracking.h" #include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/config/gpu_preferences.h" #include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_gles2_export.h" #include "gpu/gpu_gles2_export.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_peak_memory.h" #include "gpu/ipc/common/gpu_peak_memory.h"
#include "gpu/vulkan/buildflags.h" #include "gpu/vulkan/buildflags.h"
#include "third_party/skia/include/core/SkSurface.h" #include "third_party/skia/include/core/SkSurface.h"
...@@ -153,7 +154,13 @@ class GPU_GLES2_EXPORT SharedContextState ...@@ -153,7 +154,13 @@ class GPU_GLES2_EXPORT SharedContextState
bool support_vulkan_external_object() const { bool support_vulkan_external_object() const {
return support_vulkan_external_object_; return support_vulkan_external_object_;
} }
gpu::MemoryTracker::Observer* memory_tracker() { return &memory_tracker_; } gpu::MemoryTracker::Observer* memory_tracker_observer() {
return &memory_tracker_observer_;
}
gpu::MemoryTracker* memory_tracker() { return &memory_tracker_; }
gpu::MemoryTypeTracker* memory_type_tracker() {
return &memory_type_tracker_;
}
ExternalSemaphorePool* external_semaphore_pool() { ExternalSemaphorePool* external_semaphore_pool() {
#if BUILDFLAG(ENABLE_VULKAN) #if BUILDFLAG(ENABLE_VULKAN)
return external_semaphore_pool_.get(); return external_semaphore_pool_.get();
...@@ -213,13 +220,14 @@ class GPU_GLES2_EXPORT SharedContextState ...@@ -213,13 +220,14 @@ class GPU_GLES2_EXPORT SharedContextState
// Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a // Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a
// shared image, and forward information to both histograms and task manager. // shared image, and forward information to both histograms and task manager.
class GPU_GLES2_EXPORT MemoryTracker : public gpu::MemoryTracker::Observer { class GPU_GLES2_EXPORT MemoryTrackerObserver
: public gpu::MemoryTracker::Observer {
public: public:
explicit MemoryTracker( explicit MemoryTrackerObserver(
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor); base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor);
MemoryTracker(MemoryTracker&) = delete; MemoryTrackerObserver(MemoryTrackerObserver&) = delete;
MemoryTracker& operator=(MemoryTracker&) = delete; MemoryTrackerObserver& operator=(MemoryTrackerObserver&) = delete;
~MemoryTracker() override; ~MemoryTrackerObserver() override;
// gpu::MemoryTracker::Observer implementation: // gpu::MemoryTracker::Observer implementation:
void OnMemoryAllocatedChange( void OnMemoryAllocatedChange(
...@@ -237,6 +245,29 @@ class GPU_GLES2_EXPORT SharedContextState ...@@ -237,6 +245,29 @@ class GPU_GLES2_EXPORT SharedContextState
base::WeakPtr<gpu::MemoryTracker::Observer> const peak_memory_monitor_; base::WeakPtr<gpu::MemoryTracker::Observer> const peak_memory_monitor_;
}; };
// MemoryTracker implementation used to track SharedImages owned by
// SkiaOutputSurfaceImpl.
class MemoryTracker : public gpu::MemoryTracker {
public:
explicit MemoryTracker(gpu::MemoryTracker::Observer* observer);
MemoryTracker(const MemoryTracker&) = delete;
MemoryTracker& operator=(const MemoryTracker&) = delete;
~MemoryTracker() override;
// MemoryTracker implementation:
void TrackMemoryAllocatedChange(int64_t delta) override;
uint64_t GetSize() const override;
uint64_t ClientTracingId() const override;
int ClientId() const override;
uint64_t ContextGroupTracingId() const override;
private:
gpu::CommandBufferId command_buffer_id_;
const uint64_t client_tracing_id_;
gpu::MemoryTracker::Observer* const observer_;
uint64_t size_ = 0;
};
~SharedContextState() override; ~SharedContextState() override;
// gpu::GLContextVirtualDelegate implementation. // gpu::GLContextVirtualDelegate implementation.
...@@ -263,7 +294,9 @@ class GPU_GLES2_EXPORT SharedContextState ...@@ -263,7 +294,9 @@ class GPU_GLES2_EXPORT SharedContextState
bool support_vulkan_external_object_ = false; bool support_vulkan_external_object_ = false;
ContextLostCallback context_lost_callback_; ContextLostCallback context_lost_callback_;
GrContextType gr_context_type_ = GrContextType::kGL; GrContextType gr_context_type_ = GrContextType::kGL;
MemoryTrackerObserver memory_tracker_observer_;
MemoryTracker memory_tracker_; MemoryTracker memory_tracker_;
gpu::MemoryTypeTracker memory_type_tracker_;
viz::VulkanContextProvider* const vk_context_provider_; viz::VulkanContextProvider* const vk_context_provider_;
viz::MetalContextProvider* const metal_context_provider_; viz::MetalContextProvider* const metal_context_provider_;
viz::DawnContextProvider* const dawn_context_provider_; viz::DawnContextProvider* const dawn_context_provider_;
......