Commit 4f5e284a authored by Peng Huang's avatar Peng Huang Committed by Commit Bot

Fix IOSurface leaking on macOS

Some client resources (video, etc) will need to wait on a ResourceFence
before returning them to client. That fence is released when all GPU
work for the root render pass is finished on the GPU. However, on macOS,
there isn't a root render pass for every frame, and all related client
resources will never be released. To fix the problem, we have to release
the fence for each GPU task (drawing non-root render passes, drawing
render pass overlays, etc.).

Bug: 1143042
Change-Id: I335e4a7c4a36557315cede7edd834b028c7b015d
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2537112
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Commit-Queue: Peng Huang <penghuang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#827445}
parent 763b84c8
...@@ -122,10 +122,7 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface, ...@@ -122,10 +122,7 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface,
// Finish painting the current frame or current render pass, depends on which // Finish painting the current frame or current render pass, depends on which
// BeginPaint function is called last. This method will schedule a GPU task to // BeginPaint function is called last. This method will schedule a GPU task to
// play the DDL back on GPU thread on a cached SkSurface. This method returns // play the DDL back on GPU thread on a cached SkSurface.
// a sync token which can be waited on in a command buffer to ensure the paint
// operation is completed. This token is released when the GPU ops from
// painting the render pass have been seen and processed by the GPU main.
// Optionally the caller may specify |on_finished| callback to be called after // Optionally the caller may specify |on_finished| callback to be called after
// the GPU has finished processing all submitted commands. The callback may be // the GPU has finished processing all submitted commands. The callback may be
// called on a different thread. // called on a different thread.
...@@ -156,8 +153,12 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface, ...@@ -156,8 +153,12 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface,
// Schedule drawing overlays at next SwapBuffers() call. Waits on // Schedule drawing overlays at next SwapBuffers() call. Waits on
// |sync_tokens| for the overlay textures to be ready before scheduling. // |sync_tokens| for the overlay textures to be ready before scheduling.
// Optionally the caller may specify |on_finished| callback to be called after
// the GPU has finished processing all submitted commands. The callback may be
// called on a different thread.
virtual void ScheduleOverlays(OverlayList overlays, virtual void ScheduleOverlays(OverlayList overlays,
std::vector<gpu::SyncToken> sync_tokens) = 0; std::vector<gpu::SyncToken> sync_tokens,
base::OnceClosure on_finished) = 0;
// Add context lost observer. // Add context lost observer.
virtual void AddContextLostObserver(ContextLostObserver* observer) = 0; virtual void AddContextLostObserver(ContextLostObserver* observer) = 0;
...@@ -170,7 +171,9 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface, ...@@ -170,7 +171,9 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurface : public OutputSurface,
base::OnceClosure callback, base::OnceClosure callback,
std::vector<gpu::SyncToken> sync_tokens) = 0; std::vector<gpu::SyncToken> sync_tokens) = 0;
// Flush pending GPU tasks. // Flush pending GPU tasks. This method returns a sync token which can be
// waited on in a command buffer to ensure all pending tasks are executed on
// the GPU main thread.
virtual gpu::SyncToken Flush() = 0; virtual gpu::SyncToken Flush() = 0;
#if defined(OS_APPLE) #if defined(OS_APPLE)
......
...@@ -651,24 +651,6 @@ class SkiaRenderer::ScopedYUVSkImageBuilder { ...@@ -651,24 +651,6 @@ class SkiaRenderer::ScopedYUVSkImageBuilder {
DISALLOW_COPY_AND_ASSIGN(ScopedYUVSkImageBuilder); DISALLOW_COPY_AND_ASSIGN(ScopedYUVSkImageBuilder);
}; };
SkiaRenderer::SkiaRenderer(const RendererSettings* settings,
const DebugRendererSettings* debug_settings,
OutputSurface* output_surface,
DisplayResourceProvider* resource_provider,
OverlayProcessorInterface* overlay_processor,
SkiaOutputSurface* skia_output_surface)
: DirectRenderer(settings,
debug_settings,
output_surface,
resource_provider,
overlay_processor),
skia_output_surface_(skia_output_surface) {
DCHECK(skia_output_surface_);
lock_set_for_external_use_.emplace(resource_provider, skia_output_surface_);
}
SkiaRenderer::~SkiaRenderer() = default;
class SkiaRenderer::FrameResourceFence : public ResourceFence { class SkiaRenderer::FrameResourceFence : public ResourceFence {
public: public:
FrameResourceFence() = default; FrameResourceFence() = default;
...@@ -691,6 +673,27 @@ class SkiaRenderer::FrameResourceFence : public ResourceFence { ...@@ -691,6 +673,27 @@ class SkiaRenderer::FrameResourceFence : public ResourceFence {
DISALLOW_COPY_AND_ASSIGN(FrameResourceFence); DISALLOW_COPY_AND_ASSIGN(FrameResourceFence);
}; };
// Constructs the renderer and installs the first FrameResourceFence on the
// resource provider up front. Per this commit, the fence is created here (and
// re-created after each signal) rather than once per frame in
// BeginDrawingFrame(), so client resources locked for read can be released
// even on frames that have no root render pass (e.g. overlay-only frames on
// macOS — see the diff hunks for BeginDrawingFrame()/ScheduleOverlays()).
SkiaRenderer::SkiaRenderer(const RendererSettings* settings,
const DebugRendererSettings* debug_settings,
OutputSurface* output_surface,
DisplayResourceProvider* resource_provider,
OverlayProcessorInterface* overlay_processor,
SkiaOutputSurface* skia_output_surface)
: DirectRenderer(settings,
debug_settings,
output_surface,
resource_provider,
overlay_processor),
skia_output_surface_(skia_output_surface) {
// A SkiaOutputSurface is mandatory for this renderer.
DCHECK(skia_output_surface_);
lock_set_for_external_use_.emplace(resource_provider, skia_output_surface_);
// Eagerly create the read-lock fence so it is always present; callers that
// signal it (FinishDrawingQuadList / ScheduleOverlays in this commit)
// immediately replace it with a fresh fence.
current_frame_resource_fence_ = base::MakeRefCounted<FrameResourceFence>();
resource_provider_->SetReadLockFence(current_frame_resource_fence_.get());
}
SkiaRenderer::~SkiaRenderer() = default;
bool SkiaRenderer::CanPartialSwap() { bool SkiaRenderer::CanPartialSwap() {
return output_surface_->capabilities().supports_post_sub_buffer; return output_surface_->capabilities().supports_post_sub_buffer;
} }
...@@ -698,10 +701,7 @@ bool SkiaRenderer::CanPartialSwap() { ...@@ -698,10 +701,7 @@ bool SkiaRenderer::CanPartialSwap() {
void SkiaRenderer::BeginDrawingFrame() { void SkiaRenderer::BeginDrawingFrame() {
TRACE_EVENT0("viz", "SkiaRenderer::BeginDrawingFrame"); TRACE_EVENT0("viz", "SkiaRenderer::BeginDrawingFrame");
DCHECK(!current_frame_resource_fence_); DCHECK(!current_frame_resource_fence_->WasSet());
current_frame_resource_fence_ = base::MakeRefCounted<FrameResourceFence>();
resource_provider_->SetReadLockFence(current_frame_resource_fence_.get());
#if defined(OS_ANDROID) #if defined(OS_ANDROID)
for (const auto& pass : *current_frame()->render_passes_in_draw_order) { for (const auto& pass : *current_frame()->render_passes_in_draw_order) {
...@@ -715,7 +715,6 @@ void SkiaRenderer::BeginDrawingFrame() { ...@@ -715,7 +715,6 @@ void SkiaRenderer::BeginDrawingFrame() {
void SkiaRenderer::FinishDrawingFrame() { void SkiaRenderer::FinishDrawingFrame() {
TRACE_EVENT0("viz", "SkiaRenderer::FinishDrawingFrame"); TRACE_EVENT0("viz", "SkiaRenderer::FinishDrawingFrame");
current_frame_resource_fence_ = nullptr;
current_canvas_ = nullptr; current_canvas_ = nullptr;
current_surface_ = nullptr; current_surface_ = nullptr;
...@@ -2066,6 +2065,8 @@ void SkiaRenderer::DrawUnsupportedQuad(const DrawQuad* quad, ...@@ -2066,6 +2065,8 @@ void SkiaRenderer::DrawUnsupportedQuad(const DrawQuad* quad,
} }
void SkiaRenderer::ScheduleOverlays() { void SkiaRenderer::ScheduleOverlays() {
DCHECK(!current_frame_resource_fence_->WasSet());
pending_overlay_locks_.emplace_back(); pending_overlay_locks_.emplace_back();
if (current_frame()->overlay_list.empty()) if (current_frame()->overlay_list.empty())
return; return;
...@@ -2170,8 +2171,17 @@ void SkiaRenderer::ScheduleOverlays() { ...@@ -2170,8 +2171,17 @@ void SkiaRenderer::ScheduleOverlays() {
NOTREACHED(); NOTREACHED();
#endif // defined(OS_ANDROID) #endif // defined(OS_ANDROID)
base::OnceClosure on_finished_callback;
if (current_frame_resource_fence_->WasSet()) {
on_finished_callback = base::BindOnce(
&FrameResourceFence::Signal, std::move(current_frame_resource_fence_));
current_frame_resource_fence_ = base::MakeRefCounted<FrameResourceFence>();
resource_provider_->SetReadLockFence(current_frame_resource_fence_.get());
}
skia_output_surface_->ScheduleOverlays( skia_output_surface_->ScheduleOverlays(
std::move(current_frame()->overlay_list), std::move(sync_tokens)); std::move(current_frame()->overlay_list), std::move(sync_tokens),
std::move(on_finished_callback));
} }
sk_sp<SkColorFilter> SkiaRenderer::GetColorSpaceConversionFilter( sk_sp<SkColorFilter> SkiaRenderer::GetColorSpaceConversionFilter(
...@@ -2519,10 +2529,11 @@ void SkiaRenderer::FinishDrawingQuadList() { ...@@ -2519,10 +2529,11 @@ void SkiaRenderer::FinishDrawingQuadList() {
base::OnceClosure on_finished_callback; base::OnceClosure on_finished_callback;
// Signal |current_frame_resource_fence_| when the root render pass is // Signal |current_frame_resource_fence_| when the root render pass is
// finished. // finished.
if (current_frame_resource_fence_ && if (current_frame_resource_fence_->WasSet()) {
current_frame_resource_fence_->WasSet() && is_root_render_pass) {
on_finished_callback = base::BindOnce( on_finished_callback = base::BindOnce(
&FrameResourceFence::Signal, std::move(current_frame_resource_fence_)); &FrameResourceFence::Signal, std::move(current_frame_resource_fence_));
current_frame_resource_fence_ = base::MakeRefCounted<FrameResourceFence>();
resource_provider_->SetReadLockFence(current_frame_resource_fence_.get());
} }
skia_output_surface_->EndPaint(std::move(on_finished_callback)); skia_output_surface_->EndPaint(std::move(on_finished_callback));
......
...@@ -559,7 +559,6 @@ void SkiaOutputSurfaceImpl::EndPaint(base::OnceClosure on_finished) { ...@@ -559,7 +559,6 @@ void SkiaOutputSurfaceImpl::EndPaint(base::OnceClosure on_finished) {
// MakePromiseSkImageFromRenderPass() is called. // MakePromiseSkImageFromRenderPass() is called.
it->second->clear_image(); it->second->clear_image();
} }
DCHECK(!on_finished);
base::TimeTicks post_task_timestamp; base::TimeTicks post_task_timestamp;
if (should_measure_next_post_task_) { if (should_measure_next_post_task_) {
...@@ -567,11 +566,12 @@ void SkiaOutputSurfaceImpl::EndPaint(base::OnceClosure on_finished) { ...@@ -567,11 +566,12 @@ void SkiaOutputSurfaceImpl::EndPaint(base::OnceClosure on_finished) {
post_task_timestamp = base::TimeTicks::Now(); post_task_timestamp = base::TimeTicks::Now();
} }
auto task = base::BindOnce( auto task =
&SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass, base::BindOnce(&SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass,
base::Unretained(impl_on_gpu_.get()), post_task_timestamp, base::Unretained(impl_on_gpu_.get()),
current_paint_->render_pass_id(), std::move(ddl), post_task_timestamp, current_paint_->render_pass_id(),
std::move(images_in_current_paint_), resource_sync_tokens_); std::move(ddl), std::move(images_in_current_paint_),
resource_sync_tokens_, std::move(on_finished));
EnqueueGpuTask(std::move(task), std::move(resource_sync_tokens_), EnqueueGpuTask(std::move(task), std::move(resource_sync_tokens_),
/*make_current=*/true, /*need_framebuffer=*/false); /*make_current=*/true, /*need_framebuffer=*/false);
} else { } else {
...@@ -677,11 +677,12 @@ void SkiaOutputSurfaceImpl::CopyOutput( ...@@ -677,11 +677,12 @@ void SkiaOutputSurfaceImpl::CopyOutput(
void SkiaOutputSurfaceImpl::ScheduleOverlays( void SkiaOutputSurfaceImpl::ScheduleOverlays(
OverlayList overlays, OverlayList overlays,
std::vector<gpu::SyncToken> sync_tokens) { std::vector<gpu::SyncToken> sync_tokens,
auto task = base::OnceClosure on_finished) {
base::BindOnce(&SkiaOutputSurfaceImplOnGpu::ScheduleOverlays, auto task = base::BindOnce(
&SkiaOutputSurfaceImplOnGpu::ScheduleOverlays,
base::Unretained(impl_on_gpu_.get()), std::move(overlays), base::Unretained(impl_on_gpu_.get()), std::move(overlays),
std::move(images_in_current_paint_)); std::move(images_in_current_paint_), std::move(on_finished));
#if defined(OS_APPLE) #if defined(OS_APPLE)
DCHECK_EQ(dependency_->gr_context_type(), gpu::GrContextType::kGL); DCHECK_EQ(dependency_->gr_context_type(), gpu::GrContextType::kGL);
// If there are render pass overlays, then a gl context is needed for drawing // If there are render pass overlays, then a gl context is needed for drawing
......
...@@ -118,7 +118,8 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurfaceImpl : public SkiaOutputSurface { ...@@ -118,7 +118,8 @@ class VIZ_SERVICE_EXPORT SkiaOutputSurfaceImpl : public SkiaOutputSurface {
void RemoveRenderPassResource( void RemoveRenderPassResource(
std::vector<AggregatedRenderPassId> ids) override; std::vector<AggregatedRenderPassId> ids) override;
void ScheduleOverlays(OverlayList overlays, void ScheduleOverlays(OverlayList overlays,
std::vector<gpu::SyncToken> sync_tokens) override; std::vector<gpu::SyncToken> sync_tokens,
base::OnceClosure on_finished) override;
void CopyOutput(AggregatedRenderPassId id, void CopyOutput(AggregatedRenderPassId id,
const copy_output::RenderPassGeometry& geometry, const copy_output::RenderPassGeometry& geometry,
......
...@@ -235,11 +235,14 @@ void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::BeginAccess( ...@@ -235,11 +235,14 @@ void SkiaOutputSurfaceImplOnGpu::PromiseImageAccessHelper::BeginAccess(
std::vector<ImageContextImpl*> image_contexts, std::vector<ImageContextImpl*> image_contexts,
std::vector<GrBackendSemaphore>* begin_semaphores, std::vector<GrBackendSemaphore>* begin_semaphores,
std::vector<GrBackendSemaphore>* end_semaphores) { std::vector<GrBackendSemaphore>* end_semaphores) {
// GL doesn't need semaphores.
if (!impl_on_gpu_->context_state_->GrContextIsGL()) {
DCHECK(begin_semaphores); DCHECK(begin_semaphores);
DCHECK(end_semaphores); DCHECK(end_semaphores);
begin_semaphores->reserve(image_contexts.size()); begin_semaphores->reserve(image_contexts.size());
// We may need one more space for the swap buffer semaphore. // We may need one more space for the swap buffer semaphore.
end_semaphores->reserve(image_contexts.size() + 1); end_semaphores->reserve(image_contexts.size() + 1);
}
image_contexts_.reserve(image_contexts.size() + image_contexts_.size()); image_contexts_.reserve(image_contexts.size() + image_contexts_.size());
image_contexts_.insert(image_contexts.begin(), image_contexts.end()); image_contexts_.insert(image_contexts.begin(), image_contexts.end());
impl_on_gpu_->BeginAccessImages(std::move(image_contexts), begin_semaphores, impl_on_gpu_->BeginAccessImages(std::move(image_contexts), begin_semaphores,
...@@ -604,7 +607,8 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass( ...@@ -604,7 +607,8 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass(
AggregatedRenderPassId id, AggregatedRenderPassId id,
sk_sp<SkDeferredDisplayList> ddl, sk_sp<SkDeferredDisplayList> ddl,
std::vector<ImageContextImpl*> image_contexts, std::vector<ImageContextImpl*> image_contexts,
std::vector<gpu::SyncToken> sync_tokens) { std::vector<gpu::SyncToken> sync_tokens,
base::OnceClosure on_finished) {
TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass"); TRACE_EVENT0("viz", "SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass");
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(ddl); DCHECK(ddl);
...@@ -655,6 +659,8 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass( ...@@ -655,6 +659,8 @@ void SkiaOutputSurfaceImplOnGpu::FinishPaintRenderPass(
}; };
gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_, gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_,
&flush_info); &flush_info);
if (on_finished)
gpu::AddCleanupTaskForSkiaFlush(std::move(on_finished), &flush_info);
auto result = offscreen.surface()->flush(flush_info); auto result = offscreen.surface()->flush(flush_info);
if (result != GrSemaphoresSubmitted::kYes && if (result != GrSemaphoresSubmitted::kYes &&
!(begin_semaphores.empty() && end_semaphores.empty())) { !(begin_semaphores.empty() && end_semaphores.empty())) {
...@@ -983,28 +989,22 @@ void SkiaOutputSurfaceImplOnGpu::ReleaseImageContexts( ...@@ -983,28 +989,22 @@ void SkiaOutputSurfaceImplOnGpu::ReleaseImageContexts(
void SkiaOutputSurfaceImplOnGpu::ScheduleOverlays( void SkiaOutputSurfaceImplOnGpu::ScheduleOverlays(
SkiaOutputSurface::OverlayList overlays, SkiaOutputSurface::OverlayList overlays,
std::vector<ImageContextImpl*> image_contexts) { std::vector<ImageContextImpl*> image_contexts,
base::OnceClosure on_finished) {
#if defined(OS_APPLE) #if defined(OS_APPLE)
if (context_is_lost_) if (context_is_lost_)
return; return;
std::vector<GrBackendSemaphore> begin_semaphores;
std::vector<GrBackendSemaphore> end_semaphores;
promise_image_access_helper_.BeginAccess(std::move(image_contexts),
&begin_semaphores, &end_semaphores);
// GL is used on MacOS and GL doesn't need semaphores. // GL is used on MacOS and GL doesn't need semaphores.
DCHECK(begin_semaphores.empty()); promise_image_access_helper_.BeginAccess(std::move(image_contexts),
DCHECK(end_semaphores.empty()); /*begin_semaphores=*/nullptr,
/*end_semaphores=*/nullptr);
using ScopedWriteAccess =
std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedWriteAccess>;
std::vector<ScopedWriteAccess> scoped_write_accesses;
for (auto& overlay : overlays) { for (auto& overlay : overlays) {
if (!overlay.ddl) if (!overlay.ddl)
continue; continue;
base::Optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
if (dependency_->GetGrShaderCache()) {
cache_use.emplace(dependency_->GetGrShaderCache(),
gpu::kDisplayCompositorClientId);
}
const auto& characterization = overlay.ddl->characterization(); const auto& characterization = overlay.ddl->characterization();
auto backing = GetOrCreateRenderPassOverlayBacking(characterization); auto backing = GetOrCreateRenderPassOverlayBacking(characterization);
if (!backing) if (!backing)
...@@ -1018,11 +1018,25 @@ void SkiaOutputSurfaceImplOnGpu::ScheduleOverlays( ...@@ -1018,11 +1018,25 @@ void SkiaOutputSurfaceImplOnGpu::ScheduleOverlays(
gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes); gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
bool result = scoped_access->surface()->draw(overlay.ddl); bool result = scoped_access->surface()->draw(overlay.ddl);
DCHECK(result); DCHECK(result);
context_state_->gr_context()->flushAndSubmit(); scoped_write_accesses.push_back(std::move(scoped_access));
scoped_access.reset();
backing->SetCleared(); backing->SetCleared();
in_flight_render_pass_overlay_backings_.insert(std::move(backing)); in_flight_render_pass_overlay_backings_.insert(std::move(backing));
} }
if (!scoped_write_accesses.empty()) {
base::Optional<gpu::raster::GrShaderCache::ScopedCacheUse> cache_use;
if (dependency_->GetGrShaderCache()) {
cache_use.emplace(dependency_->GetGrShaderCache(),
gpu::kDisplayCompositorClientId);
}
GrFlushInfo flush_info = {};
if (on_finished)
gpu::AddCleanupTaskForSkiaFlush(std::move(on_finished), &flush_info);
context_state_->gr_context()->flush(flush_info);
context_state_->gr_context()->submit();
scoped_write_accesses.clear();
}
promise_image_access_helper_.EndAccess(); promise_image_access_helper_.EndAccess();
output_device_->ScheduleOverlays(std::move(overlays)); output_device_->ScheduleOverlays(std::move(overlays));
#else #else
......
...@@ -142,7 +142,8 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -142,7 +142,8 @@ class SkiaOutputSurfaceImplOnGpu
AggregatedRenderPassId id, AggregatedRenderPassId id,
sk_sp<SkDeferredDisplayList> ddl, sk_sp<SkDeferredDisplayList> ddl,
std::vector<ImageContextImpl*> image_contexts, std::vector<ImageContextImpl*> image_contexts,
std::vector<gpu::SyncToken> sync_tokens); std::vector<gpu::SyncToken> sync_tokens,
base::OnceClosure on_finished);
// Deletes resources for RenderPasses in |ids|. Also takes ownership of // Deletes resources for RenderPasses in |ids|. Also takes ownership of
// |images_contexts| and destroys them on GPU thread. // |images_contexts| and destroys them on GPU thread.
void RemoveRenderPassResource( void RemoveRenderPassResource(
...@@ -165,7 +166,8 @@ class SkiaOutputSurfaceImplOnGpu ...@@ -165,7 +166,8 @@ class SkiaOutputSurfaceImplOnGpu
std::vector<std::unique_ptr<ExternalUseClient::ImageContext>> std::vector<std::unique_ptr<ExternalUseClient::ImageContext>>
image_contexts); image_contexts);
void ScheduleOverlays(SkiaOutputSurface::OverlayList overlays, void ScheduleOverlays(SkiaOutputSurface::OverlayList overlays,
std::vector<ImageContextImpl*> image_contexts); std::vector<ImageContextImpl*> image_contexts,
base::OnceClosure on_finished);
void SetEnableDCLayers(bool enable); void SetEnableDCLayers(bool enable);
void SetGpuVSyncEnabled(bool enabled); void SetGpuVSyncEnabled(bool enabled);
......
...@@ -90,7 +90,8 @@ class FakeSkiaOutputSurface : public SkiaOutputSurface { ...@@ -90,7 +90,8 @@ class FakeSkiaOutputSurface : public SkiaOutputSurface {
void RemoveRenderPassResource( void RemoveRenderPassResource(
std::vector<AggregatedRenderPassId> ids) override; std::vector<AggregatedRenderPassId> ids) override;
void ScheduleOverlays(OverlayList overlays, void ScheduleOverlays(OverlayList overlays,
std::vector<gpu::SyncToken> sync_tokens) override {} std::vector<gpu::SyncToken> sync_tokens,
base::OnceClosure on_finished) override {}
#if defined(OS_WIN) #if defined(OS_WIN)
void SetEnableDCLayers(bool enable) override {} void SetEnableDCLayers(bool enable) override {}
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment