Commit 23a637ea authored by Peng Huang, committed by Commit Bot

SkiaOutputSurfaceBufferQueue: support empty swap with CommitOverlayPlanes

For the empty swap case, SchedulePrimaryPlane() re-schedules the last
submitted image.
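
The scheduling logic this change introduces can be summarized with the
following simplified sketch of SchedulePrimaryPlane() (condensed from the
diff below; an illustration, not the exact production code):

    // Condensed sketch: when nothing was drawn this frame, current_image_ is
    // null, so the previously submitted image is scheduled again.
    Image* image = current_image_ ? current_image_ : submitted_image_;
    DCHECK(image);
    image->BeginPresent();
    // Only request a GPU fence when a freshly drawn image is scheduled;
    // re-scheduling the already submitted image skips it.
    std::unique_ptr<gfx::GpuFence> fence;
    gl::GLImage* gl_image =
        image->GetGLImage(image == submitted_image_ ? nullptr : &fence);
    gl_surface_->ScheduleOverlayPlane(/*z_order=*/0, plane.transform, gl_image,
                                      ToNearestRect(plane.display_rect),
                                      gfx::RectF(0.f, 0.f, 1.f, 1.f),
                                      plane.enable_blending, std::move(fence));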

Bug: 1041035
Change-Id: Ic2994b3596e8919bf8f7a1828bcc73274e561470
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2007475
Commit-Queue: Peng Huang <penghuang@chromium.org>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Cr-Commit-Position: refs/heads/master@{#733049}
parent 1a8076b8
@@ -141,6 +141,9 @@ void DirectRenderer::Initialize() {
             .disable_non_empty_post_sub_buffers) {
       use_partial_swap_ = false;
     }
+  } else {
+    allow_empty_swap_ |=
+        output_surface_->capabilities().supports_commit_overlay_planes;
   }

   initialized_ = true;
...
@@ -64,6 +64,8 @@ class VIZ_SERVICE_EXPORT OutputSurface {
     bool supports_stencil = false;
     // Whether this OutputSurface supports post sub buffer or not.
     bool supports_post_sub_buffer = false;
+    // Whether this OutputSurface supports commit overlay planes.
+    bool supports_commit_overlay_planes = false;
     // Whether this OutputSurface supports gpu vsync callbacks.
     bool supports_gpu_vsync = false;
     // Whether this OutputSurface supports pre transform. If it is supported,
...
@@ -27,6 +27,12 @@ SkiaOutputDevice::SkiaOutputDevice(

 SkiaOutputDevice::~SkiaOutputDevice() = default;

+void SkiaOutputDevice::CommitOverlayPlanes(
+    BufferPresentedCallback feedback,
+    std::vector<ui::LatencyInfo> latency_info) {
+  NOTREACHED();
+}
+
 void SkiaOutputDevice::PostSubBuffer(
     const gfx::Rect& rect,
     BufferPresentedCallback feedback,
...
@@ -88,6 +88,8 @@ class SkiaOutputDevice {
   virtual void PostSubBuffer(const gfx::Rect& rect,
                              BufferPresentedCallback feedback,
                              std::vector<ui::LatencyInfo> latency_info);
+  virtual void CommitOverlayPlanes(BufferPresentedCallback feedback,
+                                   std::vector<ui::LatencyInfo> latency_info);
   // Set the rectangle that will be drawn into on the surface.
   virtual void SetDrawRectangle(const gfx::Rect& draw_rectangle);
...
@@ -67,8 +67,8 @@ class SkiaOutputDeviceBufferQueue::Image {
   }

   SkSurface* BeginWriteSkia() {
-    DCHECK(!scoped_write_access_);
-    DCHECK(!scoped_read_access_);
+    DCHECK(!scoped_skia_write_access_);
+    DCHECK(!scoped_overlay_read_access_);
     DCHECK(end_semaphores_.empty());
     std::vector<GrBackendSemaphore> begin_semaphores;
@@ -77,29 +77,29 @@ class SkiaOutputDeviceBufferQueue::Image {
     // Buffer queue is internal to GPU proc and handles texture initialization,
     // so allow uncleared access.
     // TODO(vasilyt): Props and MSAA
-    scoped_write_access_ = skia_representation_->BeginScopedWriteAccess(
+    scoped_skia_write_access_ = skia_representation_->BeginScopedWriteAccess(
         0 /* final_msaa_count */, surface_props, &begin_semaphores,
         &end_semaphores_,
         gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
-    DCHECK(scoped_write_access_);
+    DCHECK(scoped_skia_write_access_);
     if (!begin_semaphores.empty()) {
-      scoped_write_access_->surface()->wait(begin_semaphores.size(),
-                                            begin_semaphores.data());
+      scoped_skia_write_access_->surface()->wait(begin_semaphores.size(),
+                                                 begin_semaphores.data());
     }
-    return scoped_write_access_->surface();
+    return scoped_skia_write_access_->surface();
   }

   void EndWriteSkia() {
-    DCHECK(scoped_write_access_);
+    DCHECK(scoped_skia_write_access_);
     GrFlushInfo flush_info = {
         .fFlags = kNone_GrFlushFlags,
         .fNumSemaphores = end_semaphores_.size(),
         .fSignalSemaphores = end_semaphores_.data(),
     };
-    scoped_write_access_->surface()->flush(
+    scoped_skia_write_access_->surface()->flush(
         SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
-    scoped_write_access_.reset();
+    scoped_skia_write_access_.reset();
     end_semaphores_.clear();
     // SkiaRenderer always draws the full frame.
@@ -107,14 +107,19 @@ class SkiaOutputDeviceBufferQueue::Image {
   }

   void BeginPresent() {
-    DCHECK(!scoped_write_access_);
-    DCHECK(!scoped_read_access_);
-    DCHECK(!scoped_gl_read_access_);
+    if (++present_count_ != 1) {
+      DCHECK(scoped_overlay_read_access_ || scoped_gl_read_access_);
+      return;
+    }
+
+    DCHECK(!scoped_skia_write_access_);
+    DCHECK(!scoped_overlay_read_access_);

     if (overlay_representation_) {
-      scoped_read_access_ = overlay_representation_->BeginScopedReadAccess(
-          true /* need_gl_image */);
-      DCHECK(scoped_read_access_);
+      scoped_overlay_read_access_ =
+          overlay_representation_->BeginScopedReadAccess(
+              true /* need_gl_image */);
+      DCHECK(scoped_overlay_read_access_);
       return;
     }
@@ -125,24 +130,32 @@ class SkiaOutputDeviceBufferQueue::Image {
   }

   void EndPresent() {
-    scoped_read_access_.reset();
+    DCHECK(present_count_);
+    if (--present_count_)
+      return;
+    scoped_overlay_read_access_.reset();
     scoped_gl_read_access_.reset();
   }

   gl::GLImage* GetGLImage(std::unique_ptr<gfx::GpuFence>* fence) {
-    *fence = nullptr;
-    if (scoped_read_access_)
-      return scoped_read_access_->gl_image();
+    if (scoped_overlay_read_access_)
+      return scoped_overlay_read_access_->gl_image();
     DCHECK(scoped_gl_read_access_);
-    if (auto gl_fence = gl::GLFence::CreateForGpuFence()) {
-      *fence = gl_fence->GetGpuFence();
+    if (gl::GLFence::IsGpuFenceSupported() && fence) {
+      if (auto gl_fence = gl::GLFence::CreateForGpuFence())
+        *fence = gl_fence->GetGpuFence();
     }
     auto* texture = gl_representation_->GetTexture();
     return texture->GetLevelImage(texture->target(), 0);
   }

+  int present_count() const { return present_count_; }
+  gpu::SharedImageRepresentationSkia* skia_representation() {
+    return skia_representation_.get();
+  }
+
  private:
   gpu::SharedImageFactory* const factory_;
   gpu::SharedImageRepresentationFactory* const representation_factory_;
@@ -153,12 +166,13 @@ class SkiaOutputDeviceBufferQueue::Image {
       overlay_representation_;
   std::unique_ptr<gpu::SharedImageRepresentationGLTexture> gl_representation_;
   std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedWriteAccess>
-      scoped_write_access_;
+      scoped_skia_write_access_;
   std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
-      scoped_read_access_;
+      scoped_overlay_read_access_;
   std::unique_ptr<gpu::SharedImageRepresentationGLTexture::ScopedAccess>
       scoped_gl_read_access_;
   std::vector<GrBackendSemaphore> end_semaphores_;
+  int present_count_ = 0;

   DISALLOW_COPY_AND_ASSIGN(Image);
 };
@@ -193,7 +207,8 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
                        memory_tracker,
                        did_swap_buffer_complete_callback),
       dependency_(deps),
-      gl_surface_(gl_surface),
+      gl_surface_(std::move(gl_surface)),
+      supports_async_swap_(gl_surface_->SupportsAsyncSwap()),
       shared_image_factory_(deps->GetGpuPreferences(),
                             deps->GetGpuDriverBugWorkarounds(),
                             deps->GetGpuFeatureInfo(),
@@ -214,8 +229,11 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
   image_format_ = RGBA_8888;
 #endif

-  capabilities_.supports_post_sub_buffer = gl_surface_->SupportsPostSubBuffer();
   // TODO(vasilyt): Need to figure out why partial swap isn't working
   capabilities_.supports_post_sub_buffer = false;
+  capabilities_.supports_commit_overlay_planes =
+      gl_surface_->SupportsCommitOverlayPlanes();
   capabilities_.max_frames_pending = 2;
   // Set supports_surfaceless to enable overlays.
   capabilities_.supports_surfaceless = true;
@@ -226,7 +244,7 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
     SkiaOutputSurfaceDependency* deps,
     gpu::MemoryTracker* memory_tracker,
     const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback)
-    : SkiaOutputDeviceBufferQueue(gl_surface,
+    : SkiaOutputDeviceBufferQueue(std::move(gl_surface),
                                   deps,
                                   memory_tracker,
                                   did_swap_buffer_complete_callback,
@@ -274,17 +292,9 @@ SkiaOutputDeviceBufferQueue::Create(
 }

 SkiaOutputDeviceBufferQueue::Image*
-SkiaOutputDeviceBufferQueue::GetCurrentImage() {
-  if (!current_image_)
-    current_image_ = GetNextImage();
-  return current_image_.get();
-}
-
-std::unique_ptr<SkiaOutputDeviceBufferQueue::Image>
 SkiaOutputDeviceBufferQueue::GetNextImage() {
   if (!available_images_.empty()) {
-    std::unique_ptr<Image> image = std::move(available_images_.back());
+    auto* image = available_images_.back();
     available_images_.pop_back();
     return image;
   }
@@ -294,47 +304,59 @@ SkiaOutputDeviceBufferQueue::GetNextImage() {
   if (image->Initialize(image_size_, color_space_, image_format_, dependency_,
                         shared_image_usage_)) {
-    return image;
+    images_.push_back(std::move(image));
+    return images_.back().get();
   }
   return nullptr;
 }

-void SkiaOutputDeviceBufferQueue::PageFlipComplete(
-    std::unique_ptr<Image> image) {
+void SkiaOutputDeviceBufferQueue::PageFlipComplete(Image* image) {
+  DCHECK(image);
   if (displayed_image_) {
+    DCHECK_EQ(displayed_image_->present_count() > 1, displayed_image_ == image);
     displayed_image_->EndPresent();
-    available_images_.push_back(std::move(displayed_image_));
+    if (!displayed_image_->present_count()) {
+      available_images_.push_back(displayed_image_);
+    }
   }
-  displayed_image_ = std::move(image);
+
+  displayed_image_ = image;
   swap_completion_callbacks_.pop_front();
 }

 void SkiaOutputDeviceBufferQueue::FreeAllSurfaces() {
-  displayed_image_.reset();
-  current_image_.reset();
   // Clear and cancel swap buffer callbacks.
   swap_completion_callbacks_.clear();
+  images_.clear();
+  current_image_ = nullptr;
+  submitted_image_ = nullptr;
+  displayed_image_ = nullptr;
   available_images_.clear();
 }

 void SkiaOutputDeviceBufferQueue::SchedulePrimaryPlane(
     const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane) {
-  if (!current_image_)
-    return;
-
-  current_image_->BeginPresent();
+  // If current_image_ is nullptr, there is no change on the primary plane,
+  // so we just need to schedule the last submitted image.
+  auto* image = current_image_ ? current_image_ : submitted_image_;
+  DCHECK(image);
+
+  image->BeginPresent();
   std::unique_ptr<gfx::GpuFence> fence;
-  auto* image = current_image_->GetGLImage(&fence);
+  // If the submitted_image_ is being re-scheduled, we don't need a new fence.
+  auto* gl_image =
+      image->GetGLImage(image == submitted_image_ ? nullptr : &fence);

   // Output surface is also z-order 0.
   constexpr int kPlaneZOrder = 0;
   // Output surface always uses the full texture.
-  constexpr gfx::RectF kUVRect(0.f, 0.f, 1.f, 1.f);
-  gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, image,
+  constexpr gfx::RectF kUVRect(0.f, 0.f, 1.0f, 1.0f);
+  gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, gl_image,
                                     ToNearestRect(plane.display_rect), kUVRect,
                                     plane.enable_blending, std::move(fence));
 }
@@ -382,19 +404,22 @@ void SkiaOutputDeviceBufferQueue::SwapBuffers(
     std::vector<ui::LatencyInfo> latency_info) {
   StartSwapBuffers({});

-  if (gl_surface_->SupportsAsyncSwap()) {
+  DCHECK(current_image_);
+  submitted_image_ = current_image_;
+  current_image_ = nullptr;
+
+  if (supports_async_swap_) {
     // Cancelable callback uses weak ptr to drop this task upon destruction.
     // Thus it is safe to use |base::Unretained(this)|.
     swap_completion_callbacks_.emplace_back(
         std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
             &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
             base::Unretained(this), image_size_, std::move(latency_info),
-            std::move(current_image_), std::move(committed_overlays_))));
+            submitted_image_, std::move(committed_overlays_))));
     gl_surface_->SwapBuffersAsync(swap_completion_callbacks_.back()->callback(),
                                   std::move(feedback));
   } else {
-    DoFinishSwapBuffers(image_size_, std::move(latency_info),
-                        std::move(current_image_),
+    DoFinishSwapBuffers(image_size_, std::move(latency_info), submitted_image_,
                         std::move(committed_overlays_),
                         gl_surface_->SwapBuffers(std::move(feedback)), nullptr);
   }
@@ -402,46 +427,46 @@ void SkiaOutputDeviceBufferQueue::SwapBuffers(
   std::swap(committed_overlays_, pending_overlays_);
 }

-void SkiaOutputDeviceBufferQueue::PostSubBuffer(
-    const gfx::Rect& rect,
+void SkiaOutputDeviceBufferQueue::CommitOverlayPlanes(
     BufferPresentedCallback feedback,
     std::vector<ui::LatencyInfo> latency_info) {
   StartSwapBuffers({});

-  if (gl_surface_->SupportsAsyncSwap()) {
+  // There is no drawing for this frame on the main buffer.
+  DCHECK(!current_image_);
+  // A main buffer has to be submitted for previous frames.
+  DCHECK(submitted_image_);
+
+  if (supports_async_swap_) {
     // Cancelable callback uses weak ptr to drop this task upon destruction.
     // Thus it is safe to use |base::Unretained(this)|.
     swap_completion_callbacks_.emplace_back(
         std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
             &SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
             base::Unretained(this), image_size_, std::move(latency_info),
-            std::move(current_image_), std::move(committed_overlays_))));
-    gl_surface_->PostSubBufferAsync(
-        rect.x(), rect.y(), rect.width(), rect.height(),
+            submitted_image_, std::move(committed_overlays_))));
+    gl_surface_->CommitOverlayPlanesAsync(
         swap_completion_callbacks_.back()->callback(), std::move(feedback));
   } else {
-    DoFinishSwapBuffers(
-        image_size_, std::move(latency_info), std::move(current_image_),
-        std::move(committed_overlays_),
-        gl_surface_->PostSubBuffer(rect.x(), rect.y(), rect.width(),
-                                   rect.height(), std::move(feedback)),
-        nullptr);
+    DoFinishSwapBuffers(image_size_, std::move(latency_info), submitted_image_,
+                        std::move(committed_overlays_),
+                        gl_surface_->CommitOverlayPlanes(std::move(feedback)),
+                        nullptr);
   }

-  committed_overlays_ = std::move(pending_overlays_);
-  pending_overlays_.clear();
+  committed_overlays_.clear();
+  std::swap(committed_overlays_, pending_overlays_);
 }

 void SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers(
     const gfx::Size& size,
     std::vector<ui::LatencyInfo> latency_info,
-    std::unique_ptr<Image> image,
+    Image* image,
     std::vector<OverlayData> overlays,
     gfx::SwapResult result,
     std::unique_ptr<gfx::GpuFence> gpu_fence) {
   DCHECK(!gpu_fence);
-  PageFlipComplete(std::move(image));
+  PageFlipComplete(image);
   FinishSwapBuffers(result, size, latency_info);
 }
@@ -462,14 +487,15 @@ bool SkiaOutputDeviceBufferQueue::Reshape(const gfx::Size& size,
 }

 SkSurface* SkiaOutputDeviceBufferQueue::BeginPaint() {
-  auto* image = GetCurrentImage();
-  return image->BeginWriteSkia();
+  if (!current_image_)
+    current_image_ = GetNextImage();
+  return current_image_->BeginWriteSkia();
 }

 void SkiaOutputDeviceBufferQueue::EndPaint(
     const GrBackendSemaphore& semaphore) {
-  auto* image = GetCurrentImage();
-  image->EndWriteSkia();
+  DCHECK(current_image_);
+  current_image_->EndWriteSkia();
 }

 }  // namespace viz
@@ -42,9 +42,8 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
   void SwapBuffers(BufferPresentedCallback feedback,
                    std::vector<ui::LatencyInfo> latency_info) override;
-  void PostSubBuffer(const gfx::Rect& rect,
-                     BufferPresentedCallback feedback,
-                     std::vector<ui::LatencyInfo> latency_info) override;
+  void CommitOverlayPlanes(BufferPresentedCallback feedback,
+                           std::vector<ui::LatencyInfo> latency_info) override;
   bool Reshape(const gfx::Size& size,
                float device_scale_factor,
                const gfx::ColorSpace& color_space,
@@ -70,33 +69,37 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
       base::CancelableOnceCallback<void(gfx::SwapResult,
                                         std::unique_ptr<gfx::GpuFence>)>;

-  Image* GetCurrentImage();
-  std::unique_ptr<Image> GetNextImage();
-  void PageFlipComplete(std::unique_ptr<Image> image);
+  Image* GetNextImage();
+  void PageFlipComplete(Image* image);
   void FreeAllSurfaces();
   // Used as callback for SwapBuffersAsync and PostSubBufferAsync to finish
   // operation
   void DoFinishSwapBuffers(const gfx::Size& size,
                            std::vector<ui::LatencyInfo> latency_info,
-                           std::unique_ptr<Image> image,
+                           Image* image,
                            std::vector<OverlayData> overlays,
                            gfx::SwapResult result,
                            std::unique_ptr<gfx::GpuFence> gpu_fence);

   SkiaOutputSurfaceDependency* const dependency_;
   scoped_refptr<gl::GLSurface> gl_surface_;
+  const bool supports_async_swap_;

   // Format of images
   gfx::ColorSpace color_space_;
   gfx::Size image_size_;
   ResourceFormat image_format_;

+  // All allocated images.
+  std::vector<std::unique_ptr<Image>> images_;
   // This image is currently used by Skia as RenderTarget. This may be nullptr
-  // if no drawing in progress or if allocation failed at bind.
-  std::unique_ptr<Image> current_image_;
+  // if there is no drawing for the current frame or if allocation failed.
+  Image* current_image_ = nullptr;
+  // The last image submitted for presenting.
+  Image* submitted_image_ = nullptr;
   // The image currently on the screen, if any.
-  std::unique_ptr<Image> displayed_image_;
+  Image* displayed_image_ = nullptr;
   // These are free for use, and are not nullptr.
-  std::vector<std::unique_ptr<Image>> available_images_;
+  std::vector<Image*> available_images_;
   // These cancelable callbacks bind images that have been scheduled to display
   // but are not displayed yet. This deque will be cleared when represented
   // frames are destroyed. Use CancelableOnceCallback to prevent resources
...
@@ -136,6 +136,23 @@ class MockGLSurfaceAsync : public gl::GLSurfaceStub {
     callback_ = std::move(completion_callback);
   }

+  void CommitOverlayPlanesAsync(
+      SwapCompletionCallback completion_callback,
+      PresentationCallback presentation_callback) override {
+    DCHECK(!callback_);
+    callback_ = std::move(completion_callback);
+  }
+
+  bool ScheduleOverlayPlane(int z_order,
+                            gfx::OverlayTransform transform,
+                            gl::GLImage* image,
+                            const gfx::Rect& bounds_rect,
+                            const gfx::RectF& crop_rect,
+                            bool enable_blend,
+                            std::unique_ptr<gfx::GpuFence> gpu_fence) override {
+    return true;
+  }
+
   void SwapComplete() {
     std::move(callback_).Run(gfx::SwapResult::SWAP_ACK, nullptr);
   }
@@ -211,13 +228,13 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
   using Image = SkiaOutputDeviceBufferQueue::Image;

-  Image* current_image() { return output_device_->current_image_.get(); }
+  Image* current_image() { return output_device_->current_image_; }

-  const std::vector<std::unique_ptr<Image>>& available_images() {
+  const std::vector<Image*>& available_images() {
     return output_device_->available_images_;
   }
-  Image* displayed_image() { return output_device_->displayed_image_.get(); }
+  Image* displayed_image() { return output_device_->displayed_image_; }

   base::circular_deque<std::unique_ptr<
       SkiaOutputDeviceBufferQueue::CancelableSwapCompletionCallback>>&
@@ -239,8 +256,8 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
   void CheckUnique() {
     std::set<Image*> images;
-    for (const auto& image : available_images())
-      images.insert(image.get());
+    for (auto* image : available_images())
+      images.insert(image);
     if (displayed_image())
       images.insert(displayed_image());
@@ -252,12 +269,17 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
               (size_t)CountBuffers());
   }

-  Image* GetCurrentImage() {
+  Image* PaintAndSchedulePrimaryPlane() {
     // Call Begin/EndPaint to ensure the image is initialized before use.
     output_device_->BeginPaint();
-    GrBackendSemaphore semaphore;
-    output_device_->EndPaint(semaphore);
-    return output_device_->GetCurrentImage();
+    output_device_->EndPaint(GrBackendSemaphore());
+    SchedulePrimaryPlane();
+    return current_image();
+  }
+
+  void SchedulePrimaryPlane() {
+    output_device_->SchedulePrimaryPlane(
+        OverlayProcessorInterface::OutputSurfaceOverlayPlane());
   }

   void SwapBuffers() {
@@ -268,6 +290,14 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
                                 std::vector<ui::LatencyInfo>());
   }

+  void CommitOverlayPlanes() {
+    auto present_callback =
+        base::DoNothing::Once<const gfx::PresentationFeedback&>();
+
+    output_device_->CommitOverlayPlanes(std::move(present_callback),
+                                        std::vector<ui::LatencyInfo>());
+  }
+
   void PageFlipComplete() { gl_surface_->SwapComplete(); }

  protected:
@@ -287,11 +317,11 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, MultipleGetCurrentBufferCalls) {
   output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), false,
                           gfx::OVERLAY_TRANSFORM_NONE);
   EXPECT_EQ(0U, memory_tracker().GetSize());
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(1, CountBuffers());
   auto* fb = current_image();
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(1, CountBuffers());
   EXPECT_EQ(fb, current_image());
@@ -304,7 +334,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
   EXPECT_EQ(0U, memory_tracker().GetSize());
   EXPECT_EQ(0, CountBuffers());
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(1, CountBuffers());
   EXPECT_NE(current_image(), nullptr);
@@ -314,7 +344,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
   PageFlipComplete();
   EXPECT_EQ(0U, swap_completion_callbacks().size());
   EXPECT_TRUE(displayed_image());
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(2, CountBuffers());
   CheckUnique();
@@ -325,12 +355,13 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
   CheckUnique();
   EXPECT_EQ(1U, swap_completion_callbacks().size());
   EXPECT_TRUE(displayed_image());
   PageFlipComplete();
   CheckUnique();
   EXPECT_EQ(0U, swap_completion_callbacks().size());
   EXPECT_EQ(1U, available_images().size());
   EXPECT_TRUE(displayed_image());
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(2, CountBuffers());
   CheckUnique();
@@ -344,11 +375,11 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckTripleBuffering) {
   EXPECT_EQ(0U, memory_tracker().GetSize());

   // This bit is the same sequence tested in the doublebuffering case.
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_FALSE(displayed_image());
   SwapBuffers();
   PageFlipComplete();
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   SwapBuffers();

   EXPECT_NE(0U, memory_tracker().GetSize());
@@ -356,7 +387,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckTripleBuffering) {
   CheckUnique();
   EXPECT_EQ(1U, swap_completion_callbacks().size());
   EXPECT_TRUE(displayed_image());
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(3, CountBuffers());
   CheckUnique();
@@ -380,7 +411,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckEmptySwap) {
   EXPECT_EQ(0, CountBuffers());
   EXPECT_EQ(0U, memory_tracker().GetSize());

-  auto* image = GetCurrentImage();
+  auto* image = PaintAndSchedulePrimaryPlane();
   EXPECT_NE(image, nullptr);
   EXPECT_NE(0U, memory_tracker().GetSize());
   EXPECT_EQ(1, CountBuffers());
@@ -389,22 +420,26 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckEmptySwap) {
   SwapBuffers();
   // Make sure we won't be drawing to the texture we just sent for scanout.
-  auto* new_image = GetCurrentImage();
+  auto* new_image = PaintAndSchedulePrimaryPlane();
   EXPECT_NE(new_image, nullptr);
   EXPECT_NE(image, new_image);

   EXPECT_EQ(1U, swap_completion_callbacks().size());
   PageFlipComplete();

-  // Test swapbuffers without calling BeginPaint/EndPaint (i.e without
-  // GetCurrentImage)
+  // Test CommitOverlayPlanes without calling BeginPaint/EndPaint (i.e without
+  // PaintAndSchedulePrimaryPlane)
   SwapBuffers();
   EXPECT_EQ(1U, swap_completion_callbacks().size());
+  // Schedule the primary plane without drawing.
+  SchedulePrimaryPlane();
   PageFlipComplete();
   EXPECT_EQ(0U, swap_completion_callbacks().size());
   EXPECT_EQ(current_image(), nullptr);

-  SwapBuffers();
+  CommitOverlayPlanes();
   EXPECT_EQ(1U, swap_completion_callbacks().size());
   PageFlipComplete();
   EXPECT_EQ(0U, swap_completion_callbacks().size());
@@ -415,10 +450,10 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckCorrectBufferOrdering) {
                           gfx::OVERLAY_TRANSFORM_NONE);
   const size_t kSwapCount = 5;

+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   for (size_t i = 0; i < kSwapCount; ++i) {
-    EXPECT_NE(GetCurrentImage(), nullptr);
     SwapBuffers();
-    EXPECT_NE(GetCurrentImage(), nullptr);
+    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
     PageFlipComplete();
   }
@@ -426,13 +461,13 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckCorrectBufferOrdering) {
   EXPECT_EQ(3, CountBuffers());
   for (size_t i = 0; i < kSwapCount; ++i) {
-    EXPECT_NE(GetCurrentImage(), nullptr);
     auto* next_image = current_image();
     SwapBuffers();
     EXPECT_EQ(current_image(), nullptr);
     EXPECT_EQ(1U, swap_completion_callbacks().size());
     PageFlipComplete();
     EXPECT_EQ(displayed_image(), next_image);
+    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   }
 }
@@ -442,10 +477,10 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, ReshapeWithInFlightSurfaces) {
   const size_t kSwapCount = 5;

+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   for (size_t i = 0; i < kSwapCount; ++i) {
-    EXPECT_NE(GetCurrentImage(), nullptr);
     SwapBuffers();
-    EXPECT_NE(GetCurrentImage(), nullptr);
+    EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
     PageFlipComplete();
   }
@@ -463,7 +498,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, ReshapeWithInFlightSurfaces) {
   EXPECT_EQ(0u, available_images().size());

   // Test swap after reshape
-  EXPECT_NE(GetCurrentImage(), nullptr);
+  EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
   SwapBuffers();
   PageFlipComplete();
   EXPECT_NE(displayed_image(), nullptr);
...
@@ -996,17 +996,23 @@ void SkiaOutputSurfaceImplOnGpu::SwapBuffers(
     output_surface_plane_.reset();
   }

-  if (frame.sub_buffer_rect && frame.sub_buffer_rect->IsEmpty()) {
-    // Call SwapBuffers() to present overlays.
-    output_device_->SwapBuffers(buffer_presented_callback_,
-                                std::move(frame.latency_info));
-  } else if (capabilities().supports_post_sub_buffer && frame.sub_buffer_rect) {
-    if (!capabilities().flipped_output_surface)
-      frame.sub_buffer_rect->set_y(size_.height() - frame.sub_buffer_rect->y() -
-                                   frame.sub_buffer_rect->height());
-    output_device_->PostSubBuffer(*frame.sub_buffer_rect,
-                                  buffer_presented_callback_,
-                                  std::move(frame.latency_info));
+  if (frame.sub_buffer_rect) {
+    if (capabilities().supports_post_sub_buffer) {
+      if (!capabilities().flipped_output_surface)
+        frame.sub_buffer_rect->set_y(size_.height() -
+                                     frame.sub_buffer_rect->y() -
+                                     frame.sub_buffer_rect->height());
+      output_device_->PostSubBuffer(*frame.sub_buffer_rect,
+                                    buffer_presented_callback_,
+                                    std::move(frame.latency_info));
+    } else if (capabilities().supports_commit_overlay_planes) {
+      DCHECK(frame.sub_buffer_rect->IsEmpty());
+      output_device_->CommitOverlayPlanes(buffer_presented_callback_,
+                                          std::move(frame.latency_info));
+    } else {
+      NOTREACHED();
+    }
   } else {
     output_device_->SwapBuffers(buffer_presented_callback_,
                                 std::move(frame.latency_info));
...