Commit 23a637ea authored by Peng Huang, committed by Commit Bot

SkiaOutputSurfaceBufferQueue: support empty swap with CommitOverlayPlanes

For the empty swap case, SchedulePrimaryPlane() re-schedules the last
submitted image (see the sketch after the commit header below).

Bug: 1041035
Change-Id: Ic2994b3596e8919bf8f7a1828bcc73274e561470
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2007475
Commit-Queue: Peng Huang <penghuang@chromium.org>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Cr-Commit-Position: refs/heads/master@{#733049}
parent 1a8076b8
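
Before the diff, here is a minimal, self-contained C++ sketch of the image bookkeeping this change introduces. It is not the Chromium code: the class, member names, and main() below are illustrative stand-ins for SkiaOutputDeviceBufferQueue, kept only to show how current_image_, submitted_image_, and displayed_image_ interact across a normal swap and an empty swap.

#include <cassert>
#include <cstdio>
#include <memory>
#include <vector>

// One buffer-queue image; present_count mirrors Image::present_count_ in the
// patch: it is > 0 while the image is scheduled as an overlay or on screen.
struct Image {
  int id = 0;
  int present_count = 0;
};

class BufferQueueModel {
 public:
  // BeginPaint(): only a frame that actually draws claims a current image.
  Image* Draw() {
    if (!current_image_)
      current_image_ = Acquire();
    return current_image_;
  }

  // SchedulePrimaryPlane(): with no new drawing (empty swap), re-schedule the
  // image that was submitted for the previous frame.
  void SchedulePrimaryPlane() {
    Image* image = current_image_ ? current_image_ : submitted_image_;
    assert(image && "a frame must have been drawn at least once");
    ++image->present_count;  // BeginPresent()
  }

  // SwapBuffers() for drawing frames, CommitOverlayPlanes() for empty ones:
  // either way the submitted image is whatever was just scheduled.
  void Swap() {
    if (current_image_)
      submitted_image_ = current_image_;
    current_image_ = nullptr;
  }

  // PageFlipComplete(): the previously displayed image is recycled only once
  // nothing presents it any more (it may still be the submitted image).
  void PageFlipComplete() {
    if (displayed_image_) {
      --displayed_image_->present_count;  // EndPresent()
      if (displayed_image_->present_count == 0)
        available_.push_back(displayed_image_);
    }
    displayed_image_ = submitted_image_;
  }

  size_t available_count() const { return available_.size(); }

 private:
  Image* Acquire() {
    if (!available_.empty()) {
      Image* image = available_.back();
      available_.pop_back();
      return image;
    }
    images_.push_back(std::make_unique<Image>());
    images_.back()->id = static_cast<int>(images_.size());
    return images_.back().get();
  }

  std::vector<std::unique_ptr<Image>> images_;  // owns all images
  std::vector<Image*> available_;               // free list
  Image* current_image_ = nullptr;    // being drawn this frame, if any
  Image* submitted_image_ = nullptr;  // last image sent for presentation
  Image* displayed_image_ = nullptr;  // image currently on screen
};

int main() {
  BufferQueueModel queue;

  // Frame 1: normal frame -- draw, schedule, SwapBuffers, flip.
  queue.Draw();
  queue.SchedulePrimaryPlane();
  queue.Swap();
  queue.PageFlipComplete();

  // Frame 2: empty swap -- no drawing; the last submitted image is
  // re-scheduled and committed (CommitOverlayPlanes in the patch).
  queue.SchedulePrimaryPlane();
  queue.Swap();
  queue.PageFlipComplete();

  // The single image is still on screen, so it never hit the free list.
  assert(queue.available_count() == 0);
  std::printf("empty swap reused the previously submitted primary plane\n");
  return 0;
}

The raw pointers above reflect the same ownership choice as the patch itself: all images are owned by one container (images_) for their whole lifetime, so current_image_, submitted_image_, and displayed_image_ can alias the same image across an empty swap without any transfer of ownership.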
......@@ -141,6 +141,9 @@ void DirectRenderer::Initialize() {
.disable_non_empty_post_sub_buffers) {
use_partial_swap_ = false;
}
} else {
allow_empty_swap_ |=
output_surface_->capabilities().supports_commit_overlay_planes;
}
initialized_ = true;
......
......@@ -64,6 +64,8 @@ class VIZ_SERVICE_EXPORT OutputSurface {
bool supports_stencil = false;
// Whether this OutputSurface supports post sub buffer or not.
bool supports_post_sub_buffer = false;
// Whether this OutputSurface supports commit overlay planes.
bool supports_commit_overlay_planes = false;
// Whether this OutputSurface supports gpu vsync callbacks.
bool supports_gpu_vsync = false;
// Whether this OutputSurface supports pre transform. If it is supported,
......
......@@ -27,6 +27,12 @@ SkiaOutputDevice::SkiaOutputDevice(
SkiaOutputDevice::~SkiaOutputDevice() = default;
void SkiaOutputDevice::CommitOverlayPlanes(
BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) {
NOTREACHED();
}
void SkiaOutputDevice::PostSubBuffer(
const gfx::Rect& rect,
BufferPresentedCallback feedback,
......
......@@ -88,6 +88,8 @@ class SkiaOutputDevice {
virtual void PostSubBuffer(const gfx::Rect& rect,
BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info);
virtual void CommitOverlayPlanes(BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info);
// Set the rectangle that will be drawn into on the surface.
virtual void SetDrawRectangle(const gfx::Rect& draw_rectangle);
......
......@@ -67,8 +67,8 @@ class SkiaOutputDeviceBufferQueue::Image {
}
SkSurface* BeginWriteSkia() {
DCHECK(!scoped_write_access_);
DCHECK(!scoped_read_access_);
DCHECK(!scoped_skia_write_access_);
DCHECK(!scoped_overlay_read_access_);
DCHECK(end_semaphores_.empty());
std::vector<GrBackendSemaphore> begin_semaphores;
......@@ -77,29 +77,29 @@ class SkiaOutputDeviceBufferQueue::Image {
// Buffer queue is internal to GPU proc and handles texture initialization,
// so allow uncleared access.
// TODO(vasilyt): Props and MSAA
scoped_write_access_ = skia_representation_->BeginScopedWriteAccess(
scoped_skia_write_access_ = skia_representation_->BeginScopedWriteAccess(
0 /* final_msaa_count */, surface_props, &begin_semaphores,
&end_semaphores_,
gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
DCHECK(scoped_write_access_);
DCHECK(scoped_skia_write_access_);
if (!begin_semaphores.empty()) {
scoped_write_access_->surface()->wait(begin_semaphores.size(),
begin_semaphores.data());
scoped_skia_write_access_->surface()->wait(begin_semaphores.size(),
begin_semaphores.data());
}
return scoped_write_access_->surface();
return scoped_skia_write_access_->surface();
}
void EndWriteSkia() {
DCHECK(scoped_write_access_);
DCHECK(scoped_skia_write_access_);
GrFlushInfo flush_info = {
.fFlags = kNone_GrFlushFlags,
.fNumSemaphores = end_semaphores_.size(),
.fSignalSemaphores = end_semaphores_.data(),
};
scoped_write_access_->surface()->flush(
scoped_skia_write_access_->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
scoped_write_access_.reset();
scoped_skia_write_access_.reset();
end_semaphores_.clear();
// SkiaRenderer always draws the full frame.
......@@ -107,14 +107,19 @@ class SkiaOutputDeviceBufferQueue::Image {
}
void BeginPresent() {
DCHECK(!scoped_write_access_);
DCHECK(!scoped_read_access_);
DCHECK(!scoped_gl_read_access_);
if (++present_count_ != 1) {
DCHECK(scoped_overlay_read_access_ || scoped_gl_read_access_);
return;
}
DCHECK(!scoped_skia_write_access_);
DCHECK(!scoped_overlay_read_access_);
if (overlay_representation_) {
scoped_read_access_ = overlay_representation_->BeginScopedReadAccess(
true /* need_gl_image */);
DCHECK(scoped_read_access_);
scoped_overlay_read_access_ =
overlay_representation_->BeginScopedReadAccess(
true /* need_gl_image */);
DCHECK(scoped_overlay_read_access_);
return;
}
......@@ -125,24 +130,32 @@ class SkiaOutputDeviceBufferQueue::Image {
}
void EndPresent() {
scoped_read_access_.reset();
DCHECK(present_count_);
if (--present_count_)
return;
scoped_overlay_read_access_.reset();
scoped_gl_read_access_.reset();
}
gl::GLImage* GetGLImage(std::unique_ptr<gfx::GpuFence>* fence) {
*fence = nullptr;
if (scoped_read_access_)
return scoped_read_access_->gl_image();
if (scoped_overlay_read_access_)
return scoped_overlay_read_access_->gl_image();
DCHECK(scoped_gl_read_access_);
if (auto gl_fence = gl::GLFence::CreateForGpuFence()) {
*fence = gl_fence->GetGpuFence();
if (gl::GLFence::IsGpuFenceSupported() && fence) {
if (auto gl_fence = gl::GLFence::CreateForGpuFence())
*fence = gl_fence->GetGpuFence();
}
auto* texture = gl_representation_->GetTexture();
return texture->GetLevelImage(texture->target(), 0);
}
int present_count() const { return present_count_; }
gpu::SharedImageRepresentationSkia* skia_representation() {
return skia_representation_.get();
}
private:
gpu::SharedImageFactory* const factory_;
gpu::SharedImageRepresentationFactory* const representation_factory_;
......@@ -153,12 +166,13 @@ class SkiaOutputDeviceBufferQueue::Image {
overlay_representation_;
std::unique_ptr<gpu::SharedImageRepresentationGLTexture> gl_representation_;
std::unique_ptr<gpu::SharedImageRepresentationSkia::ScopedWriteAccess>
scoped_write_access_;
scoped_skia_write_access_;
std::unique_ptr<gpu::SharedImageRepresentationOverlay::ScopedReadAccess>
scoped_read_access_;
scoped_overlay_read_access_;
std::unique_ptr<gpu::SharedImageRepresentationGLTexture::ScopedAccess>
scoped_gl_read_access_;
std::vector<GrBackendSemaphore> end_semaphores_;
int present_count_ = 0;
DISALLOW_COPY_AND_ASSIGN(Image);
};
......@@ -193,7 +207,8 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
memory_tracker,
did_swap_buffer_complete_callback),
dependency_(deps),
gl_surface_(gl_surface),
gl_surface_(std::move(gl_surface)),
supports_async_swap_(gl_surface_->SupportsAsyncSwap()),
shared_image_factory_(deps->GetGpuPreferences(),
deps->GetGpuDriverBugWorkarounds(),
deps->GetGpuFeatureInfo(),
......@@ -214,8 +229,11 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
image_format_ = RGBA_8888;
#endif
capabilities_.supports_post_sub_buffer = gl_surface_->SupportsPostSubBuffer();
// TODO(vasilyt): Need to figure out why partial swap isn't working
capabilities_.supports_post_sub_buffer = false;
capabilities_.supports_commit_overlay_planes =
gl_surface_->SupportsCommitOverlayPlanes();
capabilities_.max_frames_pending = 2;
// Set supports_surfaceless to enable overlays.
capabilities_.supports_surfaceless = true;
......@@ -226,7 +244,7 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
SkiaOutputSurfaceDependency* deps,
gpu::MemoryTracker* memory_tracker,
const DidSwapBufferCompleteCallback& did_swap_buffer_complete_callback)
: SkiaOutputDeviceBufferQueue(gl_surface,
: SkiaOutputDeviceBufferQueue(std::move(gl_surface),
deps,
memory_tracker,
did_swap_buffer_complete_callback,
......@@ -274,17 +292,9 @@ SkiaOutputDeviceBufferQueue::Create(
}
SkiaOutputDeviceBufferQueue::Image*
SkiaOutputDeviceBufferQueue::GetCurrentImage() {
if (!current_image_)
current_image_ = GetNextImage();
return current_image_.get();
}
std::unique_ptr<SkiaOutputDeviceBufferQueue::Image>
SkiaOutputDeviceBufferQueue::GetNextImage() {
if (!available_images_.empty()) {
std::unique_ptr<Image> image = std::move(available_images_.back());
auto* image = available_images_.back();
available_images_.pop_back();
return image;
}
......@@ -294,47 +304,59 @@ SkiaOutputDeviceBufferQueue::GetNextImage() {
if (image->Initialize(image_size_, color_space_, image_format_, dependency_,
shared_image_usage_)) {
return image;
images_.push_back(std::move(image));
return images_.back().get();
}
return nullptr;
}
void SkiaOutputDeviceBufferQueue::PageFlipComplete(
std::unique_ptr<Image> image) {
void SkiaOutputDeviceBufferQueue::PageFlipComplete(Image* image) {
DCHECK(image);
if (displayed_image_) {
DCHECK_EQ(displayed_image_->present_count() > 1, displayed_image_ == image);
displayed_image_->EndPresent();
available_images_.push_back(std::move(displayed_image_));
if (!displayed_image_->present_count()) {
available_images_.push_back(displayed_image_);
}
}
displayed_image_ = std::move(image);
displayed_image_ = image;
swap_completion_callbacks_.pop_front();
}
void SkiaOutputDeviceBufferQueue::FreeAllSurfaces() {
displayed_image_.reset();
current_image_.reset();
// Clear and cancel swap buffer callbacks.
swap_completion_callbacks_.clear();
images_.clear();
current_image_ = nullptr;
submitted_image_ = nullptr;
displayed_image_ = nullptr;
available_images_.clear();
}
void SkiaOutputDeviceBufferQueue::SchedulePrimaryPlane(
const OverlayProcessorInterface::OutputSurfaceOverlayPlane& plane) {
if (!current_image_)
return;
// If current_image_ is nullptr, there is no change to the primary plane, so
// we just need to schedule the last submitted image.
auto* image = current_image_ ? current_image_ : submitted_image_;
DCHECK(image);
current_image_->BeginPresent();
image->BeginPresent();
std::unique_ptr<gfx::GpuFence> fence;
auto* image = current_image_->GetGLImage(&fence);
// If the submitted_image_ is being scheduled, we don't need a new fence.
auto* gl_image =
image->GetGLImage(image == submitted_image_ ? nullptr : &fence);
// Output surface is also z-order 0.
constexpr int kPlaneZOrder = 0;
// Output surface always uses the full texture.
constexpr gfx::RectF kUVRect(0.f, 0.f, 1.f, 1.f);
gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, image,
constexpr gfx::RectF kUVRect(0.f, 0.f, 1.0f, 1.0f);
gl_surface_->ScheduleOverlayPlane(kPlaneZOrder, plane.transform, gl_image,
ToNearestRect(plane.display_rect), kUVRect,
plane.enable_blending, std::move(fence));
}
......@@ -382,19 +404,22 @@ void SkiaOutputDeviceBufferQueue::SwapBuffers(
std::vector<ui::LatencyInfo> latency_info) {
StartSwapBuffers({});
if (gl_surface_->SupportsAsyncSwap()) {
DCHECK(current_image_);
submitted_image_ = current_image_;
current_image_ = nullptr;
if (supports_async_swap_) {
// Cancelable callback uses weak ptr to drop this task upon destruction.
// Thus it is safe to use |base::Unretained(this)|.
swap_completion_callbacks_.emplace_back(
std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
&SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
base::Unretained(this), image_size_, std::move(latency_info),
std::move(current_image_), std::move(committed_overlays_))));
submitted_image_, std::move(committed_overlays_))));
gl_surface_->SwapBuffersAsync(swap_completion_callbacks_.back()->callback(),
std::move(feedback));
} else {
DoFinishSwapBuffers(image_size_, std::move(latency_info),
std::move(current_image_),
DoFinishSwapBuffers(image_size_, std::move(latency_info), submitted_image_,
std::move(committed_overlays_),
gl_surface_->SwapBuffers(std::move(feedback)), nullptr);
}
......@@ -402,46 +427,46 @@ void SkiaOutputDeviceBufferQueue::SwapBuffers(
std::swap(committed_overlays_, pending_overlays_);
}
void SkiaOutputDeviceBufferQueue::PostSubBuffer(
const gfx::Rect& rect,
void SkiaOutputDeviceBufferQueue::CommitOverlayPlanes(
BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) {
StartSwapBuffers({});
if (gl_surface_->SupportsAsyncSwap()) {
// There is no drawing for this frame on the main buffer.
DCHECK(!current_image_);
// A main buffer must have been submitted for a previous frame.
DCHECK(submitted_image_);
if (supports_async_swap_) {
// Cancelable callback uses weak ptr to drop this task upon destruction.
// Thus it is safe to use |base::Unretained(this)|.
swap_completion_callbacks_.emplace_back(
std::make_unique<CancelableSwapCompletionCallback>(base::BindOnce(
&SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers,
base::Unretained(this), image_size_, std::move(latency_info),
std::move(current_image_), std::move(committed_overlays_))));
gl_surface_->PostSubBufferAsync(
rect.x(), rect.y(), rect.width(), rect.height(),
submitted_image_, std::move(committed_overlays_))));
gl_surface_->CommitOverlayPlanesAsync(
swap_completion_callbacks_.back()->callback(), std::move(feedback));
} else {
DoFinishSwapBuffers(
image_size_, std::move(latency_info), std::move(current_image_),
std::move(committed_overlays_),
gl_surface_->PostSubBuffer(rect.x(), rect.y(), rect.width(),
rect.height(), std::move(feedback)),
nullptr);
DoFinishSwapBuffers(image_size_, std::move(latency_info), submitted_image_,
std::move(committed_overlays_),
gl_surface_->CommitOverlayPlanes(std::move(feedback)),
nullptr);
}
committed_overlays_ = std::move(pending_overlays_);
pending_overlays_.clear();
committed_overlays_.clear();
std::swap(committed_overlays_, pending_overlays_);
}
void SkiaOutputDeviceBufferQueue::DoFinishSwapBuffers(
const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
std::unique_ptr<Image> image,
Image* image,
std::vector<OverlayData> overlays,
gfx::SwapResult result,
std::unique_ptr<gfx::GpuFence> gpu_fence) {
DCHECK(!gpu_fence);
PageFlipComplete(std::move(image));
PageFlipComplete(image);
FinishSwapBuffers(result, size, latency_info);
}
......@@ -462,14 +487,15 @@ bool SkiaOutputDeviceBufferQueue::Reshape(const gfx::Size& size,
}
SkSurface* SkiaOutputDeviceBufferQueue::BeginPaint() {
auto* image = GetCurrentImage();
return image->BeginWriteSkia();
if (!current_image_)
current_image_ = GetNextImage();
return current_image_->BeginWriteSkia();
}
void SkiaOutputDeviceBufferQueue::EndPaint(
const GrBackendSemaphore& semaphore) {
auto* image = GetCurrentImage();
image->EndWriteSkia();
DCHECK(current_image_);
current_image_->EndWriteSkia();
}
} // namespace viz
......@@ -42,9 +42,8 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
void SwapBuffers(BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) override;
void PostSubBuffer(const gfx::Rect& rect,
BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) override;
void CommitOverlayPlanes(BufferPresentedCallback feedback,
std::vector<ui::LatencyInfo> latency_info) override;
bool Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
......@@ -70,33 +69,37 @@ class VIZ_SERVICE_EXPORT SkiaOutputDeviceBufferQueue final
base::CancelableOnceCallback<void(gfx::SwapResult,
std::unique_ptr<gfx::GpuFence>)>;
Image* GetCurrentImage();
std::unique_ptr<Image> GetNextImage();
void PageFlipComplete(std::unique_ptr<Image> image);
Image* GetNextImage();
void PageFlipComplete(Image* image);
void FreeAllSurfaces();
// Used as callback for SwapBuffersAsync and PostSubBufferAsync to finish
// Used as callback for SwapBuffersAsync and PostSubBufferAsync to finish
// operation
void DoFinishSwapBuffers(const gfx::Size& size,
std::vector<ui::LatencyInfo> latency_info,
std::unique_ptr<Image> image,
Image* image,
std::vector<OverlayData> overlays,
gfx::SwapResult result,
std::unique_ptr<gfx::GpuFence> gpu_fence);
SkiaOutputSurfaceDependency* const dependency_;
scoped_refptr<gl::GLSurface> gl_surface_;
const bool supports_async_swap_;
// Format of images
gfx::ColorSpace color_space_;
gfx::Size image_size_;
ResourceFormat image_format_;
// All allocated images.
std::vector<std::unique_ptr<Image>> images_;
// This image is currently used by Skia as RenderTarget. This may be nullptr
// if no drawing in progress or if allocation failed at bind.
std::unique_ptr<Image> current_image_;
// if there is no drawing for the current frame or if allocation failed.
Image* current_image_ = nullptr;
// The last image submitted for presenting.
Image* submitted_image_ = nullptr;
// The image currently on the screen, if any.
std::unique_ptr<Image> displayed_image_;
Image* displayed_image_ = nullptr;
// These are free for use, and are not nullptr.
std::vector<std::unique_ptr<Image>> available_images_;
std::vector<Image*> available_images_;
// These cancelable callbacks bind images that have been scheduled to display
// but are not displayed yet. This deque will be cleared when represented
// frames are destroyed. Use CancelableOnceCallback to prevent resources
......
......@@ -136,6 +136,23 @@ class MockGLSurfaceAsync : public gl::GLSurfaceStub {
callback_ = std::move(completion_callback);
}
void CommitOverlayPlanesAsync(
SwapCompletionCallback completion_callback,
PresentationCallback presentation_callback) override {
DCHECK(!callback_);
callback_ = std::move(completion_callback);
}
bool ScheduleOverlayPlane(int z_order,
gfx::OverlayTransform transform,
gl::GLImage* image,
const gfx::Rect& bounds_rect,
const gfx::RectF& crop_rect,
bool enable_blend,
std::unique_ptr<gfx::GpuFence> gpu_fence) override {
return true;
}
void SwapComplete() {
std::move(callback_).Run(gfx::SwapResult::SWAP_ACK, nullptr);
}
......@@ -211,13 +228,13 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
using Image = SkiaOutputDeviceBufferQueue::Image;
Image* current_image() { return output_device_->current_image_.get(); }
Image* current_image() { return output_device_->current_image_; }
const std::vector<std::unique_ptr<Image>>& available_images() {
const std::vector<Image*>& available_images() {
return output_device_->available_images_;
}
Image* displayed_image() { return output_device_->displayed_image_.get(); }
Image* displayed_image() { return output_device_->displayed_image_; }
base::circular_deque<std::unique_ptr<
SkiaOutputDeviceBufferQueue::CancelableSwapCompletionCallback>>&
......@@ -239,8 +256,8 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
void CheckUnique() {
std::set<Image*> images;
for (const auto& image : available_images())
images.insert(image.get());
for (auto* image : available_images())
images.insert(image);
if (displayed_image())
images.insert(displayed_image());
......@@ -252,12 +269,17 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
(size_t)CountBuffers());
}
Image* GetCurrentImage() {
Image* PaintAndSchedulePrimaryPlane() {
// Call Begin/EndPaint to ensure the image is initialized before use.
output_device_->BeginPaint();
GrBackendSemaphore semaphore;
output_device_->EndPaint(semaphore);
return output_device_->GetCurrentImage();
output_device_->EndPaint(GrBackendSemaphore());
SchedulePrimaryPlane();
return current_image();
}
void SchedulePrimaryPlane() {
output_device_->SchedulePrimaryPlane(
OverlayProcessorInterface::OutputSurfaceOverlayPlane());
}
void SwapBuffers() {
......@@ -268,6 +290,14 @@ class SkiaOutputDeviceBufferQueueTest : public TestOnGpu {
std::vector<ui::LatencyInfo>());
}
void CommitOverlayPlanes() {
auto present_callback =
base::DoNothing::Once<const gfx::PresentationFeedback&>();
output_device_->CommitOverlayPlanes(std::move(present_callback),
std::vector<ui::LatencyInfo>());
}
void PageFlipComplete() { gl_surface_->SwapComplete(); }
protected:
......@@ -287,11 +317,11 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, MultipleGetCurrentBufferCalls) {
output_device_->Reshape(screen_size, 1.0f, gfx::ColorSpace(), false,
gfx::OVERLAY_TRANSFORM_NONE);
EXPECT_EQ(0U, memory_tracker().GetSize());
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(1, CountBuffers());
auto* fb = current_image();
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(1, CountBuffers());
EXPECT_EQ(fb, current_image());
......@@ -304,7 +334,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
EXPECT_EQ(0U, memory_tracker().GetSize());
EXPECT_EQ(0, CountBuffers());
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(1, CountBuffers());
EXPECT_NE(current_image(), nullptr);
......@@ -314,7 +344,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
PageFlipComplete();
EXPECT_EQ(0U, swap_completion_callbacks().size());
EXPECT_TRUE(displayed_image());
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(2, CountBuffers());
CheckUnique();
......@@ -325,12 +355,13 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckDoubleBuffering) {
CheckUnique();
EXPECT_EQ(1U, swap_completion_callbacks().size());
EXPECT_TRUE(displayed_image());
PageFlipComplete();
CheckUnique();
EXPECT_EQ(0U, swap_completion_callbacks().size());
EXPECT_EQ(1U, available_images().size());
EXPECT_TRUE(displayed_image());
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(2, CountBuffers());
CheckUnique();
......@@ -344,11 +375,11 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckTripleBuffering) {
EXPECT_EQ(0U, memory_tracker().GetSize());
// This bit is the same sequence tested in the doublebuffering case.
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_FALSE(displayed_image());
SwapBuffers();
PageFlipComplete();
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
SwapBuffers();
EXPECT_NE(0U, memory_tracker().GetSize());
......@@ -356,7 +387,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckTripleBuffering) {
CheckUnique();
EXPECT_EQ(1U, swap_completion_callbacks().size());
EXPECT_TRUE(displayed_image());
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(3, CountBuffers());
CheckUnique();
......@@ -380,7 +411,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckEmptySwap) {
EXPECT_EQ(0, CountBuffers());
EXPECT_EQ(0U, memory_tracker().GetSize());
auto* image = GetCurrentImage();
auto* image = PaintAndSchedulePrimaryPlane();
EXPECT_NE(image, nullptr);
EXPECT_NE(0U, memory_tracker().GetSize());
EXPECT_EQ(1, CountBuffers());
......@@ -389,22 +420,26 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckEmptySwap) {
SwapBuffers();
// Make sure we won't be drawing to the texture we just sent for scanout.
auto* new_image = GetCurrentImage();
auto* new_image = PaintAndSchedulePrimaryPlane();
EXPECT_NE(new_image, nullptr);
EXPECT_NE(image, new_image);
EXPECT_EQ(1U, swap_completion_callbacks().size());
PageFlipComplete();
// Test swapbuffers without calling BeginPaint/EndPaint (i.e without
// GetCurrentImage)
// Test CommitOverlayPlanes without calling BeginPaint/EndPaint (i.e. without
// PaintAndSchedulePrimaryPlane)
SwapBuffers();
EXPECT_EQ(1U, swap_completion_callbacks().size());
// Schedule the primary plane without drawing.
SchedulePrimaryPlane();
PageFlipComplete();
EXPECT_EQ(0U, swap_completion_callbacks().size());
EXPECT_EQ(current_image(), nullptr);
SwapBuffers();
CommitOverlayPlanes();
EXPECT_EQ(1U, swap_completion_callbacks().size());
PageFlipComplete();
EXPECT_EQ(0U, swap_completion_callbacks().size());
......@@ -415,10 +450,10 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckCorrectBufferOrdering) {
gfx::OVERLAY_TRANSFORM_NONE);
const size_t kSwapCount = 5;
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
for (size_t i = 0; i < kSwapCount; ++i) {
EXPECT_NE(GetCurrentImage(), nullptr);
SwapBuffers();
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
PageFlipComplete();
}
......@@ -426,13 +461,13 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, CheckCorrectBufferOrdering) {
EXPECT_EQ(3, CountBuffers());
for (size_t i = 0; i < kSwapCount; ++i) {
EXPECT_NE(GetCurrentImage(), nullptr);
auto* next_image = current_image();
SwapBuffers();
EXPECT_EQ(current_image(), nullptr);
EXPECT_EQ(1U, swap_completion_callbacks().size());
PageFlipComplete();
EXPECT_EQ(displayed_image(), next_image);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
}
}
......@@ -442,10 +477,10 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, ReshapeWithInFlightSurfaces) {
const size_t kSwapCount = 5;
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
for (size_t i = 0; i < kSwapCount; ++i) {
EXPECT_NE(GetCurrentImage(), nullptr);
SwapBuffers();
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
PageFlipComplete();
}
......@@ -463,7 +498,7 @@ TEST_F_GPU(SkiaOutputDeviceBufferQueueTest, ReshapeWithInFlightSurfaces) {
EXPECT_EQ(0u, available_images().size());
// Test swap after reshape
EXPECT_NE(GetCurrentImage(), nullptr);
EXPECT_NE(PaintAndSchedulePrimaryPlane(), nullptr);
SwapBuffers();
PageFlipComplete();
EXPECT_NE(displayed_image(), nullptr);
......
......@@ -996,17 +996,23 @@ void SkiaOutputSurfaceImplOnGpu::SwapBuffers(
output_surface_plane_.reset();
}
if (frame.sub_buffer_rect && frame.sub_buffer_rect->IsEmpty()) {
// Call SwapBuffers() to present overlays.
output_device_->SwapBuffers(buffer_presented_callback_,
std::move(frame.latency_info));
} else if (capabilities().supports_post_sub_buffer && frame.sub_buffer_rect) {
if (!capabilities().flipped_output_surface)
frame.sub_buffer_rect->set_y(size_.height() - frame.sub_buffer_rect->y() -
frame.sub_buffer_rect->height());
output_device_->PostSubBuffer(*frame.sub_buffer_rect,
buffer_presented_callback_,
std::move(frame.latency_info));
if (frame.sub_buffer_rect) {
if (capabilities().supports_post_sub_buffer) {
if (!capabilities().flipped_output_surface)
frame.sub_buffer_rect->set_y(size_.height() -
frame.sub_buffer_rect->y() -
frame.sub_buffer_rect->height());
output_device_->PostSubBuffer(*frame.sub_buffer_rect,
buffer_presented_callback_,
std::move(frame.latency_info));
} else if (capabilities().supports_commit_overlay_planes) {
DCHECK(frame.sub_buffer_rect->IsEmpty());
output_device_->CommitOverlayPlanes(buffer_presented_callback_,
std::move(frame.latency_info));
} else {
NOTREACHED();
}
} else {
output_device_->SwapBuffers(buffer_presented_callback_,
std::move(frame.latency_info));
......