Commit 2285ce0a authored by Peng Huang, committed by Commit Bot

Enable partial swap with SkiaOutputDeviceVulkan

Since the Vulkan swap chain does not modify the content of its images,
we can set preserve_buffer_content in the output surface capabilities.
SkiaRenderer will then draw only the damaged area instead of the whole
buffer, saving CPU and GPU cycles.

Note: we are using a FIFO Vulkan swap chain, so the swap chain should
return images in presenting order.
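
To make the damage-tracking idea concrete, here is a minimal sketch
(hypothetical and simplified; names are illustrative, not the actual
Chromium classes) of how a renderer can compute the rect to redraw when
buffer content is preserved across swaps:

    // Minimal sketch, assuming a ring of per-buffer damage rects (one per
    // swap chain image) and a FIFO swap chain that returns images in
    // presenting order.
    #include <vector>
    #include "ui/gfx/geometry/rect.h"

    gfx::Rect ComputeDrawRect(const gfx::Rect& frame_damage,
                              std::vector<gfx::Rect>* damage_of_buffers,
                              size_t current_buffer) {
      // The buffer being reused still misses the damage of the frames drawn
      // since it was last presented, so redraw that accumulated damage plus
      // this frame's damage.
      gfx::Rect draw_rect = (*damage_of_buffers)[current_buffer];
      draw_rect.Union(frame_damage);
      for (size_t i = 0; i < damage_of_buffers->size(); ++i) {
        if (i == current_buffer)
          (*damage_of_buffers)[i] = gfx::Rect();  // Fully valid after this draw.
        else
          (*damage_of_buffers)[i].Union(frame_damage);  // Now stale here too.
      }
      return draw_rect;
    }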

Bug: 1043388
Change-Id: I6f3a4f84027607fe5cb6ecd992552c535c72ed52
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2042766
Commit-Queue: Peng Huang <penghuang@chromium.org>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Cr-Commit-Position: refs/heads/master@{#744752}
parent e39943c4
@@ -32,8 +32,16 @@ SkiaOutputDeviceVulkan::SkiaOutputDeviceVulkan(
           did_swap_buffer_complete_callback),
       context_provider_(context_provider),
       surface_handle_(surface_handle) {
+  if (!CreateVulkanSurface()) {
+    LOG(ERROR) << "Failed to create vulkan surface.";
+  }
+  capabilities_.max_frames_pending = vulkan_surface_->image_count() - 1;
+  // The Vulkan FIFO swap chain should return vk images in presenting order,
+  // so set preserve_buffer_content & supports_post_sub_buffer to true to let
+  // SkiaOutputSurfaceImpl manage damage.
+  capabilities_.preserve_buffer_content = true;
   capabilities_.flipped_output_surface = true;
-  capabilities_.supports_post_sub_buffer = false;
+  capabilities_.supports_post_sub_buffer = true;
   capabilities_.supports_pre_transform = true;
 }
@@ -44,24 +52,27 @@ SkiaOutputDeviceVulkan::~SkiaOutputDeviceVulkan() {
     memory_type_tracker_->TrackMemFree(it->bytes_allocated);
   }
   sk_surface_size_pairs_.clear();
-  if (vulkan_surface_) {
+  if (!vulkan_surface_)
+    return;
 #if defined(OS_ANDROID)
   if (base::SysInfo::IsLowEndDevice()) {
     // For low end devices, the output surface will be destroyed when chrome
     // goes into the background, and a new output surface will be created when
     // chrome goes to the foreground again. The new vulkan surface cannot be
    // created successfully if the old vulkan surface is not destroyed. To
     // avoid the problem, we sync the device queue and destroy the vulkan
     // surface synchronously.
     vkQueueWaitIdle(context_provider_->GetDeviceQueue()->GetVulkanQueue());
     vulkan_surface_->Destroy();
     return;
   }
 #endif
   auto* fence_helper = context_provider_->GetDeviceQueue()->GetFenceHelper();
   fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
       std::move(vulkan_surface_));
-  }
 }
 bool SkiaOutputDeviceVulkan::Reshape(const gfx::Size& size,
@@ -71,16 +82,11 @@ bool SkiaOutputDeviceVulkan::Reshape(const gfx::Size& size,
                                      gfx::OverlayTransform transform) {
   DCHECK(!scoped_write_);
-  uint32_t generation = 0;
-  if (!vulkan_surface_) {
-    if (!CreateVulkanSurface())
-      return false;
-  } else {
-    generation = vulkan_surface_->swap_chain_generation();
-  }
+  if (!vulkan_surface_)
+    return false;
+
+  auto generation = vulkan_surface_->swap_chain_generation();
   vulkan_surface_->Reshape(size, transform);
   auto sk_color_space = color_space.ToSkColorSpace();
   if (vulkan_surface_->swap_chain_generation() != generation ||
       !SkColorSpace::Equals(sk_color_space.get(), sk_color_space_.get())) {
@@ -99,14 +105,30 @@ bool SkiaOutputDeviceVulkan::Reshape(const gfx::Size& size,
 void SkiaOutputDeviceVulkan::SwapBuffers(
     BufferPresentedCallback feedback,
     std::vector<ui::LatencyInfo> latency_info) {
+  PostSubBuffer(gfx::Rect(vulkan_surface_->image_size()), std::move(feedback),
+                std::move(latency_info));
+}
+
+void SkiaOutputDeviceVulkan::PostSubBuffer(
+    const gfx::Rect& rect,
+    BufferPresentedCallback feedback,
+    std::vector<ui::LatencyInfo> latency_info) {
   // Reshape should have been called first.
   DCHECK(vulkan_surface_);
   DCHECK(!scoped_write_);
+#if DCHECK_IS_ON()
+  DCHECK_EQ(!rect.IsEmpty(), image_modified_);
+  image_modified_ = false;
+#endif
+  // TODO(penghuang): pass rect to vulkan swap chain and let swap chain use
+  // VK_KHR_incremental_present.
 
   StartSwapBuffers(std::move(feedback));
   auto image_size = vulkan_surface_->image_size();
-  FinishSwapBuffers(vulkan_surface_->SwapBuffers(), image_size,
-                    std::move(latency_info));
+  gfx::SwapResult result = gfx::SwapResult::SWAP_ACK;
+  if (!rect.IsEmpty())
+    result = vulkan_surface_->SwapBuffers();
+  FinishSwapBuffers(result, image_size, std::move(latency_info));
 }
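
For reference, a hedged sketch of what the TODO above might look like if the
swap chain chained the damage rect into the present call via
VK_KHR_incremental_present (this assumes the extension is enabled at device
creation; the code below is illustrative, not part of this CL):

    // Sketch only: attach the damage rect to vkQueuePresentKHR() through
    // VK_KHR_incremental_present. |rect| is the damage in pixels.
    VkRectLayerKHR rect_layer = {};
    rect_layer.offset = {rect.x(), rect.y()};
    rect_layer.extent = {static_cast<uint32_t>(rect.width()),
                         static_cast<uint32_t>(rect.height())};
    rect_layer.layer = 0;

    VkPresentRegionKHR region = {};
    region.rectangleCount = 1;
    region.pRectangles = &rect_layer;

    VkPresentRegionsKHR regions = {};
    regions.sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
    regions.swapchainCount = 1;
    regions.pRegions = &region;

    VkPresentInfoKHR present_info = {};
    present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present_info.pNext = &regions;  // Chain the damage region into the present.
    // ... swapchains and wait semaphores filled in as in PresentBuffer().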
 SkSurface* SkiaOutputDeviceVulkan::BeginPaint() {
@@ -185,6 +207,9 @@ void SkiaOutputDeviceVulkan::EndPaint(const GrBackendSemaphore& semaphore) {
   if (semaphore.isInitialized())
     scoped_write_->SetEndSemaphore(semaphore.vkSemaphore());
   scoped_write_.reset();
+#if DCHECK_IS_ON()
+  image_modified_ = true;
+#endif
 }
 
 bool SkiaOutputDeviceVulkan::CreateVulkanSurface() {
......
@@ -39,6 +39,9 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
                gfx::OverlayTransform transform) override;
   void SwapBuffers(BufferPresentedCallback feedback,
                    std::vector<ui::LatencyInfo> latency_info) override;
+  void PostSubBuffer(const gfx::Rect& rect,
+                     BufferPresentedCallback feedback,
+                     std::vector<ui::LatencyInfo> latency_info) override;
   SkSurface* BeginPaint() override;
   void EndPaint(const GrBackendSemaphore& semaphore) override;
@@ -62,6 +65,10 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
   base::Optional<gpu::VulkanSwapChain::ScopedWrite> scoped_write_;
 
+#if DCHECK_IS_ON()
+  bool image_modified_ = false;
+#endif
+
   // SkSurfaces for swap chain images.
   std::vector<SkSurfaceSizePair> sk_surface_size_pairs_;
......
@@ -440,6 +440,8 @@ SkiaOutputSurfaceImpl::CreateImageContext(
 void SkiaOutputSurfaceImpl::SwapBuffers(OutputSurfaceFrame frame) {
   DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   DCHECK(!current_paint_);
+  DCHECK_EQ(!frame.sub_buffer_rect || !frame.sub_buffer_rect->IsEmpty(),
+            current_buffer_modified_);
 
   has_set_draw_rectangle_for_frame_ = false;
@@ -460,9 +462,8 @@ void SkiaOutputSurfaceImpl::SwapBuffers(OutputSurfaceFrame frame) {
     // change the current buffer index to the next buffer in the queue.
     if (++current_buffer_ == damage_of_buffers_.size())
       current_buffer_ = 0u;
+    current_buffer_modified_ = false;
   }
-  current_buffer_modified_ = false;
 
   // impl_on_gpu_ is released on the GPU thread by a posted task from
   // SkiaOutputSurfaceImpl::dtor. So it is safe to use base::Unretained.
   auto callback =
@@ -726,12 +727,8 @@ bool SkiaOutputSurfaceImpl::Initialize() {
   ScheduleGpuTask(std::move(callback), {});
   event.Wait();
 
-  if (capabilities_.preserve_buffer_content) {
-    // If buffer content is preserved after presenting, SkiaOutputSurfaceImpl
-    // can simulate partial swap with regular SwapBuffers(). It is because we
-    // track damaged area for every buffer and ask SkiaRenderer to redraw the
-    // damaged area to make sure the whole buffer is validated.
-    capabilities_.supports_post_sub_buffer = true;
+  if (capabilities_.preserve_buffer_content &&
+      capabilities_.supports_post_sub_buffer) {
     capabilities_.only_invalidates_damage_rect = false;
     damage_of_buffers_.resize(capabilities_.max_frames_pending + 1);
   }
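
A side note on the sizing above: SkiaOutputDeviceVulkan sets
max_frames_pending to image_count() - 1, so max_frames_pending + 1 gives one
damage slot per swap chain image. A tiny sketch (illustrative names, not the
actual member variables) of how such a ring advances:

    // Sketch: one damage rect per swap chain image; after each swap, step to
    // the buffer the FIFO swap chain will hand back next.
    std::vector<gfx::Rect> damage_of_buffers(max_frames_pending + 1);
    size_t current_buffer = 0;
    // ... after presenting a frame:
    if (++current_buffer == damage_of_buffers.size())
      current_buffer = 0u;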
......
@@ -1026,11 +1026,7 @@ void SkiaOutputSurfaceImplOnGpu::SwapBuffers(
       output_device_->CommitOverlayPlanes(buffer_presented_callback_,
                                           std::move(frame.latency_info));
     } else {
-      // Full swap can only be used to simulate PostSubBuffer(), if the buffer
-      // content is preserved after presenting.
-      DCHECK(capabilities().preserve_buffer_content);
-      output_device_->SwapBuffers(buffer_presented_callback_,
-                                  std::move(frame.latency_info));
+      NOTREACHED();
     }
   } else {
     output_device_->SwapBuffers(buffer_presented_callback_,
......
@@ -90,13 +90,12 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
   device_queue_ = device_queue;
-  VkResult result = VK_SUCCESS;
   VkBool32 present_support;
-  if (vkGetPhysicalDeviceSurfaceSupportKHR(
-          device_queue_->GetVulkanPhysicalDevice(),
-          device_queue_->GetVulkanQueueIndex(), surface_,
-          &present_support) != VK_SUCCESS) {
+  VkResult result = vkGetPhysicalDeviceSurfaceSupportKHR(
+      device_queue_->GetVulkanPhysicalDevice(),
+      device_queue_->GetVulkanQueueIndex(), surface_, &present_support);
+  if (result != VK_SUCCESS) {
     DLOG(ERROR) << "vkGetPhysicalDeviceSurfaceSupportKHR() failed: " << result;
     return false;
   }
@@ -154,12 +153,26 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
       return false;
     }
   }
-  return CreateSwapChain(gfx::Size(), gfx::OVERLAY_TRANSFORM_INVALID);
+
+  VkSurfaceCapabilitiesKHR surface_caps;
+  result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+      device_queue_->GetVulkanPhysicalDevice(), surface_, &surface_caps);
+  if (VK_SUCCESS != result) {
+    DLOG(ERROR) << "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() failed: "
+                << result;
+    return false;
+  }
+
+  image_count_ = std::max(surface_caps.minImageCount, 3u);
+
+  return true;
 }
 
 void VulkanSurface::Destroy() {
-  swap_chain_->Destroy();
-  swap_chain_ = nullptr;
+  if (swap_chain_) {
+    swap_chain_->Destroy();
+    swap_chain_ = nullptr;
+  }
   vkDestroySurfaceKHR(vk_instance_, surface_, nullptr);
   surface_ = VK_NULL_HANDLE;
 }
@@ -240,11 +253,10 @@ bool VulkanSurface::CreateSwapChain(const gfx::Size& size,
   auto swap_chain = std::make_unique<VulkanSwapChain>();
 
   // Create swap chain.
-  uint32_t min_image_count = std::max(surface_caps.minImageCount, 3u);
-  if (!swap_chain->Initialize(device_queue_, surface_, surface_format_,
-                              image_size_, min_image_count, vk_transform,
-                              enforce_protected_memory_,
-                              std::move(swap_chain_))) {
+  DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, 3u));
+  if (!swap_chain->Initialize(
+          device_queue_, surface_, surface_format_, image_size_, image_count_,
+          vk_transform, enforce_protected_memory_, std::move(swap_chain_))) {
     return false;
   }
......
@@ -56,6 +56,7 @@ class VULKAN_EXPORT VulkanSurface {
   uint32_t swap_chain_generation() const { return swap_chain_generation_; }
   const gfx::Size& image_size() const { return image_size_; }
   gfx::OverlayTransform transform() const { return transform_; }
+  uint32_t image_count() const { return image_count_; }
   VkSurfaceFormatKHR surface_format() const { return surface_format_; }
 
  private:
@@ -79,6 +80,9 @@ class VULKAN_EXPORT VulkanSurface {
   // Swap chain pre-transform.
   gfx::OverlayTransform transform_ = gfx::OVERLAY_TRANSFORM_INVALID;
 
+  // Swap chain image count.
+  uint32_t image_count_ = 0u;
+
   std::unique_ptr<VulkanSwapChain> swap_chain_;
 
   DISALLOW_COPY_AND_ASSIGN(VulkanSurface);
......
@@ -112,21 +112,22 @@ gfx::SwapResult VulkanSwapChain::PresentBuffer() {
   }
   DLOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchain is suboptimal.";
 
-  if (current_image_data.present_wait_semaphore != VK_NULL_HANDLE) {
-    // |present_wait_semaphore| for the previous present for this image can be
-    // safely destroyed after the semaphore got from vkAcquireNextImageKHR() is
-    // passed. That acquired semaphore should already have been waited on for
-    // submitted GPU work. So we can safely enqueue the
-    // |present_wait_semaphore| for cleanup here (the enqueued semaphore will be
-    // destroyed when all submitted GPU work is finished).
+  if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+    // |present_begin_semaphore| for the previous present for this image can be
+    // safely destroyed after the semaphore got from vkAcquireNextImageKHR() is
+    // passed. That acquired semaphore should already have been waited on for
+    // submitted GPU work. So we can safely enqueue the
+    // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
+    // be destroyed when all submitted GPU work is finished).
     fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
-        current_image_data.present_wait_semaphore);
+        current_image_data.present_begin_semaphore);
   }
   // We are not sure when the semaphore is no longer used by the present
   // engine, so don't destroy the semaphore until the image is returned from
   // the present engine.
-  current_image_data.present_wait_semaphore = end_write_semaphore_;
+  current_image_data.present_begin_semaphore = end_write_semaphore_;
   end_write_semaphore_ = VK_NULL_HANDLE;
 
+  in_present_images_.emplace_back(*acquired_image_);
   acquired_image_.reset();
 
   return gfx::SwapResult::SWAP_ACK;
 }
@@ -237,9 +238,14 @@ void VulkanSwapChain::DestroySwapImages() {
       image_data.command_buffer->Destroy();
       image_data.command_buffer = nullptr;
     }
-    if (image_data.present_wait_semaphore != VK_NULL_HANDLE) {
+    if (image_data.present_begin_semaphore != VK_NULL_HANDLE) {
       vkDestroySemaphore(device_queue_->GetVulkanDevice(),
-                         image_data.present_wait_semaphore,
+                         image_data.present_begin_semaphore,
+                         nullptr /* pAllocator */);
+    }
+    if (image_data.present_end_semaphore != VK_NULL_HANDLE) {
+      vkDestroySemaphore(device_queue_->GetVulkanDevice(),
+                         image_data.present_end_semaphore,
                          nullptr /* pAllocator */);
     }
   }
@@ -263,33 +269,20 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
   if (!acquired_image_) {
     DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
-    VkDevice device = device_queue_->GetVulkanDevice();
-    vk_semaphore = CreateSemaphore(device);
-    DCHECK(vk_semaphore != VK_NULL_HANDLE);
-    uint32_t next_image = 0;
-    // Acquire the next image.
-    auto result = vkAcquireNextImageKHR(
-        device, swap_chain_, UINT64_MAX, vk_semaphore,
-        static_cast<VkFence>(VK_NULL_HANDLE), &next_image);
-    if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
-      vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
-      DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
+    if (!AcquireNextImage())
       return false;
-    }
-    acquired_image_.emplace(next_image);
+    DCHECK(acquired_image_);
+    std::swap(vk_semaphore, images_[*acquired_image_].present_end_semaphore);
   } else {
     // In this case, PresentBuffer() was not called after the last
     // {Begin,End}WriteCurrentImage pair, so |end_write_semaphore_| should be
     // waited on before writing the image again.
-    vk_semaphore = end_write_semaphore_;
-    end_write_semaphore_ = VK_NULL_HANDLE;
+    std::swap(vk_semaphore, end_write_semaphore_);
   }
 
   auto& current_image_data = images_[*acquired_image_];
   *image = current_image_data.image;
-  *image_index = *acquired_image_;
+  *image_index = acquired_image_.value();
   *image_layout = current_image_data.layout;
   *semaphore = vk_semaphore;
   is_writing_ = true;
@@ -309,6 +302,60 @@ void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
   is_writing_ = false;
 }
 
+bool VulkanSwapChain::AcquireNextImage() {
+  DCHECK(!acquired_image_);
+  VkDevice device = device_queue_->GetVulkanDevice();
+  // The Vulkan spec doesn't require vkAcquireNextImageKHR() to return images
+  // in the present order for a vulkan swap chain. However, for the best
+  // performance, the driver should return images in order. To avoid buggy
+  // drivers, we will call vkAcquireNextImageKHR() repeatedly until the
+  // expected image is returned.
+  do {
+    bool all_images_are_tracked = in_present_images_.size() == images_.size();
+    if (all_images_are_tracked) {
+      // Only check the expected next image when all images are tracked.
+      uint32_t expected_next_image = in_present_images_.front();
+      // If the expected next image has been acquired, use it and return true.
+      if (images_[expected_next_image].present_end_semaphore !=
+          VK_NULL_HANDLE) {
+        in_present_images_.pop_front();
+        acquired_image_.emplace(expected_next_image);
+        break;
+      }
+    }
+
+    VkSemaphore vk_semaphore = CreateSemaphore(device);
+    DCHECK(vk_semaphore != VK_NULL_HANDLE);
+    // Acquire the next image.
+    uint32_t next_image;
+    auto result = vkAcquireNextImageKHR(
+        device, swap_chain_, UINT64_MAX, vk_semaphore,
+        static_cast<VkFence>(VK_NULL_HANDLE), &next_image);
+    if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+      vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+      DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
+      return false;
+    }
+
+    DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
+    images_[next_image].present_end_semaphore = vk_semaphore;
+
+    auto it = std::find(in_present_images_.begin(), in_present_images_.end(),
+                        next_image);
+    if (it == in_present_images_.end()) {
+      DCHECK(!all_images_are_tracked);
+      // Got an image which is not in the present queue because the swap chain
+      // was newly created. In this case, just use this image.
+      acquired_image_.emplace(next_image);
+      break;
+    }
+    DLOG_IF(ERROR, it != in_present_images_.begin())
+        << "vkAcquireNextImageKHR() returned an unexpected image.";
+  } while (true);
+  return true;
+}
+
 VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
     : swap_chain_(swap_chain) {
   success_ = swap_chain_->BeginWriteCurrentImage(
......
@@ -5,10 +5,12 @@
 #ifndef GPU_VULKAN_VULKAN_SWAP_CHAIN_H_
 #define GPU_VULKAN_VULKAN_SWAP_CHAIN_H_
 
+#include <vulkan/vulkan.h>
+
 #include <memory>
 #include <vector>
-#include <vulkan/vulkan.h>
 
+#include "base/containers/circular_deque.h"
 #include "base/logging.h"
 #include "base/optional.h"
 #include "gpu/vulkan/vulkan_export.h"
@@ -95,6 +97,7 @@ class VULKAN_EXPORT VulkanSwapChain {
                               VkImageLayout* layout,
                               VkSemaphore* semaphore);
   void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
+  bool AcquireNextImage();
 
   bool use_protected_memory_ = false;
   VulkanDeviceQueue* device_queue_;
@@ -115,11 +118,14 @@ class VULKAN_EXPORT VulkanSwapChain {
     VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
     std::unique_ptr<VulkanCommandBuffer> command_buffer;
     // Semaphore passed to vkQueuePresentKHR to wait on.
-    VkSemaphore present_wait_semaphore = VK_NULL_HANDLE;
+    VkSemaphore present_begin_semaphore = VK_NULL_HANDLE;
+    // Semaphore signaled when the present engine is done with the image.
+    VkSemaphore present_end_semaphore = VK_NULL_HANDLE;
   };
   std::vector<ImageData> images_;
 
+  // Indices of images owned by the present engine, in presenting order.
+  base::circular_deque<uint32_t> in_present_images_;
   // Acquired image index.
   base::Optional<uint32_t> acquired_image_;
   bool is_writing_ = false;
   VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
......