Commit fa7c497f authored by Peng Huang, committed by Commit Bot

vulkan: Add VulkanSwapChain::PostSubBufferAsync()

The bug happens because X11 presents hidden windows at 1 Hz. So if there
are two windows on screen and one of them is hidden,
vkAcquireNextImageKHR() can block for up to 1 second, and the GPU main
thread is blocked with it. Fix this problem by adding
PostSubBufferAsync(), which calls vkAcquireNextImageKHR() off the GPU
main thread.
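
Simplified, self-contained sketch (standard C++, not Chromium's base API;
the names below are made up for illustration only): it shows the general
pattern this CL uses, where the GPU main thread hands the potentially
blocking acquire to another thread and receives the result through a
callback instead of waiting in vkAcquireNextImageKHR() itself.

#include <chrono>
#include <functional>
#include <future>
#include <iostream>
#include <thread>

// Stand-in for vkAcquireNextImageKHR(); on X11 this can block for ~1 second
// when the window is occluded, which previously stalled the GPU main thread.
bool BlockingAcquireNextImage() {
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  return true;
}

// Shape of PostSubBufferAsync(): run the blocking acquire off the calling
// thread and report completion via |callback|.
std::future<void> PostSubBufferAsync(std::function<void(bool)> callback) {
  return std::async(std::launch::async, [callback] {
    bool ok = BlockingAcquireNextImage();
    callback(ok);  // In Chromium this would be posted back to the GPU thread.
  });
}

int main() {
  auto pending = PostSubBufferAsync(
      [](bool ok) { std::cout << "acquire finished, ok=" << ok << "\n"; });
  std::cout << "GPU main thread keeps compositing without waiting\n";
  pending.wait();  // Only for the demo; the real code never blocks here.
  return 0;
}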

Bug: 1097014
Change-Id: I2dc7988d5c6320d167b3280b3f92ac9979644887
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2256589
Commit-Queue: Peng Huang <penghuang@chromium.org>
Reviewed-by: Vasiliy Telezhnikov <vasilyt@chromium.org>
Cr-Commit-Position: refs/heads/master@{#781654}
parent 397cd551
@@ -56,6 +56,10 @@ class VIZ_SERVICE_EXPORT OutputSurface {
     Capabilities(const Capabilities& capabilities);
 
     int max_frames_pending = 1;
+    // The number of buffers for the SkiaOutputDevice. If
+    // |supports_post_sub_buffer| is true, SkiaOutputSurfaceImpl will track the
+    // target damaged area based on this number.
+    int number_of_buffers = 2;
     // Whether this output surface renders to the default OpenGL zero
     // framebuffer or to an offscreen framebuffer.
     bool uses_default_gl_framebuffer = true;
......
@@ -4,6 +4,10 @@
 #include "components/viz/service/display_embedder/skia_output_device_buffer_queue.h"
 
+#include <memory>
+#include <utility>
+#include <vector>
+
 #include "base/command_line.h"
 #include "components/viz/common/switches.h"
 #include "components/viz/service/display_embedder/skia_output_surface_dependency.h"
@@ -29,7 +33,7 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
   capabilities_.uses_default_gl_framebuffer = false;
   capabilities_.preserve_buffer_content = true;
   capabilities_.only_invalidates_damage_rect = false;
-  capabilities_.max_frames_pending = 2;
+  capabilities_.number_of_buffers = 3;
   // Force the number of max pending frames to one when the switch
   // "double-buffer-compositing" is passed.
@@ -37,7 +41,8 @@ SkiaOutputDeviceBufferQueue::SkiaOutputDeviceBufferQueue(
   // allocates at most one additional buffer.
   base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
   if (command_line->HasSwitch(switches::kDoubleBufferCompositing))
-    capabilities_.max_frames_pending = 1;
+    capabilities_.number_of_buffers = 2;
+  capabilities_.max_frames_pending = capabilities_.number_of_buffers - 1;
 
   presenter_->InitializeCapabilities(&capabilities_);
 }
@@ -214,7 +219,7 @@ bool SkiaOutputDeviceBufferQueue::Reshape(const gfx::Size& size,
   FreeAllSurfaces();
 
   images_ = presenter_->AllocateImages(color_space_, image_size_,
-                                       capabilities_.max_frames_pending + 1);
+                                       capabilities_.number_of_buffers);
   if (images_.empty())
     return false;
......
@@ -102,23 +102,25 @@ void SkiaOutputDeviceVulkan::PostSubBuffer(
 #endif
   StartSwapBuffers(std::move(feedback));
 
-  auto image_size = vulkan_surface_->image_size();
-  gfx::SwapResult result = gfx::SwapResult::SWAP_ACK;
-  // If the swapchain is new created, but rect doesn't cover the whole buffer,
-  // we will still present it even it causes a artifact in this frame and
-  // recovered when the next frame is presented. We do that because the old
-  // swapchain's present thread is blocked on waiting a reply from xserver, and
-  // presenting a new image with the new create swapchain will somehow makes
-  // xserver send a reply to us, and then unblock the old swapchain's present
-  // thread. So the old swapchain can be destroyed properly.
-  if (!rect.IsEmpty())
-    result = vulkan_surface_->PostSubBuffer(rect);
-  if (is_new_swapchain_) {
-    is_new_swapchain_ = false;
-    result = gfx::SwapResult::SWAP_NAK_RECREATE_BUFFERS;
+  if (!rect.IsEmpty()) {
+    // If the swapchain is newly created, but |rect| doesn't cover the whole
+    // buffer, we still present it, even though it causes an artifact in this
+    // frame that is recovered when the next frame is presented. We do that
+    // because the old swapchain's present thread is blocked waiting for a
+    // reply from the X server, and presenting a new image with the newly
+    // created swapchain somehow makes the X server send a reply to us, which
+    // unblocks the old swapchain's present thread. So the old swapchain can
+    // be destroyed properly.
+    vulkan_surface_->PostSubBufferAsync(
+        rect, base::BindOnce(&SkiaOutputDeviceVulkan::OnPostSubBufferFinished,
+                             weak_ptr_factory_.GetWeakPtr(),
+                             std::move(latency_info), is_new_swapchain_));
+  } else {
+    OnPostSubBufferFinished(std::move(latency_info), is_new_swapchain_,
+                            gfx::SwapResult::SWAP_ACK);
   }
-  FinishSwapBuffers(gfx::SwapCompletionResult(result), image_size,
-                    std::move(latency_info));
+
+  is_new_swapchain_ = false;
 }
 
 SkSurface* SkiaOutputDeviceVulkan::BeginPaint(
@@ -243,7 +245,8 @@ bool SkiaOutputDeviceVulkan::Initialize() {
   vulkan_surface_ = std::move(vulkan_surface);
   capabilities_.uses_default_gl_framebuffer = false;
-  capabilities_.max_frames_pending = vulkan_surface_->image_count() - 1;
+  capabilities_.max_frames_pending = 1;
+  capabilities_.number_of_buffers = vulkan_surface_->image_count();
   // Vulkan FIFO swap chain should return vk images in presenting order, so set
   // preserve_buffer_content & supports_post_sub_buffer to true to let
   // SkiaOutputBufferImpl to manage damages.
@@ -288,6 +291,16 @@ bool SkiaOutputDeviceVulkan::RecreateSwapChain(
   return true;
 }
 
+void SkiaOutputDeviceVulkan::OnPostSubBufferFinished(
+    std::vector<ui::LatencyInfo> latency_info,
+    bool is_new_swapchain,
+    gfx::SwapResult result) {
+  if (is_new_swapchain)
+    result = gfx::SwapResult::SWAP_NAK_RECREATE_BUFFERS;
+  FinishSwapBuffers(gfx::SwapCompletionResult(result),
+                    vulkan_surface_->image_size(), std::move(latency_info));
+}
+
 SkiaOutputDeviceVulkan::SkSurfaceSizePair::SkSurfaceSizePair() = default;
 SkiaOutputDeviceVulkan::SkSurfaceSizePair::SkSurfaceSizePair(
     const SkSurfaceSizePair& other) = default;
......
@@ -9,6 +9,7 @@
 #include <vector>
 
 #include "base/macros.h"
+#include "base/memory/weak_ptr.h"
 #include "base/optional.h"
 #include "base/util/type_safety/pass_key.h"
 #include "build/build_config.h"
@@ -72,6 +73,9 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
   bool RecreateSwapChain(const gfx::Size& size,
                          sk_sp<SkColorSpace> color_space,
                          gfx::OverlayTransform transform);
+  void OnPostSubBufferFinished(std::vector<ui::LatencyInfo> latency_info,
+                               bool is_new_swapchain,
+                               gfx::SwapResult result);
 
   VulkanContextProvider* const context_provider_;
@@ -90,6 +94,8 @@ class SkiaOutputDeviceVulkan final : public SkiaOutputDevice {
   sk_sp<SkColorSpace> color_space_;
   bool is_new_swapchain_ = true;
 
+  base::WeakPtrFactory<SkiaOutputDeviceVulkan> weak_ptr_factory_{this};
+
   DISALLOW_COPY_AND_ASSIGN(SkiaOutputDeviceVulkan);
 };
......
@@ -695,7 +695,7 @@ bool SkiaOutputSurfaceImpl::Initialize() {
       capabilities_.supports_post_sub_buffer) {
     capabilities_.only_invalidates_damage_rect = false;
     capabilities_.supports_target_damage = true;
-    damage_of_buffers_.resize(capabilities_.max_frames_pending + 1);
+    damage_of_buffers_.resize(capabilities_.number_of_buffers);
   }
 
   return result;
......
@@ -11,6 +11,7 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/stl_util.h"
+#include "base/threading/scoped_blocking_call.h"
 #include "gpu/vulkan/vulkan_device_queue.h"
 #include "gpu/vulkan/vulkan_function_pointers.h"
 #include "gpu/vulkan/vulkan_swap_chain.h"
@@ -69,6 +70,9 @@ gfx::OverlayTransform FromVkSurfaceTransformFlag(
   }
 }
 
+// Minimum VkImages in a vulkan swap chain.
+uint32_t kMinImageCount = 3u;
+
 }  // namespace
 
 VulkanSurface::~VulkanSurface() {
@@ -166,7 +170,7 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
     return false;
   }
 
-  image_count_ = std::max(surface_caps.minImageCount, 3u);
+  image_count_ = std::max(surface_caps.minImageCount, kMinImageCount);
 
   return true;
 }
@@ -185,10 +189,18 @@ gfx::SwapResult VulkanSurface::SwapBuffers() {
 }
 
 gfx::SwapResult VulkanSurface::PostSubBuffer(const gfx::Rect& rect) {
-  return swap_chain_->PresentBuffer(rect);
+  return swap_chain_->PostSubBuffer(rect);
+}
+
+void VulkanSurface::PostSubBufferAsync(
+    const gfx::Rect& rect,
+    VulkanSwapChain::PostSubBufferCompletionCallback callback) {
+  swap_chain_->PostSubBufferAsync(rect, std::move(callback));
 }
 
 void VulkanSurface::Finish() {
+  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+                                                base::BlockingType::WILL_BLOCK);
   vkQueueWaitIdle(device_queue_->GetVulkanQueue());
 }
@@ -262,7 +274,7 @@ bool VulkanSurface::CreateSwapChain(const gfx::Size& size,
   auto swap_chain = std::make_unique<VulkanSwapChain>();
 
   // Create swap chain.
-  DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, 3u));
+  DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, kMinImageCount));
   if (!swap_chain->Initialize(
           device_queue_, surface_, surface_format_, image_size_, image_count_,
           vk_transform, enforce_protected_memory_, std::move(swap_chain_))) {
......
@@ -46,6 +46,9 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
   gfx::SwapResult SwapBuffers();
   gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+  void PostSubBufferAsync(
+      const gfx::Rect& rect,
+      VulkanSwapChain::PostSubBufferCompletionCallback callback);
 
   void Finish();
......
@@ -6,6 +6,10 @@
 #include "base/bind.h"
 #include "base/logging.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "gpu/vulkan/vulkan_command_buffer.h"
 #include "gpu/vulkan/vulkan_command_pool.h"
@@ -31,11 +35,17 @@ VkSemaphore CreateSemaphore(VkDevice vk_device) {
 }  // namespace
 
-VulkanSwapChain::VulkanSwapChain() {}
+VulkanSwapChain::VulkanSwapChain() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+}
 
 VulkanSwapChain::~VulkanSwapChain() {
+#if DCHECK_IS_ON()
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   DCHECK(images_.empty());
   DCHECK_EQ(static_cast<VkSwapchainKHR>(VK_NULL_HANDLE), swap_chain_);
+#endif
 }
 
 bool VulkanSwapChain::Initialize(
@@ -47,8 +57,12 @@ bool VulkanSwapChain::Initialize(
     VkSurfaceTransformFlagBitsKHR pre_transform,
     bool use_protected_memory,
     std::unique_ptr<VulkanSwapChain> old_swap_chain) {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   DCHECK(device_queue);
   DCHECK(!use_protected_memory || device_queue->allow_protected_memory());
+
+  task_runner_ = base::ThreadTaskRunnerHandle::Get();
   use_protected_memory_ = use_protected_memory;
   device_queue_ = device_queue;
   is_incremental_present_supported_ =
@@ -58,100 +72,64 @@ bool VulkanSwapChain::Initialize(
   return InitializeSwapChain(surface, surface_format, image_size,
                              min_image_count, pre_transform,
                              use_protected_memory, std::move(old_swap_chain)) &&
-         InitializeSwapImages(surface_format);
+         InitializeSwapImages(surface_format) && AcquireNextImage();
 }
 
 void VulkanSwapChain::Destroy() {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  WaitUntilPostSubBufferAsyncFinished();
   DCHECK(!is_writing_);
   DestroySwapImages();
   DestroySwapChain();
 }
 
-gfx::SwapResult VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
-  DCHECK(acquired_image_);
-  DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
-
-  VkResult result = VK_SUCCESS;
-  VkDevice device = device_queue_->GetVulkanDevice();
-  VkQueue queue = device_queue_->GetVulkanQueue();
-  auto* fence_helper = device_queue_->GetFenceHelper();
-
-  auto& current_image_data = images_[*acquired_image_];
-  if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
-    {
-      current_image_data.command_buffer->Clear();
-      ScopedSingleUseCommandBufferRecorder recorder(
-          *current_image_data.command_buffer);
-      current_image_data.command_buffer->TransitionImageLayout(
-          current_image_data.image, current_image_data.layout,
-          VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
-    }
-    current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
-    VkSemaphore vk_semaphore = CreateSemaphore(device);
-    // Submit our command_buffer for the current buffer. It sets the image
-    // layout for presenting.
-    if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
-                                                   &vk_semaphore)) {
-      vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
-      return gfx::SwapResult::SWAP_FAILED;
-    }
-    current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-    fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
-    end_write_semaphore_ = vk_semaphore;
-  }
-
-  VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
-  present_info.waitSemaphoreCount = 1;
-  present_info.pWaitSemaphores = &end_write_semaphore_;
-  present_info.swapchainCount = 1;
-  present_info.pSwapchains = &swap_chain_;
-  present_info.pImageIndices = &acquired_image_.value();
-
-  VkRectLayerKHR rect_layer;
-  VkPresentRegionKHR present_region;
-  VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
-  if (is_incremental_present_supported_) {
-    rect_layer.offset = {rect.x(), rect.y()};
-    rect_layer.extent = {rect.width(), rect.height()};
-    rect_layer.layer = 0;
-
-    present_region.rectangleCount = 1;
-    present_region.pRectangles = &rect_layer;
-
-    present_regions.swapchainCount = 1;
-    present_regions.pRegions = &present_region;
-
-    present_info.pNext = &present_regions;
-  }
-
-  result = vkQueuePresentKHR(queue, &present_info);
-  if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
-    LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
-    return gfx::SwapResult::SWAP_FAILED;
-  }
-  current_image_data.is_acquired = false;
-  LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchian is suboptimal.";
-
-  if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
-    // |present_begin_semaphore| for the previous present for this image can be
-    // safely destroyed after semaphore got from vkAcquireNextImageHKR() is
-    // passed. That acquired semaphore should be already waited on for a
-    // submitted GPU work. So we can safely enqueue the
-    // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
-    // be destroyed when all submitted GPU work is finished).
-    fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
-        current_image_data.present_begin_semaphore);
-  }
-  // We are not sure when the semaphore is not used by present engine, so don't
-  // destroy the semaphore until the image is returned from present engine.
-  current_image_data.present_begin_semaphore = end_write_semaphore_;
-  end_write_semaphore_ = VK_NULL_HANDLE;
-
-  in_present_images_.emplace_back(*acquired_image_);
-  acquired_image_.reset();
-
-  return gfx::SwapResult::SWAP_ACK;
+gfx::SwapResult VulkanSwapChain::PostSubBuffer(const gfx::Rect& rect) {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  if (!PresentBuffer(rect))
+    return gfx::SwapResult::SWAP_FAILED;
+
+  if (!AcquireNextImage())
+    return gfx::SwapResult::SWAP_FAILED;
+
+  return gfx::SwapResult::SWAP_ACK;
+}
+
+void VulkanSwapChain::PostSubBufferAsync(
+    const gfx::Rect& rect,
+    PostSubBufferCompletionCallback callback) {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  if (!PresentBuffer(rect)) {
+    task_runner_->PostTask(
+        FROM_HERE,
+        base::BindOnce(std::move(callback), gfx::SwapResult::SWAP_FAILED));
+    return;
+  }
+
+  DCHECK_EQ(state_, VK_SUCCESS);
+
+  ++pending_post_sub_buffer_;
+  post_sub_buffer_task_runner_->PostTask(
+      FROM_HERE,
+      base::BindOnce(
+          [](VulkanSwapChain* self, PostSubBufferCompletionCallback callback) {
+            base::AutoLock auto_lock(self->lock_);
+            auto swap_result = self->AcquireNextImage()
+                                   ? gfx::SwapResult::SWAP_ACK
+                                   : gfx::SwapResult::SWAP_FAILED;
+            self->task_runner_->PostTask(
+                FROM_HERE, base::BindOnce(std::move(callback), swap_result));
+            --self->pending_post_sub_buffer_;
+            self->condition_variable_.Signal();
+          },
+          base::Unretained(this), std::move(callback)));
 }
 
 bool VulkanSwapChain::InitializeSwapChain(
@@ -162,7 +140,8 @@ bool VulkanSwapChain::InitializeSwapChain(
     VkSurfaceTransformFlagBitsKHR pre_transform,
     bool use_protected_memory,
     std::unique_ptr<VulkanSwapChain> old_swap_chain) {
-  DCHECK(!acquired_image_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
   VkDevice device = device_queue_->GetVulkanDevice();
   VkResult result = VK_SUCCESS;
@@ -183,8 +162,14 @@ bool VulkanSwapChain::InitializeSwapChain(
   swap_chain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   swap_chain_create_info.presentMode = VK_PRESENT_MODE_FIFO_KHR;
   swap_chain_create_info.clipped = true;
-  swap_chain_create_info.oldSwapchain =
-      old_swap_chain ? old_swap_chain->swap_chain_ : VK_NULL_HANDLE;
+  if (old_swap_chain) {
+    base::AutoLock auto_lock(old_swap_chain->lock_);
+    old_swap_chain->WaitUntilPostSubBufferAsyncFinished();
+    swap_chain_create_info.oldSwapchain =
+        old_swap_chain ? old_swap_chain->swap_chain_ : VK_NULL_HANDLE;
+    // Reuse |post_sub_buffer_task_runner_| from the |old_swap_chain|.
+    post_sub_buffer_task_runner_ = old_swap_chain->post_sub_buffer_task_runner_;
+  }
 
   VkSwapchainKHR new_swap_chain = VK_NULL_HANDLE;
   result = vkCreateSwapchainKHR(device, &swap_chain_create_info, nullptr,
@@ -205,10 +190,18 @@ bool VulkanSwapChain::InitializeSwapChain(
   size_ = gfx::Size(swap_chain_create_info.imageExtent.width,
                     swap_chain_create_info.imageExtent.height);
 
+  if (!post_sub_buffer_task_runner_) {
+    post_sub_buffer_task_runner_ = base::ThreadPool::CreateSequencedTaskRunner(
+        {base::TaskPriority::USER_BLOCKING,
+         base::TaskShutdownBehavior::BLOCK_SHUTDOWN, base::MayBlock()});
+  }
+
   return true;
 }
 
 void VulkanSwapChain::DestroySwapChain() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
   if (swap_chain_ == VK_NULL_HANDLE)
     return;
 
   vkDestroySwapchainKHR(device_queue_->GetVulkanDevice(), swap_chain_,
@@ -218,6 +211,8 @@ void VulkanSwapChain::DestroySwapChain() {
 
 bool VulkanSwapChain::InitializeSwapImages(
     const VkSurfaceFormatKHR& surface_format) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
   VkDevice device = device_queue_->GetVulkanDevice();
   VkResult result = VK_SUCCESS;
@@ -251,6 +246,8 @@ bool VulkanSwapChain::InitializeSwapImages(
 }
 
 void VulkanSwapChain::DestroySwapImages() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
   if (end_write_semaphore_)
     vkDestroySemaphore(device_queue_->GetVulkanDevice(), end_write_semaphore_,
                        nullptr /* pAllocator */);
@@ -282,30 +279,41 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
                                              uint32_t* image_index,
                                              VkImageLayout* image_layout,
                                              VkSemaphore* semaphore) {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   DCHECK(image);
   DCHECK(image_index);
   DCHECK(image_layout);
   DCHECK(semaphore);
   DCHECK(!is_writing_);
 
-  VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+  while (state_ == VK_SUCCESS && acquired_images_.empty()) {
+    DCHECK(pending_post_sub_buffer_);
+    condition_variable_.Wait();
+  }
+
+  if (state_ != VK_SUCCESS)
+    return false;
 
-  if (!acquired_image_) {
+  DCHECK(!acquired_images_.empty());
+  auto& current_image_data = images_[acquired_images_.front()];
+  VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+  if (current_image_data.present_end_semaphore != VK_NULL_HANDLE) {
     DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
-    if (!AcquireNextImage())
-      return false;
-    DCHECK(acquired_image_);
-    std::swap(vk_semaphore, images_[*acquired_image_].present_end_semaphore);
+    vk_semaphore = current_image_data.present_end_semaphore;
+    current_image_data.present_end_semaphore = VK_NULL_HANDLE;
   } else {
-    // In this case, PresentBuffer() is not called after
+    DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+    // In this case, PostSubBuffer() is not called after
     // {Begin,End}WriteCurrentImage pairs, |end_write_semaphore_| should be
     // waited on before writing the image again.
-    std::swap(vk_semaphore, end_write_semaphore_);
+    vk_semaphore = end_write_semaphore_;
+    end_write_semaphore_ = VK_NULL_HANDLE;
   }
-  auto& current_image_data = images_[*acquired_image_];
+
   *image = current_image_data.image;
-  *image_index = acquired_image_.value();
+  *image_index = acquired_images_.front();
   *image_layout = current_image_data.layout;
   *semaphore = vk_semaphore;
   is_writing_ = true;
@@ -315,19 +323,119 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
 
 void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
                                            VkSemaphore semaphore) {
+  base::AutoLock auto_lock(lock_);
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
   DCHECK(is_writing_);
-  DCHECK(acquired_image_);
+  DCHECK(!acquired_images_.empty());
   DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
 
-  auto& current_image_data = images_[*acquired_image_];
+  auto& current_image_data = images_[acquired_images_.front()];
   current_image_data.layout = image_layout;
   end_write_semaphore_ = semaphore;
   is_writing_ = false;
 }
 
+bool VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+  DCHECK_EQ(state_, VK_SUCCESS);
+  DCHECK(!acquired_images_.empty());
+  DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+
+  VkResult result = VK_SUCCESS;
+  VkDevice device = device_queue_->GetVulkanDevice();
+  VkQueue queue = device_queue_->GetVulkanQueue();
+  auto* fence_helper = device_queue_->GetFenceHelper();
+
+  auto& current_image_data = images_[acquired_images_.front()];
+  if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
+    {
+      current_image_data.command_buffer->Clear();
+      ScopedSingleUseCommandBufferRecorder recorder(
+          *current_image_data.command_buffer);
+      current_image_data.command_buffer->TransitionImageLayout(
+          current_image_data.image, current_image_data.layout,
+          VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+    }
+    current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+    VkSemaphore vk_semaphore = CreateSemaphore(device);
+    // Submit our command_buffer for the current buffer. It sets the image
+    // layout for presenting.
+    if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
+                                                   &vk_semaphore)) {
+      vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+      return false;
+    }
+    current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+    fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+    end_write_semaphore_ = vk_semaphore;
+  }
+
+  VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
+  present_info.waitSemaphoreCount = 1;
+  present_info.pWaitSemaphores = &end_write_semaphore_;
+  present_info.swapchainCount = 1;
+  present_info.pSwapchains = &swap_chain_;
+  present_info.pImageIndices = &acquired_images_.front();
+
+  VkRectLayerKHR rect_layer;
+  VkPresentRegionKHR present_region;
+  VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
+  if (is_incremental_present_supported_) {
+    rect_layer.offset = {rect.x(), rect.y()};
+    rect_layer.extent = {rect.width(), rect.height()};
+    rect_layer.layer = 0;
+
+    present_region.rectangleCount = 1;
+    present_region.pRectangles = &rect_layer;
+
+    present_regions.swapchainCount = 1;
+    present_regions.pRegions = &present_region;
+
+    present_info.pNext = &present_regions;
+  }
+
+  result = vkQueuePresentKHR(queue, &present_info);
+  if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+    LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
+    state_ = result;
+    return false;
+  }
+  current_image_data.is_acquired = false;
+  LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchain is suboptimal.";
+
+  if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+    // |present_begin_semaphore| for the previous present for this image can be
+    // safely destroyed after semaphore got from vkAcquireNextImageKHR() is
+    // passed. That acquired semaphore should be already waited on for a
+    // submitted GPU work. So we can safely enqueue the
+    // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
+    // be destroyed when all submitted GPU work is finished).
+    fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
+        current_image_data.present_begin_semaphore);
+  }
+  // We are not sure when the semaphore is not used by present engine, so don't
+  // destroy the semaphore until the image is returned from present engine.
+  current_image_data.present_begin_semaphore = end_write_semaphore_;
+  end_write_semaphore_ = VK_NULL_HANDLE;
+
+  in_present_images_.emplace_back(acquired_images_.front());
+  acquired_images_.pop_front();
+
+  return true;
+}
+
 bool VulkanSwapChain::AcquireNextImage() {
-  DCHECK(!acquired_image_);
+  DCHECK_EQ(state_, VK_SUCCESS);
+  DCHECK_LT(acquired_images_.size(), images_.size());
+
+  // VulkanDeviceQueue is not threadsafe for now, but |device_queue_| will not
+  // be released, and device_queue_->device will never be changed after
+  // initialization, so it is safe for now.
+  // TODO(penghuang): make VulkanDeviceQueue threadsafe.
   VkDevice device = device_queue_->GetVulkanDevice();
 
   // The Vulkan spec doesn't require vkAcquireNextImageKHR() returns images in
   // the present order for a vulkan swap chain. However for the best
   // performance, the driver should return images in order. To avoid buggy
@@ -341,7 +449,7 @@ bool VulkanSwapChain::AcquireNextImage() {
     // If the expected next image has been acquired, use it and return true.
     if (images_[expected_next_image].is_acquired) {
       in_present_images_.pop_front();
-      acquired_image_.emplace(expected_next_image);
+      acquired_images_.emplace_back(expected_next_image);
       break;
     }
   }
@@ -364,15 +472,20 @@ bool VulkanSwapChain::AcquireNextImage() {
 #endif
 
   // Acquire the next image.
   uint32_t next_image;
-  auto result =
-      vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
-                            VK_NULL_HANDLE, &next_image);
+  auto result = ({
+    base::ScopedBlockingCall scoped_blocking_call(
+        FROM_HERE, base::BlockingType::WILL_BLOCK);
+    vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
+                          VK_NULL_HANDLE, &next_image);
+  });
   if (result == VK_TIMEOUT) {
     LOG(ERROR) << "vkAcquireNextImageKHR() hangs.";
     vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
     state_ = VK_ERROR_SURFACE_LOST_KHR;
     return false;
   }
 
   if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
     LOG(DFATAL) << "vkAcquireNextImageKHR() failed: " << result;
     vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
@@ -391,7 +504,7 @@ bool VulkanSwapChain::AcquireNextImage() {
       DCHECK(!all_images_are_tracked);
       // Got an image which is not in the present queue due to the new created
       // swap chain. In this case, just use this image.
-      acquired_image_.emplace(next_image);
+      acquired_images_.emplace_back(next_image);
       break;
     }
     LOG_IF(ERROR, it != in_present_images_.begin())
@@ -400,6 +513,17 @@ bool VulkanSwapChain::AcquireNextImage() {
   return true;
 }
 
+void VulkanSwapChain::WaitUntilPostSubBufferAsyncFinished() {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  while (pending_post_sub_buffer_) {
+    base::ScopedBlockingCall scoped_blocking_call(
+        FROM_HERE, base::BlockingType::WILL_BLOCK);
+    condition_variable_.Wait();
+  }
+  DCHECK(!acquired_images_.empty() || state_ != VK_SUCCESS);
+}
+
 VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
     : swap_chain_(swap_chain) {
   success_ = swap_chain_->BeginWriteCurrentImage(
......
@@ -10,13 +10,22 @@
 #include <memory>
 #include <vector>
 
+#include "base/callback.h"
 #include "base/component_export.h"
 #include "base/containers/circular_deque.h"
+#include "base/memory/scoped_refptr.h"
 #include "base/optional.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
 #include "ui/gfx/geometry/rect.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gfx/swap_result.h"
 
+namespace base {
+class SingleThreadTaskRunner;
+}
+
 namespace gpu {
 
 class VulkanCommandBuffer;
@@ -72,12 +81,30 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
   void Destroy();
 
   // Present the current buffer.
-  gfx::SwapResult PresentBuffer(const gfx::Rect& rect);
-
-  uint32_t num_images() const { return static_cast<uint32_t>(images_.size()); }
-  const gfx::Size& size() const { return size_; }
-  bool use_protected_memory() const { return use_protected_memory_; }
-  VkResult state() const { return state_; }
+  gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+  using PostSubBufferCompletionCallback =
+      base::OnceCallback<void(gfx::SwapResult)>;
+  void PostSubBufferAsync(const gfx::Rect& rect,
+                          PostSubBufferCompletionCallback callback);
+
+  uint32_t num_images() const {
+    // size of |images_| will not be changed after initializing, so it is safe
+    // to read it here.
+    return static_cast<uint32_t>(TS_UNCHECKED_READ(images_).size());
+  }
+  const gfx::Size& size() const {
+    // |size_| is never changed after initialization.
+    return size_;
+  }
+  bool use_protected_memory() const {
+    // |use_protected_memory_| is never changed after initialization.
+    return use_protected_memory_;
+  }
+  VkResult state() const {
+    base::AutoLock auto_lock(lock_);
+    return state_;
+  }
 
  private:
   bool InitializeSwapChain(VkSurfaceKHR surface,
@@ -86,26 +113,31 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
                            uint32_t min_image_count,
                            VkSurfaceTransformFlagBitsKHR pre_transform,
                            bool use_protected_memory,
-                           std::unique_ptr<VulkanSwapChain> old_swap_chain);
-  void DestroySwapChain();
+                           std::unique_ptr<VulkanSwapChain> old_swap_chain)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void DestroySwapChain() EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format);
-  void DestroySwapImages();
+  bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void DestroySwapImages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   bool BeginWriteCurrentImage(VkImage* image,
                               uint32_t* image_index,
                               VkImageLayout* layout,
                               VkSemaphore* semaphore);
   void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
-  bool AcquireNextImage();
+  bool PresentBuffer(const gfx::Rect& rect) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  bool AcquireNextImage() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  // Wait until PostSubBufferAsync() is finished on ThreadPool.
+  void WaitUntilPostSubBufferAsyncFinished() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  mutable base::Lock lock_;
 
   bool use_protected_memory_ = false;
   VulkanDeviceQueue* device_queue_ = nullptr;
   bool is_incremental_present_supported_ = false;
-  VkSwapchainKHR swap_chain_ = VK_NULL_HANDLE;
+  VkSwapchainKHR swap_chain_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
   std::unique_ptr<VulkanCommandPool> command_pool_;
   gfx::Size size_;
 
   struct ImageData {
@@ -126,14 +158,33 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
     // to swapchain for presenting.
     bool is_acquired = false;
   };
-  std::vector<ImageData> images_;
 
-  // Acquired image index.
-  base::circular_deque<uint32_t> in_present_images_;
-  base::Optional<uint32_t> acquired_image_;
-  bool is_writing_ = false;
-  VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
-  VkResult state_ = VK_SUCCESS;
+  // Images in the swap chain.
+  std::vector<ImageData> images_ GUARDED_BY(lock_);
+
+  base::circular_deque<uint32_t> in_present_images_ GUARDED_BY(lock_);
+  bool is_writing_ GUARDED_BY(lock_) = false;
+  VkSemaphore end_write_semaphore_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
+
+  // Condition variable is signalled when a PostSubBufferAsync() is finished.
+  base::ConditionVariable condition_variable_{&lock_};
+
+  // Count of pending unfinished PostSubBufferAsync() calls.
+  uint32_t pending_post_sub_buffer_ GUARDED_BY(lock_) = 0;
+
+  // The current swapchain state_.
+  VkResult state_ GUARDED_BY(lock_) = VK_SUCCESS;
+
+  // Acquired images queue.
+  base::circular_deque<uint32_t> acquired_images_ GUARDED_BY(lock_);
+
+  // For executing tasks on the GPU main thread.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // For executing PostSubBufferAsync tasks off the GPU main thread.
+  scoped_refptr<base::SequencedTaskRunner> post_sub_buffer_task_runner_;
+
+  THREAD_CHECKER(thread_checker_);
 
   DISALLOW_COPY_AND_ASSIGN(VulkanSwapChain);
 };
......