Commit 9058f786 authored by Antoine Labour, committed by Commit Bot

Use SharedImageInterface for one-copy staging buffers

Bug: 882513
Change-Id: I7643ae90356a4a98f57cb5e14691af4362ad9b83
Reviewed-on: https://chromium-review.googlesource.com/c/1323719
Commit-Queue: Antoine Labour <piman@chromium.org>
Reviewed-by: Jonathan Backer <backer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607454}
parent 0d45a99a
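
The change, in brief: OneCopyRasterBufferProvider::CopyOnWorkerThread stops importing the staging GpuMemoryBuffer with CreateImageCHROMIUM/BindTexImage2DCHROMIUM and caching a texture id on the StagingBuffer, and instead publishes the buffer as a shared image through gpu::SharedImageInterface, consuming its mailbox into a short-lived texture for each copy. The sketch below is a condensed, hypothetical reconstruction of that flow, not the literal CL code: the helper name and parameters are illustrative, and the destination-mailbox setup, query bookkeeping, and chunked copy loop of the real function are omitted.

```cpp
// Condensed sketch of the staging copy path after this CL. Chromium-internal
// APIs; the real code also uses StagingBufferUsage() and SCANOUT usages.
#include "cc/raster/staging_buffer_pool.h"
#include "components/viz/client/client_resource_provider.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "ui/gfx/color_space.h"

gpu::SyncToken CopyStagingToDestination(
    gpu::SharedImageInterface* sii,
    gpu::raster::RasterInterface* ri,
    gpu::GpuMemoryBufferManager* gmb_manager,
    cc::StagingBuffer* staging,
    const gpu::Mailbox& dest_mailbox,
    bool dest_is_overlay_candidate,
    viz::ResourceFormat dest_format,
    const gfx::ColorSpace& color_space,
    const gpu::SyncToken& dest_sync_token,
    int width,
    int height) {
  // Publish (or re-publish) the staging GpuMemoryBuffer as a shared image.
  // This replaces the CreateImageCHROMIUM + BindTexImage2DCHROMIUM pair.
  if (staging->mailbox.IsZero()) {
    staging->mailbox = sii->CreateSharedImage(
        staging->gpu_memory_buffer.get(), gmb_manager, color_space,
        gpu::SHARED_IMAGE_USAGE_RASTER);
  } else {
    // Pick up CPU-side writes, ordered after the previous GPU use of the
    // staging image recorded in |staging->sync_token|.
    sii->UpdateSharedImage(staging->sync_token, staging->mailbox);
  }

  // The SharedImageInterface and RasterInterface are separate streams, so the
  // raster work must explicitly wait on the destination's token and on the
  // shared-image operations above.
  ri->WaitSyncTokenCHROMIUM(dest_sync_token.GetConstData());
  ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());

  // Both mailboxes are consumed into per-copy texture ids; no texture or
  // image id is cached on the StagingBuffer anymore.
  GLuint dest_texture = ri->CreateAndConsumeTexture(
      dest_is_overlay_candidate, gfx::BufferUsage::SCANOUT, dest_format,
      dest_mailbox.name);
  GLuint staging_texture = ri->CreateAndConsumeTexture(
      /*is_overlay_candidate=*/true, gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
      staging->format, staging->mailbox.name);

  // The real code copies in chunks and throttles flushes; one call suffices
  // for the sketch.
  ri->CopySubTexture(staging_texture, dest_texture, 0, 0, 0, 0, width, height);

  GLuint textures_to_delete[] = {dest_texture, staging_texture};
  ri->DeleteTextures(2, textures_to_delete);

  // A single token both gates the display compositor on the copy and, stored
  // on the staging buffer, protects the staging shared image if the pool
  // destroys it before the copy's query completes.
  gpu::SyncToken out_sync_token =
      viz::ClientResourceProvider::GenerateSyncTokenHelper(ri);
  staging->sync_token = out_sync_token;
  return out_sync_token;
}
```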
@@ -366,61 +366,50 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
bool mailbox_texture_is_overlay_candidate,
const gpu::SyncToken& sync_token,
const gfx::ColorSpace& color_space) {
viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
worker_context_provider_);
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
DCHECK(ri);
auto* sii = worker_context_provider_->SharedImageInterface();
DCHECK(sii);
if (!staging_buffer->gpu_memory_buffer.get()) {
// If GpuMemoryBuffer allocation failed (https://crbug.com/554541), then
// we don't have anything to give to copy into the resource. We report a
// zero mailbox that will result in checkerboarding, and be treated as OOM
// which should retry.
if (!mailbox->IsZero()) {
sii->DestroySharedImage(sync_token, *mailbox);
mailbox->SetZero();
}
return gpu::SyncToken();
}
if (mailbox->IsZero()) {
auto* sii = worker_context_provider_->SharedImageInterface();
uint32_t flags = gpu::SHARED_IMAGE_USAGE_RASTER;
if (mailbox_texture_is_overlay_candidate)
flags |= gpu::SHARED_IMAGE_USAGE_SCANOUT;
*mailbox = sii->CreateSharedImage(resource_format, resource_size,
color_space, flags);
ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
}
// Create staging shared image.
if (staging_buffer->mailbox.IsZero()) {
staging_buffer->mailbox = sii->CreateSharedImage(
staging_buffer->gpu_memory_buffer.get(), gpu_memory_buffer_manager_,
color_space, gpu::SHARED_IMAGE_USAGE_RASTER);
} else {
ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
sii->UpdateSharedImage(staging_buffer->sync_token, staging_buffer->mailbox);
}
viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
worker_context_provider_);
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
DCHECK(ri);
ri->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
ri->WaitSyncTokenCHROMIUM(sii->GenUnverifiedSyncToken().GetConstData());
GLuint mailbox_texture_id = ri->CreateAndConsumeTexture(
mailbox_texture_is_overlay_candidate, gfx::BufferUsage::SCANOUT,
resource_format, mailbox->name);
// Create and bind staging texture.
if (!staging_buffer->texture_id) {
staging_buffer->texture_id =
ri->CreateTexture(true, StagingBufferUsage(), staging_buffer->format);
ri->TexParameteri(staging_buffer->texture_id, GL_TEXTURE_MIN_FILTER,
GL_NEAREST);
ri->TexParameteri(staging_buffer->texture_id, GL_TEXTURE_MAG_FILTER,
GL_NEAREST);
ri->TexParameteri(staging_buffer->texture_id, GL_TEXTURE_WRAP_S,
GL_CLAMP_TO_EDGE);
ri->TexParameteri(staging_buffer->texture_id, GL_TEXTURE_WRAP_T,
GL_CLAMP_TO_EDGE);
}
// Create and bind image.
if (!staging_buffer->image_id) {
if (staging_buffer->gpu_memory_buffer) {
staging_buffer->image_id = ri->CreateImageCHROMIUM(
staging_buffer->gpu_memory_buffer->AsClientBuffer(),
staging_buffer->size.width(), staging_buffer->size.height(),
GLInternalFormat(staging_buffer->format));
ri->BindTexImage2DCHROMIUM(staging_buffer->texture_id,
staging_buffer->image_id);
}
} else {
ri->ReleaseTexImage2DCHROMIUM(staging_buffer->texture_id,
staging_buffer->image_id);
ri->BindTexImage2DCHROMIUM(staging_buffer->texture_id,
staging_buffer->image_id);
}
// Unbind staging texture.
// TODO(vmiura): Need a way to ensure we don't hold onto bindings?
// ri->BindTexture(image_target, 0);
GLuint staging_texture_id = ri->CreateAndConsumeTexture(
true, StagingBufferUsage(), staging_buffer->format,
staging_buffer->mailbox.name);
// Do not use queries unless COMMANDS_COMPLETED queries are supported, or
// COMMANDS_ISSUED queries are sufficient.
@@ -467,8 +456,8 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
int rows_to_copy = std::min(chunk_size_in_rows, height - y);
DCHECK_GT(rows_to_copy, 0);
ri->CopySubTexture(staging_buffer->texture_id, mailbox_texture_id, 0, y, 0,
y, rect_to_copy.width(), rows_to_copy);
ri->CopySubTexture(staging_texture_id, mailbox_texture_id, 0, y, 0, y,
rect_to_copy.width(), rows_to_copy);
y += rows_to_copy;
// Increment |bytes_scheduled_since_last_flush_| by the amount of memory
@@ -484,11 +473,20 @@ gpu::SyncToken OneCopyRasterBufferProvider::CopyOnWorkerThread(
if (query_target != GL_NONE)
ri->EndQueryEXT(query_target);
ri->DeleteTextures(1, &mailbox_texture_id);
GLuint textures_to_delete[] = {mailbox_texture_id, staging_texture_id};
ri->DeleteTextures(2, textures_to_delete);
// Generate sync token on the worker context that will be sent to and waited
// for by the display compositor before using the content generated here.
return viz::ClientResourceProvider::GenerateSyncTokenHelper(ri);
// The same sync token is used to synchronize operations on the staging
// buffer. Note, the query completion is generally enough to guarantee
// ordering, but there are some paths (e.g.
// StagingBufferPool::ReduceMemoryUsage) that may destroy the staging buffer
// without waiting for the query completion.
gpu::SyncToken out_sync_token =
viz::ClientResourceProvider::GenerateSyncTokenHelper(ri);
staging_buffer->sync_token = out_sync_token;
return out_sync_token;
}
gfx::BufferUsage OneCopyRasterBufferProvider::StagingBufferUsage() const {
......
@@ -13,6 +13,7 @@
#include "components/viz/common/gpu/raster_context_provider.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "third_party/khronos/GLES2/gl2.h"
#include "third_party/khronos/GLES2/gl2ext.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -68,23 +69,19 @@ StagingBuffer::StagingBuffer(const gfx::Size& size, viz::ResourceFormat format)
: size(size), format(format) {}
StagingBuffer::~StagingBuffer() {
DCHECK_EQ(texture_id, 0u);
DCHECK_EQ(image_id, 0u);
DCHECK(mailbox.IsZero());
DCHECK_EQ(query_id, 0u);
}
void StagingBuffer::DestroyGLResources(gpu::raster::RasterInterface* ri) {
void StagingBuffer::DestroyGLResources(gpu::raster::RasterInterface* ri,
gpu::SharedImageInterface* sii) {
if (query_id) {
ri->DeleteQueriesEXT(1, &query_id);
query_id = 0;
}
if (image_id) {
ri->DestroyImageCHROMIUM(image_id);
image_id = 0;
}
if (texture_id) {
ri->DeleteTextures(1, &texture_id);
texture_id = 0;
if (!mailbox.IsZero()) {
sii->DestroySharedImage(sync_token, mailbox);
mailbox.SetZero();
}
}
@@ -248,6 +245,8 @@ std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer(
worker_context_provider_);
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
gpu::SharedImageInterface* sii =
worker_context_provider_->SharedImageInterface();
DCHECK(ri);
// Check if any busy buffers have become available.
@@ -324,7 +323,7 @@ std::unique_ptr<StagingBuffer> StagingBufferPool::AcquireStagingBuffer(
if (free_buffers_.empty())
break;
free_buffers_.front()->DestroyGLResources(ri);
free_buffers_.front()->DestroyGLResources(ri, sii);
MarkStagingBufferAsBusy(free_buffers_.front().get());
RemoveStagingBuffer(free_buffers_.front().get());
free_buffers_.pop_front();
@@ -395,6 +394,9 @@ void StagingBufferPool::ReleaseBuffersNotUsedSince(base::TimeTicks time) {
gpu::raster::RasterInterface* ri = scoped_context.RasterInterface();
DCHECK(ri);
gpu::SharedImageInterface* sii =
worker_context_provider_->SharedImageInterface();
DCHECK(sii);
// Note: Front buffer is guaranteed to be LRU so we can stop releasing
// buffers as soon as we find a buffer that has been used since |time|.
@@ -402,7 +404,7 @@ void StagingBufferPool::ReleaseBuffersNotUsedSince(base::TimeTicks time) {
if (free_buffers_.front()->last_usage > time)
return;
free_buffers_.front()->DestroyGLResources(ri);
free_buffers_.front()->DestroyGLResources(ri, sii);
MarkStagingBufferAsBusy(free_buffers_.front().get());
RemoveStagingBuffer(free_buffers_.front().get());
free_buffers_.pop_front();
@@ -412,7 +414,7 @@ void StagingBufferPool::ReleaseBuffersNotUsedSince(base::TimeTicks time) {
if (busy_buffers_.front()->last_usage > time)
return;
busy_buffers_.front()->DestroyGLResources(ri);
busy_buffers_.front()->DestroyGLResources(ri, sii);
RemoveStagingBuffer(busy_buffers_.front().get());
busy_buffers_.pop_front();
}
......
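
Because the hunks above interleave the removed CHROMIUM-image teardown with its replacement, here is the resulting StagingBuffer::DestroyGLResources as a clean reconstruction (taken from the diff; the pool call sites simply pass the worker context's SharedImageInterface alongside the RasterInterface):

```cpp
// Reconstructed "after" state of the teardown (from the -68/+69 hunk): the
// staging buffer releases a shared image instead of deleting a CHROMIUM image
// id and a texture id. The recorded |sync_token| makes the service wait for
// the last copy that consumed this staging image before the backing memory is
// actually released. Includes are those already in staging_buffer_pool.cc.
void StagingBuffer::DestroyGLResources(gpu::raster::RasterInterface* ri,
                                       gpu::SharedImageInterface* sii) {
  if (query_id) {
    ri->DeleteQueriesEXT(1, &query_id);
    query_id = 0;
  }
  if (!mailbox.IsZero()) {
    sii->DestroySharedImage(sync_token, mailbox);
    mailbox.SetZero();
  }
}
```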
@@ -22,6 +22,8 @@
#include "cc/cc_export.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
@@ -32,6 +34,7 @@ namespace gpu {
namespace raster {
class RasterInterface;
}
class SharedImageInterface;
} // namespace gpu
namespace viz {
@@ -44,7 +47,8 @@ struct StagingBuffer {
StagingBuffer(const gfx::Size& size, viz::ResourceFormat format);
~StagingBuffer();
void DestroyGLResources(gpu::raster::RasterInterface* gl);
void DestroyGLResources(gpu::raster::RasterInterface* gl,
gpu::SharedImageInterface* sii);
void OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
viz::ResourceFormat format,
bool is_free) const;
@@ -58,11 +62,11 @@
// GpuMemoryBuffer.
std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;
// Id for image used to import the GpuMemoryBuffer to command buffer.
GLuint image_id = 0;
// Mailbox for the shared image bound to the GpuMemoryBuffer.
gpu::Mailbox mailbox;
// Id for texture that's bound to the GpuMemoryBuffer image.
GLuint texture_id = 0;
// Sync token for the last RasterInterface operations using the shared image.
gpu::SyncToken sync_token;
// Id of command buffer query that tracks use of this staging buffer by the
// GPU. In general, GPU synchronization is necessary for native
......
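
The header hunk above leaves StagingBuffer with the members sketched below (a reconstruction; unrelated members such as size, format, query_id, and last_usage are unchanged and omitted, and the texture consumed from the mailbox appears to no longer be stored on the struct since the copy path now creates and deletes it per copy):

```cpp
// Sketch of the shared-image-related StagingBuffer members after this CL
// (reconstructed from the header hunk above; not the full declaration).
struct StagingBuffer {
  // Backing memory, written by the CPU-side raster worker.
  std::unique_ptr<gfx::GpuMemoryBuffer> gpu_memory_buffer;

  // Mailbox for the shared image bound to the GpuMemoryBuffer; replaces the
  // CreateImageCHROMIUM image_id (and the texture that was kept bound to it).
  gpu::Mailbox mailbox;

  // Sync token for the last RasterInterface operations using the shared
  // image; UpdateSharedImage and DestroySharedImage wait on it.
  gpu::SyncToken sync_token;
};
```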