Commit 7a59e081 authored by James Darpinian's avatar James Darpinian Committed by Commit Bot

GPU: Don't flush when destroying transfer buffers.

Destroying a transfer buffer now requires only an ordering barrier, not
a full flush. This removes a source of unnecessary flushes and makes
resizing the transfer buffer more efficient.

Bug: 850271, 835353, 828363
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I9bfe43c46c00f8e9fc29e6450da788f86e74bc52
Reviewed-on: https://chromium-review.googlesource.com/1093580
Reviewed-by: default avatarRobert Sesek <rsesek@chromium.org>
Reviewed-by: default avatarAntoine Labour <piman@chromium.org>
Reviewed-by: default avatarSunny Sachanandani <sunnyps@chromium.org>
Commit-Queue: James Darpinian <jdarpinian@chromium.org>
Cr-Commit-Position: refs/heads/master@{#567494}
parent cf42dd28
......@@ -125,8 +125,10 @@ void CommandBufferHelper::SetGetBuffer(int32_t id,
// Releases the ring buffer backing this helper, if one is allocated.
//
// Destroying a transfer buffer only needs an ordering barrier (so commands
// already recorded into the buffer are sequenced before the destroy), not a
// full flush. The stale FlushLazy() call that preceded OrderingBarrier()
// forced an unnecessary flush and is removed here.
void CommandBufferHelper::FreeRingBuffer() {
  if (HaveRingBuffer()) {
    // Sequence prior commands before the buffer is destroyed.
    OrderingBarrier();
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    // SetGetBuffer is an IPC, so previous work needs to be flushed first.
    Flush();
    SetGetBuffer(-1, nullptr);
  }
}
......
......@@ -629,8 +629,8 @@ TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
helper_->FreeRingBuffer();
EXPECT_FALSE(helper_->HaveRingBuffer());
// FreeRingBuffer should have caused a flush.
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
// FreeRingBuffer should have caused an ordering barrier and a flush.
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 2);
// However it shouldn't force a finish.
EXPECT_EQ(command_buffer_->GetLastState().get_offset, old_get_offset);
......@@ -638,7 +638,7 @@ TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
// should work.
helper_->Finish();
EXPECT_FALSE(helper_->HaveRingBuffer());
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 2);
EXPECT_EQ(command_buffer_->GetLastState().get_offset,
helper_->GetPutOffsetForTest());
}
......
......@@ -53,7 +53,7 @@ MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
}
MappedMemoryManager::~MappedMemoryManager() {
helper_->FlushLazy();
helper_->OrderingBarrier();
CommandBuffer* cmd_buf = helper_->command_buffer();
for (auto& chunk : chunks_) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
......@@ -152,7 +152,7 @@ void MappedMemoryManager::FreeUnused() {
chunk->FreeUnused();
if (chunk->bytes_in_use() == 0u) {
if (chunk->InUseOrFreePending())
helper_->FlushLazy();
helper_->OrderingBarrier();
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
allocated_memory_ -= chunk->GetSize();
iter = chunks_.erase(iter);
......
......@@ -65,7 +65,7 @@ bool TransferBuffer::Initialize(
void TransferBuffer::Free() {
if (HaveBuffer()) {
TRACE_EVENT0("gpu", "TransferBuffer::Free");
helper_->FlushLazy();
helper_->OrderingBarrier();
helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
buffer_id_ = -1;
buffer_ = NULL;
......
......@@ -86,8 +86,8 @@ void TransferBufferTest::TearDown() {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OnFlush()).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(2));
transfer_buffer_.reset();
}
......@@ -122,6 +122,9 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -137,6 +140,9 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -153,6 +159,9 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -170,8 +179,8 @@ TEST_F(TransferBufferTest, Free) {
int32_t put_offset = helper_->GetPutOffsetForTest();
transfer_buffer_->FreePendingToken(data, token);
// Free buffer. Should cause a Flush.
EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
// Free buffer. Should cause an ordering barrier.
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
......@@ -180,9 +189,7 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
// Free should have flushed.
EXPECT_EQ(put_offset, command_buffer_->GetServicePutOffset());
// However it shouldn't have caused a finish.
// Free should not have caused a finish.
EXPECT_LT(command_buffer_->GetState().get_offset, put_offset);
// See that it gets reallocated.
......@@ -199,6 +206,9 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
transfer_buffer_->Free();
}
......@@ -334,11 +344,18 @@ void TransferBufferExpandContractTest::TearDown() {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
}
// For command buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
transfer_buffer_.reset();
}
......@@ -362,6 +379,9 @@ TEST_F(TransferBufferExpandContractTest, Expand) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kStartTransferBufferSize * 2, _))
.WillOnce(Invoke(
......@@ -381,6 +401,9 @@ TEST_F(TransferBufferExpandContractTest, Expand) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kMaxTransferBufferSize, _))
.WillOnce(Invoke(
......@@ -415,6 +438,9 @@ TEST_F(TransferBufferExpandContractTest, Contract) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -445,6 +471,9 @@ TEST_F(TransferBufferExpandContractTest, Contract) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -469,6 +498,9 @@ TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -495,6 +527,9 @@ TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......
......@@ -411,7 +411,7 @@ void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
if (last_state_.error != gpu::error::kNoError)
return;
Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
channel_->DestroyTransferBuffer(route_id_, id);
}
void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) {
......
......@@ -91,7 +91,8 @@ uint32_t GpuChannelHost::OrderingBarrier(
std::vector<SyncToken> sync_token_fences) {
AutoLock lock(context_lock_);
if (flush_list_.empty() || flush_list_.back().route_id != route_id)
if (flush_list_.empty() || flush_list_.back().route_id != route_id ||
flush_list_.back().transfer_buffer_id_to_destroy)
flush_list_.push_back(FlushParams());
FlushParams& flush_params = flush_list_.back();
......@@ -102,9 +103,21 @@ uint32_t GpuChannelHost::OrderingBarrier(
flush_params.sync_token_fences.end(),
std::make_move_iterator(sync_token_fences.begin()),
std::make_move_iterator(sync_token_fences.end()));
flush_params.transfer_buffer_id_to_destroy = 0;
return flush_params.flush_id;
}
// Queues destruction of a transfer buffer as a dedicated entry on the flush
// list (a nonzero transfer_buffer_id_to_destroy marks the entry as a destroy
// rather than a flush). Callers must have placed an ordering barrier after
// any commands that use the buffer before invoking this.
void GpuChannelHost::DestroyTransferBuffer(int32_t route_id,
                                           int32_t id_to_destroy) {
  AutoLock lock(context_lock_);
  flush_list_.push_back(FlushParams());
  FlushParams& destroy_params = flush_list_.back();
  destroy_params.route_id = route_id;
  destroy_params.transfer_buffer_id_to_destroy = id_to_destroy;
  destroy_params.flush_id = next_flush_id_++;
}
void GpuChannelHost::EnsureFlush(uint32_t flush_id) {
AutoLock lock(context_lock_);
InternalFlush(flush_id);
......@@ -172,7 +185,10 @@ base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
// Reserves one unused transfer buffer ID.
//
// The previous implementation (`return GetNext() + 1;`) was left in place
// above the new skip-zero logic, making the new code unreachable; the stale
// line is removed here. ID 0 is reserved, so if the atomic counter yields 0
// (e.g. on wrap-around), skip it and take the next value instead.
int32_t GpuChannelHost::ReserveTransferBufferId() {
  int32_t id = g_next_transfer_buffer_id.GetNext();
  if (id)
    return id;
  // 0 is a reserved value; draw again.
  return g_next_transfer_buffer_id.GetNext();
}
int32_t GpuChannelHost::ReserveImageId() {
......
......@@ -121,6 +121,10 @@ class GPU_EXPORT GpuChannelHost
// Reserve one unused transfer buffer ID.
int32_t ReserveTransferBufferId();
// An ordering barrier must be placed after any commands that use the buffer
// before it is safe to call this function to destroy it.
void DestroyTransferBuffer(int32_t route_id, int32_t id_to_destroy);
// Reserve one unused image ID.
int32_t ReserveImageId();
......
......@@ -23,6 +23,8 @@ struct GPU_EXPORT FlushParams {
// Route ID of the command buffer for this flush.
int32_t route_id;
// If nonzero, destroy this transfer buffer instead of flushing.
int32_t transfer_buffer_id_to_destroy;
// Client put offset. Service get offset is updated in shared memory.
int32_t put_offset;
// Increasing counter for the flush.
......
......@@ -21,6 +21,7 @@
IPC_STRUCT_TRAITS_BEGIN(gpu::FlushParams)
IPC_STRUCT_TRAITS_MEMBER(route_id)
IPC_STRUCT_TRAITS_MEMBER(transfer_buffer_id_to_destroy)
IPC_STRUCT_TRAITS_MEMBER(put_offset)
IPC_STRUCT_TRAITS_MEMBER(flush_id)
IPC_STRUCT_TRAITS_MEMBER(sync_token_fences)
......
......@@ -252,20 +252,31 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
std::vector<FlushParams> flush_list = std::get<0>(std::move(params));
for (auto& flush_info : flush_list) {
GpuCommandBufferMsg_AsyncFlush flush_message(
flush_info.route_id, flush_info.put_offset, flush_info.flush_id);
auto it = route_sequences_.find(flush_info.route_id);
if (it == route_sequences_.end()) {
DLOG(ERROR) << "Invalid route id in flush list";
continue;
}
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage, gpu_channel_->AsWeakPtr(),
flush_message),
std::move(flush_info.sync_token_fences));
if (flush_info.transfer_buffer_id_to_destroy) {
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage,
gpu_channel_->AsWeakPtr(),
GpuCommandBufferMsg_DestroyTransferBuffer(
flush_info.route_id,
flush_info.transfer_buffer_id_to_destroy)),
std::move(flush_info.sync_token_fences));
} else {
GpuCommandBufferMsg_AsyncFlush flush_message(
flush_info.route_id, flush_info.put_offset, flush_info.flush_id);
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage,
gpu_channel_->AsWeakPtr(), flush_message),
std::move(flush_info.sync_token_fences));
}
}
scheduler_->ScheduleTasks(std::move(tasks));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment