Commit 5a49da30 authored by Francois Doray, committed by Commit Bot

Revert "GPU: Don't flush when destroying transfer buffers."

This reverts commit 7a59e081.

Reason for revert: https://crbug.com/853194

Original change's description:
> GPU: Don't flush when destroying transfer buffers.
>
> Destroying a transfer buffer now requires only an ordering barrier, not
> a full flush. This removes a source of unnecessary flushes and makes
> resizing the transfer buffer more efficient.
>
> Bug: 850271, 835353, 828363
> Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
> Change-Id: I9bfe43c46c00f8e9fc29e6450da788f86e74bc52
> Reviewed-on: https://chromium-review.googlesource.com/1093580
> Reviewed-by: Robert Sesek <rsesek@chromium.org>
> Reviewed-by: Antoine Labour <piman@chromium.org>
> Reviewed-by: Sunny Sachanandani <sunnyps@chromium.org>
> Commit-Queue: James Darpinian <jdarpinian@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#567494}

TBR=sunnyps@chromium.org,rsesek@chromium.org,piman@chromium.org,jdarpinian@chromium.org

No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 850271, 835353, 828363
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I7cd21d6cee9beca02ae28da8614a58e2f53dc848
Reviewed-on: https://chromium-review.googlesource.com/1102346
Commit-Queue: François Doray <fdoray@chromium.org>
Reviewed-by: François Doray <fdoray@chromium.org>
Cr-Commit-Position: refs/heads/master@{#567631}
parent f6842afd
......@@ -125,10 +125,8 @@ void CommandBufferHelper::SetGetBuffer(int32_t id,
void CommandBufferHelper::FreeRingBuffer() {
if (HaveRingBuffer()) {
OrderingBarrier();
FlushLazy();
command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
// SetGetBuffer is an IPC, so previous work needs to be flushed first.
Flush();
SetGetBuffer(-1, nullptr);
}
}
......
......@@ -629,8 +629,8 @@ TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
helper_->FreeRingBuffer();
EXPECT_FALSE(helper_->HaveRingBuffer());
// FreeRingBuffer should have caused an ordering barrier and a flush.
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 2);
// FreeRingBuffer should have caused a flush.
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
// However it shouldn't force a finish.
EXPECT_EQ(command_buffer_->GetLastState().get_offset, old_get_offset);
......@@ -638,7 +638,7 @@ TEST_F(CommandBufferHelperTest, FreeRingBuffer) {
// should work.
helper_->Finish();
EXPECT_FALSE(helper_->HaveRingBuffer());
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 2);
EXPECT_EQ(command_buffer_->FlushCount(), old_flush_count + 1);
EXPECT_EQ(command_buffer_->GetLastState().get_offset,
helper_->GetPutOffsetForTest());
}
......
......@@ -53,7 +53,7 @@ MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
}
MappedMemoryManager::~MappedMemoryManager() {
helper_->OrderingBarrier();
helper_->FlushLazy();
CommandBuffer* cmd_buf = helper_->command_buffer();
for (auto& chunk : chunks_) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
......@@ -152,7 +152,7 @@ void MappedMemoryManager::FreeUnused() {
chunk->FreeUnused();
if (chunk->bytes_in_use() == 0u) {
if (chunk->InUseOrFreePending())
helper_->OrderingBarrier();
helper_->FlushLazy();
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
allocated_memory_ -= chunk->GetSize();
iter = chunks_.erase(iter);
......
......@@ -65,7 +65,7 @@ bool TransferBuffer::Initialize(
void TransferBuffer::Free() {
if (HaveBuffer()) {
TRACE_EVENT0("gpu", "TransferBuffer::Free");
helper_->OrderingBarrier();
helper_->FlushLazy();
helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
buffer_id_ = -1;
buffer_ = NULL;
......
......@@ -86,8 +86,8 @@ void TransferBufferTest::TearDown() {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OnFlush()).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(2));
transfer_buffer_.reset();
}
......@@ -122,9 +122,6 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -140,9 +137,6 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -159,9 +153,6 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -179,8 +170,8 @@ TEST_F(TransferBufferTest, Free) {
int32_t put_offset = helper_->GetPutOffsetForTest();
transfer_buffer_->FreePendingToken(data, token);
// Free buffer. Should cause an ordering barrier.
EXPECT_CALL(*command_buffer(), OrderingBarrier(_)).Times(AtMost(1));
// Free buffer. Should cause a Flush.
EXPECT_CALL(*command_buffer(), Flush(_)).Times(AtMost(1));
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
......@@ -189,7 +180,9 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
EXPECT_EQ(base::UnguessableToken(),
transfer_buffer_->shared_memory_handle().GetGUID());
// Free should not have caused a finish.
// Free should have flushed.
EXPECT_EQ(put_offset, command_buffer_->GetServicePutOffset());
// However it shouldn't have caused a finish.
EXPECT_LT(command_buffer_->GetState().get_offset, put_offset);
// See that it gets reallocated.
......@@ -206,9 +199,6 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
transfer_buffer_->Free();
}
......@@ -344,18 +334,11 @@ void TransferBufferExpandContractTest::TearDown() {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
}
// For command buffer.
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), Flush(_)).Times(1).RetiresOnSaturation();
transfer_buffer_.reset();
}
......@@ -379,9 +362,6 @@ TEST_F(TransferBufferExpandContractTest, Expand) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kStartTransferBufferSize * 2, _))
.WillOnce(Invoke(
......@@ -401,9 +381,6 @@ TEST_F(TransferBufferExpandContractTest, Expand) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(),
CreateTransferBuffer(kMaxTransferBufferSize, _))
.WillOnce(Invoke(
......@@ -438,9 +415,6 @@ TEST_F(TransferBufferExpandContractTest, Contract) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -471,9 +445,6 @@ TEST_F(TransferBufferExpandContractTest, Contract) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -498,9 +469,6 @@ TEST_F(TransferBufferExpandContractTest, OutOfMemory) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......@@ -527,9 +495,6 @@ TEST_F(TransferBufferExpandContractTest, ReallocsToDefault) {
EXPECT_CALL(*command_buffer(), DestroyTransferBuffer(_))
.Times(1)
.RetiresOnSaturation();
EXPECT_CALL(*command_buffer(), OrderingBarrier(_))
.Times(1)
.RetiresOnSaturation();
transfer_buffer_->Free();
// See it's freed.
EXPECT_FALSE(transfer_buffer_->HaveBuffer());
......
......@@ -411,7 +411,7 @@ void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
if (last_state_.error != gpu::error::kNoError)
return;
channel_->DestroyTransferBuffer(route_id_, id);
Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}
void CommandBufferProxyImpl::SetGpuControlClient(GpuControlClient* client) {
......
......@@ -91,8 +91,7 @@ uint32_t GpuChannelHost::OrderingBarrier(
std::vector<SyncToken> sync_token_fences) {
AutoLock lock(context_lock_);
if (flush_list_.empty() || flush_list_.back().route_id != route_id ||
flush_list_.back().transfer_buffer_id_to_destroy)
if (flush_list_.empty() || flush_list_.back().route_id != route_id)
flush_list_.push_back(FlushParams());
FlushParams& flush_params = flush_list_.back();
......@@ -103,21 +102,9 @@ uint32_t GpuChannelHost::OrderingBarrier(
flush_params.sync_token_fences.end(),
std::make_move_iterator(sync_token_fences.begin()),
std::make_move_iterator(sync_token_fences.end()));
flush_params.transfer_buffer_id_to_destroy = 0;
return flush_params.flush_id;
}
void GpuChannelHost::DestroyTransferBuffer(int32_t route_id,
int32_t id_to_destroy) {
AutoLock lock(context_lock_);
flush_list_.push_back(FlushParams());
FlushParams& flush_params = flush_list_.back();
flush_params.flush_id = next_flush_id_++;
flush_params.route_id = route_id;
flush_params.transfer_buffer_id_to_destroy = id_to_destroy;
}
void GpuChannelHost::EnsureFlush(uint32_t flush_id) {
AutoLock lock(context_lock_);
InternalFlush(flush_id);
......@@ -185,10 +172,7 @@ base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
int32_t GpuChannelHost::ReserveTransferBufferId() {
// 0 is a reserved value.
int32_t id = g_next_transfer_buffer_id.GetNext();
if (id)
return id;
return g_next_transfer_buffer_id.GetNext();
return g_next_transfer_buffer_id.GetNext() + 1;
}
int32_t GpuChannelHost::ReserveImageId() {
......
......@@ -121,10 +121,6 @@ class GPU_EXPORT GpuChannelHost
// Reserve one unused transfer buffer ID.
int32_t ReserveTransferBufferId();
// An ordering barrier must be placed after any commands that use the buffer
// before it is safe to call this function to destroy it.
void DestroyTransferBuffer(int32_t route_id, int32_t id_to_destroy);
// Reserve one unused image ID.
int32_t ReserveImageId();
......
......@@ -23,8 +23,6 @@ struct GPU_EXPORT FlushParams {
// Route ID of the command buffer for this flush.
int32_t route_id;
// If nonzero, destroy this transfer buffer instead of flushing.
int32_t transfer_buffer_id_to_destroy;
// Client put offset. Service get offset is updated in shared memory.
int32_t put_offset;
// Increasing counter for the flush.
......
......@@ -21,7 +21,6 @@
IPC_STRUCT_TRAITS_BEGIN(gpu::FlushParams)
IPC_STRUCT_TRAITS_MEMBER(route_id)
IPC_STRUCT_TRAITS_MEMBER(transfer_buffer_id_to_destroy)
IPC_STRUCT_TRAITS_MEMBER(put_offset)
IPC_STRUCT_TRAITS_MEMBER(flush_id)
IPC_STRUCT_TRAITS_MEMBER(sync_token_fences)
......
......@@ -252,31 +252,20 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
std::vector<FlushParams> flush_list = std::get<0>(std::move(params));
for (auto& flush_info : flush_list) {
GpuCommandBufferMsg_AsyncFlush flush_message(
flush_info.route_id, flush_info.put_offset, flush_info.flush_id);
auto it = route_sequences_.find(flush_info.route_id);
if (it == route_sequences_.end()) {
DLOG(ERROR) << "Invalid route id in flush list";
continue;
}
if (flush_info.transfer_buffer_id_to_destroy) {
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage,
gpu_channel_->AsWeakPtr(),
GpuCommandBufferMsg_DestroyTransferBuffer(
flush_info.route_id,
flush_info.transfer_buffer_id_to_destroy)),
std::move(flush_info.sync_token_fences));
} else {
GpuCommandBufferMsg_AsyncFlush flush_message(
flush_info.route_id, flush_info.put_offset, flush_info.flush_id);
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage,
gpu_channel_->AsWeakPtr(), flush_message),
std::move(flush_info.sync_token_fences));
}
tasks.emplace_back(
it->second /* sequence_id */,
base::BindOnce(&GpuChannel::HandleMessage, gpu_channel_->AsWeakPtr(),
flush_message),
std::move(flush_info.sync_token_fences));
}
scheduler_->ScheduleTasks(std::move(tasks));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.