Commit ef0850f4 authored by miletus, committed by Commit bot

Merge GpuCommandBufferMsg_SetLatencyInfo into GpuCommandBufferMsg_AsyncFlush

GpuCommandBufferMsg_SetLatencyInfo is used to send the current frame's LatencyInfo
to the GPU process. We can piggyback the LatencyInfo on GpuCommandBufferMsg_AsyncFlush
and so eliminate the extra IPC.

BUG=404650
TEST=check in a trace that GLRenderer::SwapBuffers now sends only 2 IPC messages,
     i.e. AsyncFlush and Echo, and that LatencyInfo tracking still works.

Review URL: https://codereview.chromium.org/564903002

Cr-Commit-Position: refs/heads/master@{#294895}
parent 6a755527
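Not part of the commit, but for readers unfamiliar with the pattern: below is a minimal standalone sketch of the piggybacking idea, using hypothetical stand-in names (LatencyInfo, SendAsyncFlush, CommandBufferClient) rather than the real Chromium classes. The client buffers the frame's latency info, and the next flush IPC carries it, so no separate SetLatencyInfo message is needed.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for ui::LatencyInfo (hypothetical, for illustration only).
struct LatencyInfo {
  std::string trace_name;
};

// Stand-in for the IPC send; the real code sends GpuCommandBufferMsg_AsyncFlush.
void SendAsyncFlush(int32_t put_offset,
                    uint32_t flush_count,
                    const std::vector<LatencyInfo>& latency_info) {
  std::cout << "AsyncFlush put_offset=" << put_offset
            << " flush_count=" << flush_count
            << " latency_events=" << latency_info.size() << "\n";
}

// Stand-in for CommandBufferProxyImpl.
class CommandBufferClient {
 public:
  // Buffer the current frame's latency info; no IPC is sent here anymore.
  void SetLatencyInfo(const std::vector<LatencyInfo>& latency_info) {
    latency_info_.insert(latency_info_.end(), latency_info.begin(),
                         latency_info.end());
  }

  // The flush IPC carries the buffered latency info, then the buffer is cleared.
  void Flush(int32_t put_offset) {
    SendAsyncFlush(put_offset, ++flush_count_, latency_info_);
    latency_info_.clear();
  }

 private:
  uint32_t flush_count_ = 0;
  std::vector<LatencyInfo> latency_info_;
};

int main() {
  CommandBufferClient client;
  client.SetLatencyInfo({{"swap"}});  // recorded, not yet sent
  client.Flush(/*put_offset=*/128);   // one IPC carries put offset + latency info
  return 0;
}

The design trade-off: latency info now reaches the GPU process only with the next flush, so it must be recorded before that flush is issued.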
......@@ -35,7 +35,6 @@ void GpuBrowserCompositorOutputSurface::SwapBuffers(
   CommandBufferProxyImpl* command_buffer_proxy =
       provider_command_buffer->GetCommandBufferProxy();
   DCHECK(command_buffer_proxy);
-  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
   command_buffer_proxy->SetLatencyInfo(frame->metadata.latency_info);
   if (reflector_.get()) {
......
......@@ -189,15 +189,15 @@ void CommandBufferProxyImpl::Flush(int32 put_offset) {
   Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                           put_offset,
-                                          ++flush_count_));
+                                          ++flush_count_,
+                                          latency_info_));
+  latency_info_.clear();
 }

 void CommandBufferProxyImpl::SetLatencyInfo(
     const std::vector<ui::LatencyInfo>& latency_info) {
-  if (last_state_.error != gpu::error::kNoError ||
-      latency_info.empty())
-    return;
-  Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
+  for (size_t i = 0; i < latency_info.size(); i++)
+    latency_info_.push_back(latency_info[i]);
 }

 void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
......
......@@ -204,6 +204,8 @@ class CommandBufferProxyImpl
   gpu::Capabilities capabilities_;

+  std::vector<ui::LatencyInfo> latency_info_;
+
   DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
 };
......
......@@ -667,21 +667,15 @@ size_t GpuChannel::MatchSwapBufferMessagesPattern(
   DCHECK(current_message);
   if (deferred_messages_.empty() || !current_message)
     return 0;
-  // Only care about SetLatencyInfo and AsyncFlush message.
-  if (current_message->type() != GpuCommandBufferMsg_SetLatencyInfo::ID &&
-      current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
+  // Only care about AsyncFlush message.
+  if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
     return 0;

   size_t index = 0;
   int32 routing_id = current_message->routing_id();

-  // In case of the current message is SetLatencyInfo, we try to look ahead one
-  // more deferred messages.
-  IPC::Message *first_message = NULL;
-  IPC::Message *second_message = NULL;

   // Fetch the first message and move index to point to the second message.
-  first_message = deferred_messages_[index++];
+  IPC::Message* first_message = deferred_messages_[index++];

   // If the current message is AsyncFlush, the expected message sequence for
   // SwapBuffer should be AsyncFlush->Echo. We only try to match Echo message.
......@@ -691,20 +685,6 @@ size_t GpuChannel::MatchSwapBufferMessagesPattern(
     return 1;
   }

-  // If the current message is SetLatencyInfo, the expected message sequence
-  // for SwapBuffer should be SetLatencyInfo->AsyncFlush->Echo (optional).
-  if (current_message->type() == GpuCommandBufferMsg_SetLatencyInfo::ID &&
-      first_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
-      first_message->routing_id() == routing_id) {
-    if (deferred_messages_.size() >= 2)
-      second_message = deferred_messages_[index];
-    if (!second_message)
-      return 1;
-    if (second_message->type() == GpuCommandBufferMsg_Echo::ID &&
-        second_message->routing_id() == routing_id) {
-      return 2;
-    }
-  }
   // No matched message is found.
   return 0;
 }
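Not part of the commit: below is a simplified sketch, with hypothetical Message and MsgType stand-ins, of what the matcher has left to do after this change. Since SetLatencyInfo no longer exists, a SwapBuffers shows up in the deferred queue simply as an AsyncFlush followed by an Echo on the same route.

#include <cassert>
#include <cstddef>
#include <deque>

enum class MsgType { kAsyncFlush, kEcho, kOther };

struct Message {
  MsgType type;
  int routing_id;
};

// Returns how many deferred messages complete the SwapBuffers pattern for
// |current|: 1 if the next deferred message is an Echo on the same route,
// otherwise 0.
size_t MatchSwapBufferPattern(const Message& current,
                              const std::deque<Message>& deferred) {
  if (current.type != MsgType::kAsyncFlush || deferred.empty())
    return 0;
  const Message& next = deferred.front();
  if (next.type == MsgType::kEcho && next.routing_id == current.routing_id)
    return 1;
  return 0;
}

int main() {
  std::deque<Message> deferred = {{MsgType::kEcho, 7}};
  assert(MatchSwapBufferPattern({MsgType::kAsyncFlush, 7}, deferred) == 1);
  assert(MatchSwapBufferPattern({MsgType::kOther, 7}, deferred) == 0);
  return 0;
}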
......@@ -771,7 +751,7 @@ void GpuChannel::HandleMessage() {
     // We process the pending messages immediately if these messages matches
     // the pattern of SwapBuffers, for example, GLRenderer always issues
     // SwapBuffers calls with a specific IPC message patterns, for example,
-    // it should be SetLatencyInfo->AsyncFlush->Echo sequence.
+    // it should be AsyncFlush->Echo sequence.
     //
     // Instead of posting a task to message loop, it could avoid the possibility
     // of being blocked by other channels, and make SwapBuffers executed as soon
......
......@@ -170,7 +170,7 @@ class GpuChannel : public IPC::Listener, public IPC::Sender {
   // Try to match the messages pattern for GL SwapBuffers operation in the
   // deferred message queue starting from the current processing message.
   // Return the number of messages that matches the given pattern, e.g.
-  // SetLatencyInfo -> AsyncFlush -> Echo sequence.
+  // AsyncFlush -> Echo sequence.
   size_t MatchSwapBufferMessagesPattern(IPC::Message* current_message);

   // The lifetime of objects of this class is managed by a GpuChannelManager.
......
......@@ -229,8 +229,7 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
   if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
       message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
       message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
-      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
-      message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
+      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) {
     if (!MakeCurrent())
       return false;
     have_context = true;
......@@ -252,7 +251,6 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                     OnWaitForGetOffsetInRange);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                         OnRegisterTransferBuffer);
......@@ -630,15 +628,6 @@ void GpuCommandBufferStub::OnInitialize(
   }
 }

-void GpuCommandBufferStub::OnSetLatencyInfo(
-    const std::vector<ui::LatencyInfo>& latency_info) {
-  if (!ui::LatencyInfo::Verify(latency_info,
-                               "GpuCommandBufferStub::OnSetLatencyInfo"))
-    return;
-  if (!latency_info_callback_.is_null())
-    latency_info_callback_.Run(latency_info);
-}
-
 void GpuCommandBufferStub::OnCreateStreamTexture(
     uint32 texture_id, int32 stream_id, bool* succeeded) {
 #if defined(OS_ANDROID)
......@@ -758,9 +747,18 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
   }
 }

-void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
+void GpuCommandBufferStub::OnAsyncFlush(
+    int32 put_offset,
+    uint32 flush_count,
+    const std::vector<ui::LatencyInfo>& latency_info) {
   TRACE_EVENT1(
       "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
+  if (ui::LatencyInfo::Verify(latency_info,
+                              "GpuCommandBufferStub::OnAsyncFlush") &&
+      !latency_info_callback_.is_null()) {
+    latency_info_callback_.Run(latency_info);
+  }
+
   DCHECK(command_buffer_.get());
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
......
......@@ -164,7 +164,8 @@ class GpuCommandBufferStub
   void OnWaitForGetOffsetInRange(int32 start,
                                  int32 end,
                                  IPC::Message* reply_message);
-  void OnAsyncFlush(int32 put_offset, uint32 flush_count);
+  void OnAsyncFlush(int32 put_offset, uint32 flush_count,
+                    const std::vector<ui::LatencyInfo>& latency_info);
   void OnEcho(const IPC::Message& message);
   void OnRescheduled();
   void OnRegisterTransferBuffer(int32 id,
......@@ -205,7 +206,6 @@ class GpuCommandBufferStub
   void OnCommandProcessed();
   void OnParseError();
-  void OnSetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
   void OnCreateStreamTexture(
       uint32 texture_id, int32 stream_id, bool* succeeded);
......
......@@ -524,14 +524,11 @@ IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
 // Asynchronously synchronize the put and get offsets of both processes.
 // Caller passes its current put offset. Current state (including get offset)
-// is returned in shared memory.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_AsyncFlush,
+// is returned in shared memory. The input latency info for the current
+// frame is also sent to the GPU process.
+IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
                     int32 /* put_offset */,
-                    uint32 /* flush_count */)
-
-// Sends information about the latency of the current frame to the GPU
-// process.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetLatencyInfo,
-                    std::vector<ui::LatencyInfo> /* latency_info */)
+                    uint32 /* flush_count */,
+                    std::vector<ui::LatencyInfo> /* latency_info */)

 // Asynchronously process any commands known to the GPU process. This is only
......
......@@ -186,7 +186,6 @@ void CompositorOutputSurface::SwapBuffers(cc::CompositorFrame* frame) {
   }
   if (frame->gl_frame_data) {
-    context_provider()->ContextGL()->ShallowFlushCHROMIUM();
     ContextProviderCommandBuffer* provider_command_buffer =
         static_cast<ContextProviderCommandBuffer*>(context_provider());
     CommandBufferProxyImpl* command_buffer_proxy =
......