Commit fd979a83 authored by Sunny Sachanandani, committed by Commit Bot

ppapi: Unit tests for pepper command buffer proxy

Similar to the IPC command buffer proxy unit tests.

R=piman
BUG=824510

Change-Id: I14ecfc448e12d4f143b645cc9290c476fdbf5d0d
Reviewed-on: https://chromium-review.googlesource.com/978959
Commit-Queue: Sunny Sachanandani <sunnyps@chromium.org>
Reviewed-by: Bill Budge <bbudge@chromium.org>
Reviewed-by: Antoine Labour <piman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#545899}
parent 7a1b3bb0
......@@ -348,6 +348,7 @@ test("ppapi_unittests") {
"proxy/plugin_dispatcher_unittest.cc",
"proxy/plugin_resource_tracker_unittest.cc",
"proxy/plugin_var_tracker_unittest.cc",
"proxy/ppapi_command_buffer_proxy_unittest.cc",
"proxy/ppb_var_unittest.cc",
"proxy/ppp_instance_private_proxy_unittest.cc",
"proxy/ppp_instance_proxy_unittest.cc",
......
......@@ -45,7 +45,7 @@ class ResourceCreationAPI;
namespace proxy {
// Used to keep track of per-instance data.
struct InstanceData {
struct PPAPI_PROXY_EXPORT InstanceData {
InstanceData();
~InstanceData();
......@@ -55,7 +55,7 @@ struct InstanceData {
scoped_refptr<TrackedCallback> mouse_lock_callback;
// A map of singleton resources which are lazily created.
typedef std::map<SingletonResourceID, scoped_refptr<Resource> >
typedef std::map<SingletonResourceID, scoped_refptr<Resource>>
SingletonResourceMap;
SingletonResourceMap singleton_resources;
......@@ -71,18 +71,29 @@ struct InstanceData {
std::unique_ptr<MessageHandler> message_handler;
// Flush info for PpapiCommandBufferProxy::OrderingBarrier().
struct FlushInfo {
struct PPAPI_PROXY_EXPORT FlushInfo {
FlushInfo();
~FlushInfo();
bool flush_pending;
HostResource resource;
int32_t put_offset;
};
FlushInfo flush_info_;
FlushInfo flush_info;
};
// Abstract sender that transmits an IPC message while keeping the Pepper
// proxy lock held for the entire send, even when the message is synchronous.
class PPAPI_PROXY_EXPORT LockedSender {
 public:
  // Unlike a plain |Send()|, this keeps holding the Pepper proxy lock until
  // sending |msg| has completed, including for synchronous messages.
  virtual bool SendAndStayLocked(IPC::Message* msg) = 0;

 protected:
  // Deletion through this interface is not supported.
  virtual ~LockedSender() = default;
};
class PPAPI_PROXY_EXPORT PluginDispatcher
: public Dispatcher,
public LockedSender,
public base::SupportsWeakPtr<PluginDispatcher> {
public:
class PPAPI_PROXY_EXPORT PluginDelegate : public ProxyChannel::Delegate {
......@@ -179,7 +190,7 @@ class PPAPI_PROXY_EXPORT PluginDispatcher
// Unlike |Send()|, this function continues to hold the Pepper proxy lock
// until we are finished sending |msg|, even if it is a synchronous message.
bool SendAndStayLocked(IPC::Message* msg);
bool SendAndStayLocked(IPC::Message* msg) override;
// IPC::Listener implementation.
bool OnMessageReceived(const IPC::Message& msg) override;
......
......@@ -17,22 +17,22 @@ namespace proxy {
PpapiCommandBufferProxy::PpapiCommandBufferProxy(
const ppapi::HostResource& resource,
PluginDispatcher* dispatcher,
InstanceData::FlushInfo* flush_info,
LockedSender* sender,
const gpu::Capabilities& capabilities,
const SerializedHandle& shared_state,
gpu::CommandBufferId command_buffer_id)
: command_buffer_id_(command_buffer_id),
capabilities_(capabilities),
resource_(resource),
dispatcher_(dispatcher),
flush_info_(flush_info),
sender_(sender),
next_fence_sync_release_(1),
pending_fence_sync_release_(0),
flushed_fence_sync_release_(0),
validated_fence_sync_release_(0) {
shared_state_shm_.reset(new base::SharedMemory(shared_state.shmem(), false));
shared_state_shm_->Map(shared_state.size());
InstanceData* data = dispatcher->GetInstanceData(resource.instance());
flush_info_ = &data->flush_info_;
}
PpapiCommandBufferProxy::~PpapiCommandBufferProxy() {
......@@ -190,7 +190,10 @@ gpu::CommandBufferId PpapiCommandBufferProxy::GetCommandBufferID() const {
}
void PpapiCommandBufferProxy::FlushPendingWork() {
// This is only relevant for out-of-process command buffers.
if (last_state_.error != gpu::error::kNoError)
return;
if (flush_info_->flush_pending)
FlushInternal();
}
uint64_t PpapiCommandBufferProxy::GenerateFenceSyncRelease() {
......@@ -263,7 +266,7 @@ bool PpapiCommandBufferProxy::Send(IPC::Message* msg) {
// buffer may use a sync IPC with another lock held which could lead to lock
// inversion and deadlock if we dropped the proxy lock here.
// http://crbug.com/418651
if (dispatcher_->SendAndStayLocked(msg))
if (sender_->SendAndStayLocked(msg))
return true;
last_state_.error = gpu::error::kLostContext;
......
......@@ -34,7 +34,8 @@ class PPAPI_PROXY_EXPORT PpapiCommandBufferProxy : public gpu::CommandBuffer,
public gpu::GpuControl {
public:
PpapiCommandBufferProxy(const HostResource& resource,
PluginDispatcher* dispatcher,
InstanceData::FlushInfo* flush_info,
LockedSender* sender,
const gpu::Capabilities& capabilities,
const SerializedHandle& shared_state,
gpu::CommandBufferId command_buffer_id);
......@@ -98,12 +99,11 @@ class PPAPI_PROXY_EXPORT PpapiCommandBufferProxy : public gpu::CommandBuffer,
std::unique_ptr<base::SharedMemory> shared_state_shm_;
HostResource resource_;
PluginDispatcher* dispatcher_;
InstanceData::FlushInfo* flush_info_;
LockedSender* sender_;
base::Closure channel_error_callback_;
InstanceData::FlushInfo *flush_info_;
uint64_t next_fence_sync_release_;
uint64_t pending_fence_sync_release_;
uint64_t flushed_fence_sync_release_;
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ppapi/proxy/ppapi_command_buffer_proxy.h"

#include <stdint.h>

#include <tuple>

#include "ipc/ipc_test_sink.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace ppapi {
// Harness for PpapiCommandBufferProxy that routes its outgoing IPC into an
// IPC::TestSink so each message can be inspected. The fixture itself serves
// as the proxy's LockedSender.
class PpapiCommandBufferProxyTest : public testing::Test,
                                    public proxy::LockedSender {
 public:
  PpapiCommandBufferProxyTest()
      : proxy_(HostResource(),
               &flush_info_,
               this,
               gpu::Capabilities(),
               proxy::SerializedHandle(proxy::SerializedHandle::SHARED_MEMORY),
               gpu::CommandBufferId()) {}

  ~PpapiCommandBufferProxyTest() override = default;

 protected:
  // proxy::LockedSender implementation. Messages go straight into the sink,
  // so sync message behavior can't be verified with this setup.
  bool SendAndStayLocked(IPC::Message* msg) override { return sink_.Send(msg); }

  IPC::TestSink sink_;
  proxy::InstanceData::FlushInfo flush_info_;
  proxy::PpapiCommandBufferProxy proxy_;
};
TEST_F(PpapiCommandBufferProxyTest, OrderingBarriersAreCoalescedWithFlush) {
  // A run of ordering barriers followed by a Flush should collapse into one
  // AsyncFlush IPC that carries only the final put offset.
  proxy_.OrderingBarrier(10);
  proxy_.OrderingBarrier(20);
  proxy_.OrderingBarrier(30);
  proxy_.Flush(40);

  EXPECT_EQ(1u, sink_.message_count());
  const IPC::Message* flush_msg =
      sink_.GetFirstMessageMatching(PpapiHostMsg_PPBGraphics3D_AsyncFlush::ID);
  ASSERT_TRUE(flush_msg);

  PpapiHostMsg_PPBGraphics3D_AsyncFlush::Param sent;
  ASSERT_TRUE(PpapiHostMsg_PPBGraphics3D_AsyncFlush::Read(flush_msg, &sent));
  EXPECT_EQ(40, std::get<1>(sent));

  // The pending-flush state was consumed and records the offset just sent.
  EXPECT_FALSE(flush_info_.flush_pending);
  EXPECT_EQ(40, flush_info_.put_offset);
}
TEST_F(PpapiCommandBufferProxyTest, FlushPendingWorkFlushesOrderingBarriers) {
  // FlushPendingWork alone should emit a single AsyncFlush for the last
  // ordering barrier's put offset.
  proxy_.OrderingBarrier(10);
  proxy_.OrderingBarrier(20);
  proxy_.OrderingBarrier(30);
  proxy_.FlushPendingWork();

  EXPECT_EQ(1u, sink_.message_count());
  const IPC::Message* flush_msg =
      sink_.GetFirstMessageMatching(PpapiHostMsg_PPBGraphics3D_AsyncFlush::ID);
  ASSERT_TRUE(flush_msg);

  PpapiHostMsg_PPBGraphics3D_AsyncFlush::Param sent;
  ASSERT_TRUE(PpapiHostMsg_PPBGraphics3D_AsyncFlush::Read(flush_msg, &sent));
  EXPECT_EQ(30, std::get<1>(sent));

  // The pending-flush state was consumed and records the offset just sent.
  EXPECT_FALSE(flush_info_.flush_pending);
  EXPECT_EQ(30, flush_info_.put_offset);
}
TEST_F(PpapiCommandBufferProxyTest, EnsureWorkVisibleFlushesOrderingBarriers) {
  proxy_.OrderingBarrier(10);
  proxy_.OrderingBarrier(20);
  proxy_.OrderingBarrier(30);
  proxy_.EnsureWorkVisible();

  // Two messages are expected, in order: an AsyncFlush for the pending
  // ordering barrier, then the EnsureWorkVisible itself.
  EXPECT_EQ(2u, sink_.message_count());

  const IPC::Message* first = sink_.GetMessageAt(0);
  ASSERT_TRUE(first);
  EXPECT_EQ(static_cast<uint32_t>(PpapiHostMsg_PPBGraphics3D_AsyncFlush::ID),
            first->type());
  PpapiHostMsg_PPBGraphics3D_AsyncFlush::Param sent;
  ASSERT_TRUE(PpapiHostMsg_PPBGraphics3D_AsyncFlush::Read(first, &sent));
  EXPECT_EQ(30, std::get<1>(sent));
  EXPECT_FALSE(flush_info_.flush_pending);
  EXPECT_EQ(30, flush_info_.put_offset);

  const IPC::Message* second = sink_.GetMessageAt(1);
  ASSERT_TRUE(second);
  EXPECT_EQ(
      static_cast<uint32_t>(PpapiHostMsg_PPBGraphics3D_EnsureWorkVisible::ID),
      second->type());
}
} // namespace ppapi
......@@ -61,9 +61,12 @@ bool Graphics3D::Init(gpu::gles2::GLES2Implementation* share_gles2,
if (!dispatcher)
return false;
InstanceData* data = dispatcher->GetInstanceData(host_resource().instance());
DCHECK(data);
command_buffer_.reset(new PpapiCommandBufferProxy(
host_resource(), dispatcher, capabilities, shared_state,
command_buffer_id));
host_resource(), &data->flush_info, dispatcher, capabilities,
shared_state, command_buffer_id));
return CreateGLES2Impl(share_gles2);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment