Commit 1a3d539e authored by Yuzhu Shen, committed by Commit Bot

Fix sync IPC perf tests.

- Actually use IPC::SyncChannel to send sync messages.
- Record the end time more accurately.

After this change, Mojo sync IPC is faster than IPC-over-Mojo sync IPC:
(Measured on a Linux z620; release build with profiling turned on.)

MojoChannelPerfTest.ChannelProxySyncPing
IPC_ChannelProxy_Perf_50000x_144  6507.49 ms

MojoInterfacePerfTest.MultiprocessSyncPing
IPC_MultiprocessSync_Perf_50000x_144  5283.86 ms
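
The crux of the fix, excerpted from RunTestChannelProxySyncPing() in the
diff below: the server now creates an IPC::SyncChannel, which can block on
sync replies and requires a shutdown event, instead of a plain ChannelProxy:

    base::WaitableEvent shutdown_event(
        base::WaitableEvent::ResetPolicy::MANUAL,
        base::WaitableEvent::InitialState::NOT_SIGNALED);
    auto channel_proxy = IPC::SyncChannel::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner(), false, &shutdown_event);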

BUG=743263

Change-Id: Ie1834869b8d88e25a5986bdc9714132d6b7fc87b
Reviewed-on: https://chromium-review.googlesource.com/575032
Commit-Queue: Yuzhu Shen <yzshen@chromium.org>
Commit-Queue: John Abd-El-Malek <jam@chromium.org>
Reviewed-by: John Abd-El-Malek <jam@chromium.org>
Cr-Commit-Position: refs/heads/master@{#487326}
parent 11a66767
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stddef.h>

#include <memory>

#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/test/perf_time_logger.h"
#include "base/test/test_io_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "ipc/ipc_channel_mojo.h"
#include "ipc/ipc_sync_channel.h"
#include "ipc/ipc_test.mojom.h"
#include "ipc/ipc_test_base.h"
#include "mojo/edk/embedder/embedder.h"
#include "mojo/edk/embedder/platform_channel_pair.h"
#include "mojo/edk/test/mojo_test_base.h"
#include "mojo/edk/test/multiprocess_test_helper.h"
#include "mojo/public/cpp/bindings/binding.h"
#include "mojo/public/cpp/system/message_pipe.h"

#define IPC_MESSAGE_IMPL
#include "ipc/ipc_message_macros.h"

#define IPC_MESSAGE_START TestMsgStart

IPC_MESSAGE_CONTROL0(TestMsg_Hello)
IPC_MESSAGE_CONTROL0(TestMsg_Quit)
IPC_MESSAGE_CONTROL1(TestMsg_Ping, std::string)
IPC_SYNC_MESSAGE_CONTROL1_1(TestMsg_SyncPing, std::string, std::string)
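
// Note on semantics: TestMsg_SyncPing is a synchronous message. Sending it
// through an IPC::SyncChannel blocks the calling thread until the receiver's
// handler has filled in the std::string reply; the other messages above are
// ordinary asynchronous IPCs.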

namespace IPC {
namespace {

class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(NULL),
        msg_count_(0),
        msg_size_(0),
        sync_(false),
        count_down_(0) {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override { VLOG(1) << "Server listener down"; }

  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size, bool sync) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    sync_ = sync;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(PerformanceChannelListener, message)
      IPC_MESSAGE_HANDLER(TestMsg_Hello, OnHello)
      IPC_MESSAGE_HANDLER(TestMsg_Ping, OnPing)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

  void OnHello() {
    // Start timing on hello.
    DCHECK(!perf_logger_.get());
    std::string test_name =
        base::StringPrintf("IPC_%s_Perf_%dx_%u", label_.c_str(), msg_count_,
                           static_cast<unsigned>(msg_size_));
    perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    if (sync_) {
      for (int i = 0; i < count_down_; ++i) {
        std::string response;
        sender_->Send(new TestMsg_SyncPing(payload_, &response));
        DCHECK_EQ(response, payload_);
      }
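      // Stop the timer immediately after the last sync reply instead of
      // after the run loop unwinds, so the logged time covers only the
      // pings themselves (the "record the end time more accurately" fix).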
      perf_logger_.reset();
      base::MessageLoop::current()->QuitWhenIdle();
    } else {
      SendPong();
    }
  }

  void OnPing(const std::string& payload) {
    // Include message deserialization in latency.
    DCHECK_EQ(payload_.size(), payload.size());

    CHECK(count_down_ > 0);
    count_down_--;
    if (count_down_ == 0) {
      perf_logger_.reset();  // Stop the perf timer now.
      base::MessageLoop::current()->QuitWhenIdle();
      return;
    }

    SendPong();
  }

  void SendPong() { sender_->Send(new TestMsg_Ping(payload_)); }

 private:
  std::string label_;
  Sender* sender_;
  int msg_count_;
  size_t msg_size_;
  bool sync_;

  int count_down_;
  std::string payload_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;
};

// This channel listener just replies to all messages with the exact same
// message. It assumes each message has one string parameter. When the string
// "quit" is sent, it will exit.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener() : channel_(NULL) {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override { VLOG(1) << "Client listener down"; }

  void Init(Sender* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(ChannelReflectorListener, message)
      IPC_MESSAGE_HANDLER(TestMsg_Hello, OnHello)
      IPC_MESSAGE_HANDLER(TestMsg_Ping, OnPing)
      IPC_MESSAGE_HANDLER(TestMsg_SyncPing, OnSyncPing)
      IPC_MESSAGE_HANDLER(TestMsg_Quit, OnQuit)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

  void OnHello() { channel_->Send(new TestMsg_Hello); }

  void OnPing(const std::string& payload) {
    channel_->Send(new TestMsg_Ping(payload));
  }

  void OnSyncPing(const std::string& payload, std::string* response) {
    *response = payload;
  }

  void OnQuit() { base::MessageLoop::current()->QuitWhenIdle(); }

  void Send(IPC::Message* message) { channel_->Send(message); }

 private:
  Sender* channel_;
};
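
// Note that replying to a sync message does not require a SyncChannel on the
// replying side: OnSyncPing() just fills in |response|, and the generated
// dispatch code sends the reply message once the handler returns.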

// This class locks the current thread to a particular CPU core. This is
// important because otherwise the different threads and processes of these
// tests end up on different CPU cores which means that all of the cores are
// lightly loaded so the OS (Windows and Linux) fails to ramp up the CPU
// frequency, leading to unpredictable and often poor performance.
class LockThreadAffinity {
 public:
  explicit LockThreadAffinity(int cpu_number) : affinity_set_ok_(false) {
#if defined(OS_WIN)
    const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
    old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
    affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu_number, &cpuset);
    auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, get_result);
    auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
    // Check for get_result failure, even though it should always succeed.
    affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
    if (!affinity_set_ok_)
      LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
  }

  ~LockThreadAffinity() {
    if (!affinity_set_ok_)
      return;
#if defined(OS_WIN)
    auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
    DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
    auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, set_result);
#endif
  }

 private:
  bool affinity_set_ok_;
#if defined(OS_WIN)
  DWORD_PTR old_affinity_;
#elif defined(OS_LINUX)
  cpu_set_t old_cpuset_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LockThreadAffinity);
};

class PingPongTestParams {
 public:
  PingPongTestParams(size_t size, int count)
      : message_size_(size), message_count_(count) {}

  size_t message_size() const { return message_size_; }
  int message_count() const { return message_count_; }

 private:
  size_t message_size_;
  int message_count_;
};

std::vector<PingPongTestParams> GetDefaultTestParams() {
  // Test several sizes. We use 12^N for message size, and limit the message
  // count to keep the test duration reasonable.
#ifdef NDEBUG
  const int kMultiplier = 100;
#else
  // Debug builds on Windows run these tests orders of magnitude more slowly.
  const int kMultiplier = 1;
#endif
  std::vector<PingPongTestParams> list;
  list.push_back(PingPongTestParams(12, 500 * kMultiplier));
  list.push_back(PingPongTestParams(144, 500 * kMultiplier));
  list.push_back(PingPongTestParams(1728, 500 * kMultiplier));
  list.push_back(PingPongTestParams(20736, 120 * kMultiplier));
  list.push_back(PingPongTestParams(248832, 10 * kMultiplier));
  return list;
}

// Avoid core 0 due to conflicts with Intel's Power Gadget.
// Setting thread affinity will fail harmlessly on single/dual core machines.
const int kSharedCore = 2;
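
// Both ends of each ping-pong construct a LockThreadAffinity on kSharedCore,
// so client and server contend for the same core and keep it loaded enough
// for the OS to raise the CPU frequency.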

class MojoChannelPerfTest : public IPCChannelMojoTestBase {
 public:
  MojoChannelPerfTest() = default;
  ~MojoChannelPerfTest() override = default;

  void RunTestChannelProxyPingPong() {
    io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));

    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("ChannelProxy");
    auto channel_proxy = IPC::ChannelProxy::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner());
    listener.Init(channel_proxy.get());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size(), false);

      // This initial message will kick-start the ping-pong of messages.
      channel_proxy->Send(new TestMsg_Hello);

      // Run message loop.
      base::RunLoop().Run();
    }

    // Send quit message.
    channel_proxy->Send(new TestMsg_Quit);

    EXPECT_TRUE(WaitForClientShutdown());
    channel_proxy.reset();

    io_thread_.reset();
  }

  void RunTestChannelProxySyncPing() {
    io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));

    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("ChannelProxy");
    base::WaitableEvent shutdown_event(
        base::WaitableEvent::ResetPolicy::MANUAL,
        base::WaitableEvent::InitialState::NOT_SIGNALED);
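    // Unlike ChannelProxy, SyncChannel can dispatch blocking Send() calls for
    // sync messages; the WaitableEvent lets a blocked send be aborted at
    // shutdown. The |false| argument is create_pipe_now.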
    auto channel_proxy = IPC::SyncChannel::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner(), false, &shutdown_event);
    listener.Init(channel_proxy.get());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size(), true);

      // This initial message will kick-start the ping-pong of messages.
      channel_proxy->Send(new TestMsg_Hello);

      // Run message loop.
      base::RunLoop().Run();
    }

    // Send quit message.
    channel_proxy->Send(new TestMsg_Quit);

    EXPECT_TRUE(WaitForClientShutdown());
    channel_proxy.reset();

    io_thread_.reset();
  }

  scoped_refptr<base::TaskRunner> io_task_runner() {
    if (io_thread_)
      return io_thread_->task_runner();
    return base::ThreadTaskRunnerHandle::Get();
  }

 private:
  std::unique_ptr<base::TestIOThread> io_thread_;
};

TEST_F(MojoChannelPerfTest, ChannelProxyPingPong) {
  RunTestChannelProxyPingPong();

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}

TEST_F(MojoChannelPerfTest, ChannelProxySyncPing) {
  RunTestChannelProxySyncPing();

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}
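
// The trailing RunUntilIdle() drains any tasks still queued on the main
// thread (e.g. channel teardown work) before the fixture is destroyed.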

class MojoPerfTestClient {
 public:
  MojoPerfTestClient() : listener_(new ChannelReflectorListener()) {
    mojo::edk::test::MultiprocessTestHelper::ChildSetup();
  }

  ~MojoPerfTestClient() = default;

  int Run(MojoHandle handle) {
    handle_ = mojo::MakeScopedHandle(mojo::MessagePipeHandle(handle));
    LockThreadAffinity thread_locker(kSharedCore);
    base::TestIOThread io_thread(base::TestIOThread::kAutoStart);

    std::unique_ptr<ChannelProxy> channel =
        IPC::ChannelProxy::Create(handle_.release(), Channel::MODE_CLIENT,
                                  listener_.get(), io_thread.task_runner());
    listener_->Init(channel.get());

    base::RunLoop().Run();
    return 0;
  }

 private:
  base::MessageLoop main_message_loop_;
  std::unique_ptr<ChannelReflectorListener> listener_;
  std::unique_ptr<Channel> channel_;
  mojo::ScopedMessagePipeHandle handle_;
};

MULTIPROCESS_TEST_MAIN(MojoPerfTestClientTestChildMain) {
  MojoPerfTestClient client;
  int rv = mojo::edk::test::MultiprocessTestHelper::RunClientMain(
      base::Bind(&MojoPerfTestClient::Run, base::Unretained(&client)),
      true /* pass_pipe_ownership_to_main */);

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();

  return rv;
}

class ReflectorImpl : public IPC::mojom::Reflector {
 public:
  explicit ReflectorImpl(mojo::ScopedMessagePipeHandle handle)
      : binding_(this, IPC::mojom::ReflectorRequest(std::move(handle))) {}
  ~ReflectorImpl() override {
    ignore_result(binding_.Unbind().PassMessagePipe().release());
  }

 private:
  // IPC::mojom::Reflector:
  void Ping(const std::string& value, PingCallback callback) override {
    std::move(callback).Run(value);
  }

  void SyncPing(const std::string& value, PingCallback callback) override {
    std::move(callback).Run(value);
  }

  void Quit() override { base::MessageLoop::current()->QuitWhenIdle(); }

  mojo::Binding<IPC::mojom::Reflector> binding_;
};
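
// ReflectorImpl's destructor (like RunPingPongServer() below) releases the
// pipe handle instead of closing it, because the underlying MojoHandle is
// owned by the test harness rather than by this class.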

class MojoInterfacePerfTest : public mojo::edk::test::MojoTestBase {
 public:
  MojoInterfacePerfTest() : message_count_(0), count_down_(0) {}

 protected:
  void RunPingPongServer(MojoHandle mp, const std::string& label) {
    label_ = label;

    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
    ping_receiver_.Bind(IPC::mojom::ReflectorPtrInfo(std::move(scoped_mp), 0u));

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      ping_receiver_->Ping("hello", base::Bind(&MojoInterfacePerfTest::OnPong,
                                               base::Unretained(this)));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }

    ping_receiver_->Quit();

    ignore_result(ping_receiver_.PassInterface().PassHandle().release());
  }

  void OnPong(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%zu", label_.c_str(),
                             message_count_, payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

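    // In sync mode, the remaining pings are issued below as blocking calls:
    // the output-parameter form of SyncPing() does not return until the
    // reflector's reply has arrived.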
    if (sync_) {
      for (int i = 0; i < count_down_; ++i) {
        std::string response;
        ping_receiver_->SyncPing(payload_, &response);
        DCHECK_EQ(response, payload_);
      }
      perf_logger_.reset();
      base::MessageLoop::current()->QuitWhenIdle();
    } else {
      ping_receiver_->Ping(payload_, base::Bind(&MojoInterfacePerfTest::OnPong,
                                                base::Unretained(this)));
    }
  }

  static int RunPingPongClient(MojoHandle mp) {
    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);

    // In single process mode, this is running in a task and by default other
    // tasks (in particular, the binding) won't run. To keep the single process
    // and multi-process code paths the same, enable nestable tasks.
    base::MessageLoop::ScopedNestableTaskAllower nest_loop(
        base::MessageLoop::current());

    LockThreadAffinity thread_locker(kSharedCore);
    ReflectorImpl impl(std::move(scoped_mp));
    base::RunLoop().Run();
    return 0;
  }

  bool sync_ = false;

 private:
  int message_count_;
  int count_down_;
  std::string label_;
  std::string payload_;
  IPC::mojom::ReflectorPtr ping_receiver_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(MojoInterfacePerfTest);
};

enum class InProcessMessageMode {
  kSerialized,
  kUnserialized,
};

class MojoInProcessInterfacePerfTest
    : public MojoInterfacePerfTest,
      public testing::WithParamInterface<InProcessMessageMode> {
 public:
  MojoInProcessInterfacePerfTest() {
    switch (GetParam()) {
      case InProcessMessageMode::kSerialized:
        mojo::Connector::OverrideDefaultSerializationBehaviorForTesting(
            mojo::Connector::OutgoingSerializationMode::kEager,
            mojo::Connector::IncomingSerializationMode::kDispatchAsIs);
        break;
      case InProcessMessageMode::kUnserialized:
        mojo::Connector::OverrideDefaultSerializationBehaviorForTesting(
            mojo::Connector::OutgoingSerializationMode::kLazy,
            mojo::Connector::IncomingSerializationMode::kDispatchAsIs);
        break;
    }
  }
};
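
// Interpretation of the two modes (see mojo::Connector for the authoritative
// definitions): kEager serializes every outgoing message at send time, as a
// cross-process pipe would, while kLazy defers serialization so an in-process
// receiver may never pay for it.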

DEFINE_TEST_CLIENT_WITH_PIPE(PingPongClient, MojoInterfacePerfTest, h) {
  base::MessageLoop main_message_loop;
  return RunPingPongClient(h);
}

// Similar to MojoChannelPerfTest above, but uses a Mojo interface instead of
// raw IPC::Messages.
TEST_F(MojoInterfacePerfTest, MultiprocessPingPong) {
  RunTestClient("PingPongClient", [&](MojoHandle h) {
    base::MessageLoop main_message_loop;
    RunPingPongServer(h, "Multiprocess");
  });
}

TEST_F(MojoInterfacePerfTest, MultiprocessSyncPing) {
  sync_ = true;
  RunTestClient("PingPongClient", [&](MojoHandle h) {
    base::MessageLoop main_message_loop;
    RunPingPongServer(h, "MultiprocessSync");
  });
}

// A single process version of the above test.
TEST_P(MojoInProcessInterfacePerfTest, MultiThreadPingPong) {
  MojoHandle server_handle, client_handle;
  CreateMessagePipe(&server_handle, &client_handle);

  base::Thread client_thread("PingPongClient");
  client_thread.Start();
  client_thread.task_runner()->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&RunPingPongClient), client_handle));

  base::MessageLoop main_message_loop;
  RunPingPongServer(server_handle, "SingleProcess");
}

TEST_P(MojoInProcessInterfacePerfTest, SingleThreadPingPong) {
  MojoHandle server_handle, client_handle;
  CreateMessagePipe(&server_handle, &client_handle);

  base::MessageLoop main_message_loop;
  mojo::MessagePipeHandle mp_handle(client_handle);
  mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
  LockThreadAffinity thread_locker(kSharedCore);
  ReflectorImpl impl(std::move(scoped_mp));

  RunPingPongServer(server_handle, "SingleProcess");
}

INSTANTIATE_TEST_CASE_P(,
                        MojoInProcessInterfacePerfTest,
                        testing::Values(InProcessMessageMode::kSerialized,
                                        InProcessMessageMode::kUnserialized));

class CallbackPerfTest : public testing::Test {
 public:
  CallbackPerfTest()
      : client_thread_("PingPongClient"), message_count_(0), count_down_(0) {}

 protected:
  void RunMultiThreadPingPongServer() {
    client_thread_.Start();

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      std::string hello("hello");
      client_thread_.task_runner()->PostTask(
          FROM_HERE,
          base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), hello));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }
  }

  void Ping(const std::string& value) {
    main_message_loop_.task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&CallbackPerfTest::OnPong, base::Unretained(this), value));
  }

  void OnPong(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("Callback_MultiProcess_Perf_%dx_%zu",
                             message_count_, payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    client_thread_.task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), payload_));
  }

  void RunSingleThreadNoPostTaskPingPongServer() {
    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    base::Callback<void(const std::string&,
                        const base::Callback<void(const std::string&)>&)>
        ping = base::Bind(&CallbackPerfTest::SingleThreadPingNoPostTask,
                          base::Unretained(this));
    for (size_t i = 0; i < params.size(); i++) {
      payload_ = std::string(params[i].message_size(), 'a');
      std::string test_name =
          base::StringPrintf("Callback_SingleThreadPostTask_Perf_%dx_%zu",
                             params[i].message_count(), payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
      for (int j = 0; j < params[i].message_count(); ++j) {
        ping.Run(payload_,
                 base::Bind(&CallbackPerfTest::SingleThreadPongNoPostTask,
                            base::Unretained(this)));
      }
      perf_logger_.reset();
    }
  }

  void SingleThreadPingNoPostTask(
      const std::string& value,
      const base::Callback<void(const std::string&)>& pong) {
    pong.Run(value);
  }

  void SingleThreadPongNoPostTask(const std::string& value) {}

  void RunSingleThreadPostTaskPingPongServer() {
    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      std::string hello("hello");
      base::MessageLoop::current()->task_runner()->PostTask(
          FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
                                base::Unretained(this), hello));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }
  }

  void SingleThreadPingPostTask(const std::string& value) {
    base::MessageLoop::current()->task_runner()->PostTask(
        FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPongPostTask,
                              base::Unretained(this), value));
  }

  void SingleThreadPongPostTask(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("Callback_SingleThreadNoPostTask_Perf_%dx_%zu",
                             message_count_, payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    base::MessageLoop::current()->task_runner()->PostTask(
        FROM_HERE, base::Bind(&CallbackPerfTest::SingleThreadPingPostTask,
                              base::Unretained(this), payload_));
  }

 private:
  base::Thread client_thread_;
  base::MessageLoop main_message_loop_;
  int message_count_;
  int count_down_;
  std::string payload_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(CallbackPerfTest);
};

// Sends the same data as above using PostTask to a different thread instead of
// IPCs for comparison.
TEST_F(CallbackPerfTest, MultiThreadPingPong) {
  RunMultiThreadPingPongServer();
}

// Sends the same data as above using PostTask to the same thread.
TEST_F(CallbackPerfTest, SingleThreadPostTaskPingPong) {
  RunSingleThreadPostTaskPingPongServer();
}

// Sends the same data as above without using PostTask to the same thread.
TEST_F(CallbackPerfTest, SingleThreadNoPostTaskPingPong) {
  RunSingleThreadNoPostTaskPingPongServer();
}

}  // namespace
}  // namespace IPC