Remove the Jump, JumpRelative, Call, CallRelative, and Return commands.

This removes the flow-control commands from the command buffer. The Jump
command and its handler go away as well: when there is not enough room at
the end of the ring buffer for the next command, the helper now pads the
remaining entries with Noop commands and wraps put back to the start,
instead of inserting a Jump back to offset 0.
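
A minimal, self-contained sketch of the new wrap mechanism (illustrative
only: Entry, kMaxNoopSkip and WrapWithNoops are invented stand-ins for this
example; cmd::Noop, CommandHeader::kMaxSize, entries_ and put_ are the real
names in the diff below):

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  // Stand-in for CommandHeader::kMaxSize: a single Noop can only skip as
  // many entries as fit in its header's size field, so a long tail may
  // need several Noops.
  const int32_t kMaxNoopSkip = 1023;

  struct Entry { uint32_t value; };  // stand-in for CommandBufferEntry

  // Pads [put, end) with noops and returns the wrapped put offset -- the
  // role the Jump back to offset 0 used to play.
  int32_t WrapWithNoops(std::vector<Entry>* entries, int32_t put) {
    int32_t remaining = static_cast<int32_t>(entries->size()) - put;
    while (remaining > 0) {
      int32_t skip = std::min(kMaxNoopSkip, remaining);
      (*entries)[put].value = skip;  // a real Noop encodes skip in its header
      put += skip;
      remaining -= skip;
    }
    return 0;  // put wraps to the start of the buffer
  }

The real helper does this inside WaitForAvailableEntries() before waiting
for the reader, as the diff below shows.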

TESTING=Ran the gpu_unittests and everything worked correctly.


Review URL: https://chromiumcodereview.appspot.com/11613021

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@175158 0039d316-1c4b-4281-b951-d872f2087c98
@@ -21,7 +21,6 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
ring_buffer_size_(0),
entries_(NULL),
total_entry_count_(0),
usable_entry_count_(0),
token_(0),
put_(0),
last_put_sent_(0),
@@ -74,11 +73,7 @@ bool CommandBufferHelper::AllocateRingBuffer() {
return false;
}
const int32 kJumpEntries =
sizeof(cmd::Jump) / sizeof(*entries_); // NOLINT
total_entry_count_ = num_ring_buffer_entries;
usable_entry_count_ = total_entry_count_ - kJumpEntries;
put_ = state.put_offset;
return true;
}
@@ -195,7 +190,7 @@ void CommandBufferHelper::WaitForToken(int32 token) {
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a jump. Thus this function may change the value of put_. The
// around, adding noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the requested
// space may not be available.
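// Worked example: with total_entry_count_ == 10, put_ == 7 and count == 4,
// put_ + count exceeds the buffer, so entries 7..9 are filled with a noop,
// put_ wraps to 0, and the function then waits until AvailableEntries()
// returns at least 4 (e.g. once get_offset() has advanced to 5).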
void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
@@ -204,12 +199,12 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
return;
}
GPU_DCHECK(HaveRingBuffer());
GPU_DCHECK(count < usable_entry_count_);
if (put_ + count > usable_entry_count_) {
GPU_DCHECK(count < total_entry_count_);
if (put_ + count > total_entry_count_) {
// There's not enough room between the current put and the end of the
// buffer, so we need to wrap. We will add a jump back to the start, but we
// need to make sure get wraps first, actually that get is 1 or more (since
// put will wrap to 0 after we add the jump).
// buffer, so we need to wrap. We will add noops all the way to the end,
// but we need to make sure get wraps first, specifically that get is 1 or
// more (since put will wrap to 0 after we add the noops).
GPU_DCHECK_LE(1, put_);
if (get_offset() > put_ || get_offset() == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
@@ -220,8 +215,14 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
return;
}
}
// Insert a jump back to the beginning.
cmd::Jump::Set(&entries_[put_], 0);
// Insert Noops to fill out the buffer.
int32 num_entries = total_entry_count_ - put_;
while (num_entries > 0) {
int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
cmd::Noop::Set(&entries_[put_], num_to_skip);
put_ += num_to_skip;
num_entries -= num_to_skip;
}
put_ = 0;
}
if (AvailableEntries() < count) {
@@ -236,8 +237,8 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
// Force a flush if the buffer is getting half full, or even earlier if the
// reader is known to be idle.
int32 pending =
(put_ + usable_entry_count_ - last_put_sent_) % usable_entry_count_;
int32 limit = usable_entry_count_ /
(put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
int32 limit = total_entry_count_ /
((get_offset() == last_put_sent_) ? 16 : 2);
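// Concretely: with a 1024-entry buffer this flushes once more than 512
// entries are pending, or more than 64 when get_offset() == last_put_sent_,
// i.e. when the reader has already consumed everything sent so far and is
// likely idle.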
if (pending > limit) {
Flush();
@@ -265,9 +266,8 @@ CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
WaitForAvailableEntries(entries);
CommandBufferEntry* space = &entries_[put_];
put_ += entries;
GPU_DCHECK_LE(put_, usable_entry_count_);
if (put_ == usable_entry_count_) {
cmd::Jump::Set(&entries_[put_], 0);
GPU_DCHECK_LE(put_, total_entry_count_);
if (put_ == total_entry_count_) {
put_ = 0;
}
return space;
@@ -149,41 +149,6 @@ class GPU_EXPORT CommandBufferHelper {
}
}
void Jump(uint32 offset) {
cmd::Jump* cmd = GetCmdSpace<cmd::Jump>();
if (cmd) {
cmd->Init(offset);
}
}
void JumpRelative(int32 offset) {
cmd::JumpRelative* cmd = GetCmdSpace<cmd::JumpRelative>();
if (cmd) {
cmd->Init(offset);
}
}
void Call(uint32 offset) {
cmd::Call* cmd = GetCmdSpace<cmd::Call>();
if (cmd) {
cmd->Init(offset);
}
}
void CallRelative(int32 offset) {
cmd::CallRelative* cmd = GetCmdSpace<cmd::CallRelative>();
if (cmd) {
cmd->Init(offset);
}
}
void Return() {
cmd::Return* cmd = GetCmdSpace<cmd::Return>();
if (cmd) {
cmd->Init();
}
}
void SetBucketSize(uint32 bucket_id, uint32 size) {
cmd::SetBucketSize* cmd = GetCmdSpace<cmd::SetBucketSize>();
if (cmd) {
@@ -276,8 +241,7 @@ class GPU_EXPORT CommandBufferHelper {
// Returns the number of available entries (they may not be contiguous).
int32 AvailableEntries() {
return (get_offset() - put_ - 1 + usable_entry_count_) %
usable_entry_count_;
return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_;
}
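// Note on the modular arithmetic: the "- 1" keeps put_ from ever catching
// up to get, so a full buffer (put_ one entry behind get) remains
// distinguishable from an empty one (put_ == get). For example, with
// total_entry_count_ == 10, get_offset() == 3 and put_ == 7, there are
// (3 - 7 - 1 + 10) % 10 == 5 entries available.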
bool AllocateRingBuffer();
@@ -289,7 +253,6 @@ class GPU_EXPORT CommandBufferHelper {
Buffer ring_buffer_;
CommandBufferEntry* entries_;
int32 total_entry_count_; // the total number of entries
int32 usable_entry_count_; // the usable number (ie, minus space for jump)
int32 token_;
int32 put_;
int32 last_put_sent_;
@@ -28,8 +28,7 @@ using testing::DoAll;
using testing::Invoke;
using testing::_;
const int32 kTotalNumCommandEntries = 12;
const int32 kUsableNumCommandEntries = 10;
const int32 kTotalNumCommandEntries = 10;
const int32 kCommandBufferSizeBytes =
kTotalNumCommandEntries * sizeof(CommandBufferEntry);
const int32 kUnusedCommandId = 5; // we use 0 and 2 currently.
@@ -39,26 +38,6 @@ const int32 kUnusedCommandId = 5;  // we use 0 and 2 currently.
// (calling it directly, not through the RPC mechanism).
class CommandBufferHelperTest : public testing::Test {
protected:
// Helper so mock can handle the Jump command.
class DoJumpCommand {
public:
explicit DoJumpCommand(GpuScheduler* gpu_scheduler)
: gpu_scheduler_(gpu_scheduler) {
}
error::Error DoCommand(
unsigned int command,
unsigned int arg_count,
const void* cmd_data) {
const cmd::Jump* jump_cmd = static_cast<const cmd::Jump*>(cmd_data);
gpu_scheduler_->parser()->set_get(jump_cmd->offset);
return error::kNoError;
};
private:
GpuScheduler* gpu_scheduler_;
};
virtual void SetUp() {
api_mock_.reset(new AsyncAPIMock);
// ignore noops in the mock - we don't want to inspect the internals of the
@@ -82,12 +61,6 @@ class CommandBufferHelperTest : public testing::Test {
command_buffer_->SetGetBufferChangeCallback(base::Bind(
&GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
do_jump_command_.reset(new DoJumpCommand(gpu_scheduler_.get()));
EXPECT_CALL(*api_mock_, DoCommand(cmd::kJump, _, _))
.WillRepeatedly(
Invoke(do_jump_command_.get(), &DoJumpCommand::DoCommand));
api_mock_->set_engine(gpu_scheduler_.get());
helper_.reset(new CommandBufferHelper(command_buffer_.get()));
@@ -172,7 +145,6 @@ class CommandBufferHelperTest : public testing::Test {
scoped_ptr<GpuScheduler> gpu_scheduler_;
scoped_ptr<CommandBufferHelper> helper_;
Sequence sequence_;
scoped_ptr<DoJumpCommand> do_jump_command_;
};
// Checks that commands in the buffer are properly executed, and that the
@@ -234,7 +206,7 @@ TEST_F(CommandBufferHelperTest, TestCommandWrapping) {
TEST_F(CommandBufferHelperTest, TestCommandWrappingExactMultiple) {
const int32 kCommandSize = 5;
const size_t kNumArgs = kCommandSize - 1;
COMPILE_ASSERT(kUsableNumCommandEntries % kCommandSize == 0,
COMPILE_ASSERT(kTotalNumCommandEntries % kCommandSize == 0,
Not_multiple_of_num_command_entries);
CommandBufferEntry args1[kNumArgs];
for (size_t ii = 0; ii < kNumArgs; ++ii) {
@@ -35,25 +35,6 @@ class BaseRingBufferTest : public testing::Test {
static const unsigned int kBaseOffset = 128;
static const unsigned int kBufferSize = 1024;
class DoJumpCommand {
public:
explicit DoJumpCommand(GpuScheduler* gpu_scheduler)
: gpu_scheduler_(gpu_scheduler) {
}
error::Error DoCommand(
unsigned int command,
unsigned int arg_count,
const void* cmd_data) {
const cmd::Jump* jump_cmd = static_cast<const cmd::Jump*>(cmd_data);
gpu_scheduler_->parser()->set_get(jump_cmd->offset);
return error::kNoError;
};
private:
GpuScheduler* gpu_scheduler_;
};
virtual void SetUp() {
api_mock_.reset(new AsyncAPIMock);
// ignore noops in the mock - we don't want to inspect the internals of the
@@ -82,10 +63,6 @@ class BaseRingBufferTest : public testing::Test {
&GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
api_mock_->set_engine(gpu_scheduler_.get());
do_jump_command_.reset(new DoJumpCommand(gpu_scheduler_.get()));
EXPECT_CALL(*api_mock_, DoCommand(cmd::kJump, _, _))
.WillRepeatedly(
Invoke(do_jump_command_.get(), &DoJumpCommand::DoCommand));
helper_.reset(new CommandBufferHelper(command_buffer_.get()));
helper_->Initialize(kBufferSize);
@@ -104,7 +81,6 @@ class BaseRingBufferTest : public testing::Test {
scoped_ptr<CommandBufferService> command_buffer_;
scoped_ptr<GpuScheduler> gpu_scheduler_;
scoped_ptr<CommandBufferHelper> helper_;
scoped_ptr<DoJumpCommand> do_jump_command_;
};
#ifndef _MSC_VER
@@ -150,16 +150,11 @@ namespace cmd {
#define COMMON_COMMAND_BUFFER_CMDS(OP) \
OP(Noop) /* 0 */ \
OP(SetToken) /* 1 */ \
OP(Jump) /* 2 */ \
OP(JumpRelative) /* 3 */ \
OP(Call) /* 4 */ \
OP(CallRelative) /* 5 */ \
OP(Return) /* 6 */ \
OP(SetBucketSize) /* 7 */ \
OP(SetBucketData) /* 8 */ \
OP(SetBucketDataImmediate) /* 9 */ \
OP(GetBucketStart) /* 10 */ \
OP(GetBucketData) /* 11 */ \
OP(SetBucketSize) /* 2 */ \
OP(SetBucketData) /* 3 */ \
OP(SetBucketDataImmediate) /* 4 */ \
OP(GetBucketStart) /* 5 */ \
OP(GetBucketData) /* 6 */ \
// Common commands.
enum CommandId {
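For reference, the trimmed list above is stamped out into the enum that
begins right where this hunk cuts off; assuming the usual OP(name) -> k##name
expansion and a kNumCommands terminator (both live in the surrounding header,
not in this hunk), the renumbered result is roughly:

  enum CommandId {
    kNoop,                    //  0
    kSetToken,                //  1
    kSetBucketSize,           //  2
    kSetBucketData,           //  3
    kSetBucketDataImmediate,  //  4
    kGetBucketStart,          //  5
    kGetBucketData,           //  6
    kNumCommands,
  };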
@@ -234,152 +229,6 @@ COMPILE_ASSERT(offsetof(SetToken, header) == 0,
COMPILE_ASSERT(offsetof(SetToken, token) == 4,
Offsetof_SetToken_token_not_4);
// The Jump command jumps to another place in the command buffer.
struct Jump {
typedef Jump ValueType;
static const CommandId kCmdId = kJump;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
void SetHeader() {
header.SetCmd<ValueType>();
}
void Init(uint32 _offset) {
SetHeader();
offset = _offset;
}
static void* Set(
void* cmd, uint32 _offset) {
static_cast<ValueType*>(cmd)->Init(_offset);
return NextCmdAddress<ValueType>(cmd);
}
CommandHeader header;
uint32 offset;
};
COMPILE_ASSERT(sizeof(Jump) == 8, Sizeof_Jump_is_not_8);
COMPILE_ASSERT(offsetof(Jump, header) == 0,
Offsetof_Jump_header_not_0);
COMPILE_ASSERT(offsetof(Jump, offset) == 4,
Offsetof_Jump_offset_not_4);
// The JumpRelative command jumps to another place in the command buffer
// relative to the end of this command. In other words, a JumpRelative with
// an offset of zero is effectively a noop.
struct JumpRelative {
typedef JumpRelative ValueType;
static const CommandId kCmdId = kJumpRelative;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
void SetHeader() {
header.SetCmd<ValueType>();
}
void Init(int32 _offset) {
SetHeader();
offset = _offset;
}
static void* Set(void* cmd, int32 _offset) {
static_cast<ValueType*>(cmd)->Init(_offset);
return NextCmdAddress<ValueType>(cmd);
}
CommandHeader header;
int32 offset;
};
COMPILE_ASSERT(sizeof(JumpRelative) == 8, Sizeof_JumpRelative_is_not_8);
COMPILE_ASSERT(offsetof(JumpRelative, header) == 0,
Offsetof_JumpRelative_header_not_0);
COMPILE_ASSERT(offsetof(JumpRelative, offset) == 4,
Offsetof_JumpRelative_offset_4);
// The Call command jumps to a subroutine which can be returned from with the
// Return command.
struct Call {
typedef Call ValueType;
static const CommandId kCmdId = kCall;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
void SetHeader() {
header.SetCmd<ValueType>();
}
void Init(uint32 _offset) {
SetHeader();
offset = _offset;
}
static void* Set(void* cmd, uint32 _offset) {
static_cast<ValueType*>(cmd)->Init(_offset);
return NextCmdAddress<ValueType>(cmd);
}
CommandHeader header;
uint32 offset;
};
COMPILE_ASSERT(sizeof(Call) == 8, Sizeof_Call_is_not_8);
COMPILE_ASSERT(offsetof(Call, header) == 0,
Offsetof_Call_header_not_0);
COMPILE_ASSERT(offsetof(Call, offset) == 4,
Offsetof_Call_offset_not_4);
// The CallRelative command jumps to a subroutine using a relative offset. The
// offset is relative to the end of this command.
struct CallRelative {
typedef CallRelative ValueType;
static const CommandId kCmdId = kCallRelative;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
void SetHeader() {
header.SetCmd<ValueType>();
}
void Init(int32 _offset) {
SetHeader();
offset = _offset;
}
static void* Set(void* cmd, int32 _offset) {
static_cast<ValueType*>(cmd)->Init(_offset);
return NextCmdAddress<ValueType>(cmd);
}
CommandHeader header;
int32 offset;
};
COMPILE_ASSERT(sizeof(CallRelative) == 8, Sizeof_CallRelative_is_not_8);
COMPILE_ASSERT(offsetof(CallRelative, header) == 0,
Offsetof_CallRelative_header_not_0);
COMPILE_ASSERT(offsetof(CallRelative, offset) == 4,
Offsetof_CallRelative_offset_4);
// Returns from a subroutine called by the Call or CallRelative commands.
struct Return {
typedef Return ValueType;
static const CommandId kCmdId = kReturn;
static const cmd::ArgFlags kArgFlags = cmd::kFixed;
void SetHeader() {
header.SetCmd<ValueType>();
}
void Init() {
SetHeader();
}
static void* Set(void* cmd) {
static_cast<ValueType*>(cmd)->Init();
return NextCmdAddress<ValueType>(cmd);
}
CommandHeader header;
};
COMPILE_ASSERT(sizeof(Return) == 4, Sizeof_Return_is_not_4);
COMPILE_ASSERT(offsetof(Return, header) == 0,
Offsetof_Return_header_not_0);
// Sets the size of a bucket for collecting data on the service side.
// This is a utility for gathering data on the service side so it can be used
// all at once when some service side API is called. It removes the need to add
@@ -186,56 +186,6 @@ error::Error CommonDecoder::HandleSetToken(
return error::kNoError;
}
error::Error CommonDecoder::HandleJump(
uint32 immediate_data_size,
const cmd::Jump& args) {
if (!engine_->SetGetOffset(args.offset)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error CommonDecoder::HandleJumpRelative(
uint32 immediate_data_size,
const cmd::JumpRelative& args) {
if (!engine_->SetGetOffset(engine_->GetGetOffset() + args.offset)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error CommonDecoder::HandleCall(
uint32 immediate_data_size,
const cmd::Call& args) {
if (!PushAddress(args.offset)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error CommonDecoder::HandleCallRelative(
uint32 immediate_data_size,
const cmd::CallRelative& args) {
if (!PushAddress(engine_->GetGetOffset() + args.offset)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error CommonDecoder::HandleReturn(
uint32 immediate_data_size,
const cmd::Return& args) {
if (call_stack_.empty()) {
return error::kInvalidArguments;
}
CommandAddress return_address = call_stack_.top();
call_stack_.pop();
if (!engine_->SetGetOffset(return_address.offset)) {
return error::kInvalidArguments;
}
return error::kNoError;
}
error::Error CommonDecoder::HandleSetBucketSize(
uint32 immediate_data_size,
const cmd::SetBucketSize& args) {
@@ -212,134 +212,6 @@ TEST_F(CommonDecoderTest, SetToken) {
EXPECT_EQ(kTokenId, engine_.token());
}
TEST_F(CommonDecoderTest, Jump) {
cmd::Jump cmd;
// Check valid args succeed.
cmd.Init(MockCommandBufferEngine::kValidOffset);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(MockCommandBufferEngine::kValidOffset,
engine_.GetGetOffset());
// Check invalid offset fails.
cmd.Init(MockCommandBufferEngine::kInvalidOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(MockCommandBufferEngine::kValidOffset,
engine_.GetGetOffset());
// Check negative offset fails
cmd.Init(-1);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
// NOTE: The read_pointer checks for relative commands do not take into
// account that the actual implementation of CommandBufferEngine uses the
// parser, which will advance the read pointer to the start of the next
// command.
TEST_F(CommonDecoderTest, JumpRelative) {
cmd::JumpRelative cmd;
// Check valid positive offset succeeds.
const int32 kPositiveOffset = 16;
cmd.Init(kPositiveOffset);
int32 read_pointer = engine_.GetGetOffset();
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer + kPositiveOffset, engine_.GetGetOffset());
// Check valid negative offset succeeds.
const int32 kNegativeOffset = -8;
read_pointer = engine_.GetGetOffset();
cmd.Init(kNegativeOffset);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer + kNegativeOffset, engine_.GetGetOffset());
// Check invalid offset fails.
cmd.Init(MockCommandBufferEngine::kInvalidOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer + kNegativeOffset, engine_.GetGetOffset());
// Check invalid negative offset fails.
const int32 kInvalidNegativeOffset = -kPositiveOffset + kNegativeOffset - 1;
cmd.Init(kInvalidNegativeOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
TEST_F(CommonDecoderTest, Call) {
cmd::Call cmd;
// Check valid args succeed.
cmd.Init(MockCommandBufferEngine::kValidOffset);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(MockCommandBufferEngine::kValidOffset,
engine_.GetGetOffset());
// Check invalid offset fails.
cmd.Init(MockCommandBufferEngine::kInvalidOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
EXPECT_EQ(MockCommandBufferEngine::kValidOffset,
engine_.GetGetOffset());
// Check negative offset fails
cmd.Init(-1);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
// Check that the call values are on the stack.
cmd::Return return_cmd;
return_cmd.Init();
EXPECT_EQ(error::kNoError, ExecuteCmd(return_cmd));
EXPECT_EQ(0, engine_.GetGetOffset());
// Check that stack overflow fails.
cmd.Init(MockCommandBufferEngine::kValidOffset);
for (unsigned int ii = 0; ii < CommonDecoder::kMaxStackDepth; ++ii) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
TEST_F(CommonDecoderTest, CallRelative) {
cmd::CallRelative cmd;
// Check valid positive offset succeeds.
const int32 kPositiveOffset = 16;
cmd.Init(kPositiveOffset);
int32 read_pointer_1 = engine_.GetGetOffset();
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer_1 + kPositiveOffset, engine_.GetGetOffset());
// Check valid negative offset succeeds.
const int32 kNegativeOffset = -8;
int32 read_pointer_2 = engine_.GetGetOffset();
cmd.Init(kNegativeOffset);
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer_2 + kNegativeOffset, engine_.GetGetOffset());
// Check invalid offset fails.
cmd.Init(MockCommandBufferEngine::kInvalidOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
// See note above.
EXPECT_EQ(read_pointer_2 + kNegativeOffset, engine_.GetGetOffset());
// Check invalid negative offset fails.
const int32 kInvalidNegativeOffset = -kPositiveOffset + kNegativeOffset - 1;
cmd.Init(kInvalidNegativeOffset);
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
// Check that the call values are on the stack.
cmd::Return return_cmd;
return_cmd.Init();
EXPECT_EQ(error::kNoError, ExecuteCmd(return_cmd));
// See note above.
EXPECT_EQ(read_pointer_1 + kPositiveOffset, engine_.GetGetOffset());
EXPECT_EQ(error::kNoError, ExecuteCmd(return_cmd));
// See note above.
EXPECT_EQ(0, engine_.GetGetOffset());
// Check that stack overflow fails.
cmd.Init(kPositiveOffset);
for (unsigned int ii = 0; ii < CommonDecoder::kMaxStackDepth; ++ii) {
EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
}
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
TEST_F(CommonDecoderTest, Return) {
// Success is tested by Call and CallRelative
// Test that an empty stack fails.
cmd::Return cmd;
cmd.Init();
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
TEST_F(CommonDecoderTest, SetBucketSize) {
cmd::SetBucketSize cmd;
const uint32 kBucketId = 123;