Commit 793071a0 authored by James Darpinian, committed by Commit Bot

gpu: Detect when result pointers are invalidated

If the transfer buffer is resized, pointers previously returned by
GetResultAs are invalidated. This change makes GetResultAs return a
smart pointer class that lets us detect when a result pointer is still
in use at the time the buffer is resized, and crash safely instead of
risking a use-after-free.

Bug: 905889, 905890
Change-Id: I67b243a779f1a2996e7c13740c5ebdcfda16d0d3
Reviewed-on: https://chromium-review.googlesource.com/c/1336753
Commit-Queue: James Darpinian <jdarpinian@chromium.org>
Reviewed-by: Antoine Labour <piman@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#608856}
parent 775938b6
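
To make the mechanism concrete before the diff, here is a minimal, self-contained sketch (not part of this CL). A stand-in FakeTransferBuffer tracks an outstanding_result_pointer_ flag, a move-only scoped pointer acquires and releases it, and any resize attempted while the pointer is alive trips an assertion. Only AcquireResultBuffer, ReleaseResultBuffer, GetResultOffset, and the outstanding_result_pointer_ flag mirror the change below; the Fake*/Sketch names, the assert() calls (standing in for CHECK/DCHECK), and main() are illustrative assumptions.

#include <cassert>
#include <cstdint>

// Stand-in for gpu::TransferBuffer; only the acquire/release contract and the
// outstanding_result_pointer_ flag mirror the real class.
class FakeTransferBuffer {
 public:
  void* AcquireResultBuffer() {
    // At most one result pointer may be outstanding at a time.
    assert(!outstanding_result_pointer_);
    outstanding_result_pointer_ = true;
    return result_storage_;
  }
  void ReleaseResultBuffer() {
    assert(outstanding_result_pointer_);
    outstanding_result_pointer_ = false;
  }
  int GetResultOffset() const { return 0; }
  void Resize() {
    // Mirrors the checks added to ReallocateRingBuffer and
    // ShrinkOrExpandRingBufferIfNecessary: resizing would invalidate any
    // outstanding result pointer, so it must fail loudly instead.
    assert(!outstanding_result_pointer_);
  }

 private:
  bool outstanding_result_pointer_ = false;
  alignas(8) char result_storage_[64] = {};
};

// Move-only wrapper shaped like the ScopedResultPtr introduced in
// transfer_buffer.h below.
template <typename T>
class ScopedResultPtrSketch {
 public:
  explicit ScopedResultPtrSketch(FakeTransferBuffer* tb)
      : result_(static_cast<T*>(tb->AcquireResultBuffer())),
        transfer_buffer_(tb) {}
  ~ScopedResultPtrSketch() {
    if (transfer_buffer_)
      transfer_buffer_->ReleaseResultBuffer();
  }
  ScopedResultPtrSketch(const ScopedResultPtrSketch&) = delete;
  ScopedResultPtrSketch& operator=(const ScopedResultPtrSketch&) = delete;
  ScopedResultPtrSketch(ScopedResultPtrSketch&& other) noexcept
      : result_(other.result_), transfer_buffer_(other.transfer_buffer_) {
    other.result_ = nullptr;
    other.transfer_buffer_ = nullptr;
  }
  T& operator*() const { return *result_; }
  T* operator->() const { return result_; }
  int offset() const { return transfer_buffer_->GetResultOffset(); }

 private:
  T* result_;
  FakeTransferBuffer* transfer_buffer_;
};

int main() {
  FakeTransferBuffer tb;
  {
    ScopedResultPtrSketch<int32_t> result(&tb);
    *result = 0;     // write into the result slot, as callers do before waiting
    // tb.Resize();  // would assert here: the result pointer is still outstanding
  }                  // |result| destroyed -> ReleaseResultBuffer()
  tb.Resize();       // safe: no outstanding result pointer
  return 0;
}
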
......@@ -3052,13 +3052,13 @@ class GETnHandler(TypeHandler):
return;
}
typedef cmds::%(func_name)s::Result Result;
Result* result = GetResultAs<Result*>();
ScopedResultPtr<Result> result = GetResultAs<Result>();
if (!result) {
return;
}
result->SetNumResults(0);
helper_->%(func_name)s(%(arg_string)s,
GetResultShmId(), GetResultShmOffset());
GetResultShmId(), result.offset());
WaitForCmd();
result->CopyResult(%(last_arg_name)s);
GPU_CLIENT_LOG_CODE_BLOCK({
......@@ -4544,7 +4544,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
func.WriteDestinationInitalizationValidation(f)
self.WriteClientGLCallLog(func, f)
f.write(" typedef cmds::%s::Result Result;\n" % func.name)
f.write(" Result* result = GetResultAs<Result*>();\n")
f.write(" ScopedResultPtr<Result> result = GetResultAs<Result>();\n")
f.write(" if (!result) {\n")
f.write(" return %s;\n" % error_value)
f.write(" }\n")
......@@ -4556,7 +4556,7 @@ TEST_P(%(test_name)s, %(name)sInvalidArgsBadSharedMemoryId) {
else:
arg_string = func.MakeOriginalArgString("")
f.write(
" helper_->%s(%s, GetResultShmId(), GetResultShmOffset());\n" %
" helper_->%s(%s, GetResultShmId(), result.offset());\n" %
(func.name, arg_string))
f.write(" WaitForCmd();\n")
f.write(" %s result_value = *result" % func.return_type)
......
......@@ -176,18 +176,10 @@ void ImplementationBase::WaitForCmd() {
helper_->Finish();
}
void* ImplementationBase::GetResultBuffer() {
return transfer_buffer_->GetResultBuffer();
}
int32_t ImplementationBase::GetResultShmId() {
return transfer_buffer_->GetShmId();
}
uint32_t ImplementationBase::GetResultShmOffset() {
return transfer_buffer_->GetResultOffset();
}
bool ImplementationBase::GetBucketContents(uint32_t bucket_id,
std::vector<int8_t>* data) {
TRACE_EVENT0("gpu", "ImplementationBase::GetBucketContents");
......@@ -198,12 +190,12 @@ bool ImplementationBase::GetBucketContents(uint32_t bucket_id,
return false;
}
typedef cmd::GetBucketStart::Result Result;
Result* result = GetResultAs<Result*>();
auto result = GetResultAs<Result>();
if (!result) {
return false;
}
*result = 0;
helper_->GetBucketStart(bucket_id, GetResultShmId(), GetResultShmOffset(),
helper_->GetBucketStart(bucket_id, GetResultShmId(), result.offset(),
buffer.size(), buffer.shm_id(), buffer.offset());
WaitForCmd();
uint32_t size = *result;
......
......@@ -97,13 +97,11 @@ class GLES2_IMPL_EXPORT ImplementationBase
// Gets the value of the result.
template <typename T>
T GetResultAs() {
return static_cast<T>(GetResultBuffer());
ScopedResultPtr<T> GetResultAs() {
return ScopedResultPtr<T>(transfer_buffer_);
}
void* GetResultBuffer();
int32_t GetResultShmId();
uint32_t GetResultShmOffset();
// TODO(gman): These bucket functions really seem like they belong in
// CommandBufferHelper (or maybe BucketHelper?). Unfortunately they need
......
......@@ -54,10 +54,17 @@ int MockTransferBuffer::GetShmId() {
return buffer_ids_[actual_buffer_index_];
}
void* MockTransferBuffer::GetResultBuffer() {
void* MockTransferBuffer::AcquireResultBuffer() {
EXPECT_FALSE(outstanding_result_pointer_);
outstanding_result_pointer_ = true;
return actual_buffer() + actual_buffer_index_ * alignment_;
}
void MockTransferBuffer::ReleaseResultBuffer() {
EXPECT_TRUE(outstanding_result_pointer_);
outstanding_result_pointer_ = false;
}
int MockTransferBuffer::GetResultOffset() {
return actual_buffer_index_ * alignment_;
}
......
......@@ -36,7 +36,8 @@ class MockTransferBuffer : public TransferBufferInterface {
unsigned int /* max_buffer_size */,
unsigned int alignment) override;
int GetShmId() override;
void* GetResultBuffer() override;
void* AcquireResultBuffer() override;
void ReleaseResultBuffer() override;
int GetResultOffset() override;
void Free() override;
bool HaveBuffer() const override;
......@@ -85,6 +86,7 @@ class MockTransferBuffer : public TransferBufferInterface {
uint32_t expected_offset_;
uint32_t actual_offset_;
bool initialize_fail_;
bool outstanding_result_pointer_ = false;
DISALLOW_COPY_AND_ASSIGN(MockTransferBuffer);
};
......
......@@ -597,14 +597,13 @@ CommandBuffer* RasterImplementation::command_buffer() const {
GLenum RasterImplementation::GetGLError() {
TRACE_EVENT0("gpu", "RasterImplementation::GetGLError");
// Check the GL error first, then our wrapped error.
typedef cmds::GetError::Result Result;
Result* result = GetResultAs<Result*>();
auto result = GetResultAs<cmds::GetError::Result>();
// If we couldn't allocate a result the context is lost.
if (!result) {
return GL_NO_ERROR;
}
*result = GL_NO_ERROR;
helper_->GetError(GetResultShmId(), GetResultShmOffset());
helper_->GetError(GetResultShmId(), result.offset());
WaitForCmd();
GLenum error = *result;
if (error == GL_NO_ERROR) {
......
......@@ -46,12 +46,12 @@ void RasterImplementation::GetIntegerv(GLenum pname, GLint* params) {
return;
}
typedef cmds::GetIntegerv::Result Result;
Result* result = GetResultAs<Result*>();
ScopedResultPtr<Result> result = GetResultAs<Result>();
if (!result) {
return;
}
result->SetNumResults(0);
helper_->GetIntegerv(pname, GetResultShmId(), GetResultShmOffset());
helper_->GetIntegerv(pname, GetResultShmId(), result.offset());
WaitForCmd();
result->CopyResult(params);
GPU_CLIENT_LOG_CODE_BLOCK({
......
......@@ -55,6 +55,7 @@ bool TransferBuffer::Initialize(unsigned int default_buffer_size,
}
void TransferBuffer::Free() {
DCHECK(!outstanding_result_pointer_);
if (HaveBuffer()) {
TRACE_EVENT0("gpu", "TransferBuffer::Free");
helper_->OrderingBarrier();
......@@ -133,6 +134,9 @@ static unsigned int ComputePOTSize(unsigned int dimension) {
}
void TransferBuffer::ReallocateRingBuffer(unsigned int size, bool shrink) {
// We should never attempt to shrink the buffer if someone has a result
// pointer that hasn't been released.
DCHECK(!shrink || !outstanding_result_pointer_);
// What size buffer would we ask for if we needed a new one?
unsigned int needed_buffer_size = ComputePOTSize(size + result_size_);
DCHECK_EQ(needed_buffer_size % alignment_, 0u)
......@@ -147,6 +151,9 @@ void TransferBuffer::ReallocateRingBuffer(unsigned int size, bool shrink) {
return;
if (usable_ && (shrink || needed_buffer_size > current_size)) {
// We should never attempt to reallocate the buffer if someone has a result
// pointer that hasn't been released. This would cause a use-after-free.
CHECK(!outstanding_result_pointer_);
if (HaveBuffer()) {
Free();
}
......@@ -168,6 +175,9 @@ unsigned int TransferBuffer::GetPreviousRingBufferUsedBytes() {
void TransferBuffer::ShrinkOrExpandRingBufferIfNecessary(
unsigned int size_to_allocate) {
// We should never attempt to resize the buffer if someone has a result
// pointer that hasn't been released.
CHECK(!outstanding_result_pointer_);
// Don't resize the buffer while blocks are in use to avoid throwing away
// live allocations.
if (HaveBuffer() && ring_buffer_->NumUsedBlocks() > 0)
......@@ -228,13 +238,23 @@ void* TransferBuffer::Alloc(unsigned int size) {
return ring_buffer_->Alloc(size);
}
void* TransferBuffer::GetResultBuffer() {
void* TransferBuffer::AcquireResultBuffer() {
// There should never be two result pointers active at the same time. The
// previous pointer should always be released first. ScopedResultPtr helps
// ensure this invariant.
DCHECK(!outstanding_result_pointer_);
ReallocateRingBuffer(result_size_);
outstanding_result_pointer_ = true;
return result_buffer_;
}
void TransferBuffer::ReleaseResultBuffer() {
DCHECK(outstanding_result_pointer_);
outstanding_result_pointer_ = false;
}
int TransferBuffer::GetResultOffset() {
ReallocateRingBuffer(result_size_);
DCHECK(outstanding_result_pointer_);
return result_shm_offset_;
}
......
......@@ -21,6 +21,8 @@
namespace gpu {
class CommandBufferHelper;
template <typename>
class ScopedResultPtr;
// Interface for managing the transfer buffer.
class GPU_EXPORT TransferBufferInterface {
......@@ -39,8 +41,6 @@ class GPU_EXPORT TransferBufferInterface {
unsigned int alignment) = 0;
virtual int GetShmId() = 0;
virtual void* GetResultBuffer() = 0;
virtual int GetResultOffset() = 0;
virtual void Free() = 0;
......@@ -66,6 +66,17 @@ class GPU_EXPORT TransferBufferInterface {
virtual unsigned int GetFragmentedFreeSize() const = 0;
virtual void ShrinkLastBlock(unsigned int new_size) = 0;
protected:
template <typename>
friend class ScopedResultPtr;
// Use ScopedResultPtr instead of calling these directly. The acquire/release
// semantics allow TransferBuffer to detect if there is an outstanding result
// pointer when the buffer is resized, which would otherwise cause a
// use-after-free bug.
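// The expected sequence (sketch, not in the original header; the command
// issued in the middle is whatever writes to the result slot):
//   void* ptr = AcquireResultBuffer();  // pin the result slot
//   ... issue a command targeting GetResultOffset(), then wait for it ...
//   ReleaseResultBuffer();              // unpin; resizing is allowed again
// ScopedResultPtr (declared later in this header) performs the acquire in its
// constructor and the release in its destructor so callers cannot forget the
// release.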
virtual void* AcquireResultBuffer() = 0;
virtual void ReleaseResultBuffer() = 0;
virtual int GetResultOffset() = 0;
};
// Class that manages the transfer buffer.
......@@ -82,7 +93,8 @@ class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
unsigned int max_buffer_size,
unsigned int alignment) override;
int GetShmId() override;
void* GetResultBuffer() override;
void* AcquireResultBuffer() override;
void ReleaseResultBuffer() override;
int GetResultOffset() override;
void Free() override;
bool HaveBuffer() const override;
......@@ -158,6 +170,10 @@ class GPU_EXPORT TransferBuffer : public TransferBufferInterface {
// false if we failed to allocate min_buffer_size
bool usable_;
// While a ScopedResultPtr exists, we can't resize the transfer buffer. Only
// one ScopedResultPtr should exist at a time. This tracks whether one exists.
bool outstanding_result_pointer_ = false;
};
// A class that will manage the lifetime of a transferbuffer allocation.
......@@ -244,6 +260,42 @@ class ScopedTransferBufferArray : public ScopedTransferBufferPtr {
}
};
// ScopedResultPtr is a move-only smart pointer that calls AcquireResultBuffer
// and ReleaseResultBuffer for you.
template <typename T>
class ScopedResultPtr {
public:
explicit ScopedResultPtr(TransferBufferInterface* tb)
: result_(static_cast<T*>(tb->AcquireResultBuffer())),
transfer_buffer_(tb) {}
~ScopedResultPtr() {
if (transfer_buffer_)
transfer_buffer_->ReleaseResultBuffer();
}
int offset() const { return transfer_buffer_->GetResultOffset(); }
// Make this a move-only class like unique_ptr.
DISALLOW_COPY_AND_ASSIGN(ScopedResultPtr);
ScopedResultPtr(ScopedResultPtr<T>&& other) { *this = std::move(other); }
ScopedResultPtr& operator=(ScopedResultPtr<T>&& other) {
this->result_ = other.result_;
this->transfer_buffer_ = other.transfer_buffer_;
other.result_ = nullptr;
other.transfer_buffer_ = nullptr;
return *this;
}
// Dereferencing behaviors
T& operator*() const { return *result_; }
T* operator->() const { return result_; }
explicit operator bool() { return result_; }
private:
T* result_;
TransferBufferInterface* transfer_buffer_;
};
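// Typical call-site shape (sketch, not part of this header; SomeCmd is a
// placeholder, and GetResultAs/helper_/GetResultShmId/WaitForCmd come from
// ImplementationBase and the autogenerated clients elsewhere in this CL):
//   ScopedResultPtr<Result> result = GetResultAs<Result>();
//   if (!result)
//     return;
//   *result = 0;
//   helper_->SomeCmd(args, GetResultShmId(), result.offset());
//   WaitForCmd();
//   value = *result;  // slot is released when |result| goes out of scope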
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_CLIENT_TRANSFER_BUFFER_H_
......@@ -141,7 +141,8 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_EQ(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
// See that it gets reallocated.
EXPECT_TRUE(transfer_buffer_->GetResultBuffer() != nullptr);
EXPECT_TRUE(transfer_buffer_->AcquireResultBuffer() != nullptr);
transfer_buffer_->ReleaseResultBuffer();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
......@@ -180,7 +181,7 @@ TEST_F(TransferBufferTest, Free) {
EXPECT_LT(command_buffer_->GetState().get_offset, put_offset);
// See that it gets reallocated.
transfer_buffer_->GetResultOffset();
transfer_buffer_->GetShmId();
EXPECT_TRUE(transfer_buffer_->HaveBuffer());
EXPECT_NE(base::UnguessableToken(), transfer_buffer_->shared_memory_guid());
......@@ -741,4 +742,41 @@ TEST_F(TransferBufferTest, MultipleAllocsAndFrees) {
EXPECT_EQ(transfer_buffer_->GetFragmentedFreeSize(), original_free_size);
}
#if defined(GTEST_HAS_DEATH_TEST)
TEST_F(TransferBufferTest, ResizeDuringScopedResultPtr) {
Initialize();
ScopedResultPtr<int> ptr(transfer_buffer_.get());
// If an attempt is made to resize the transfer buffer while a result
// pointer exists, we should hit a CHECK. Allocate just enough to force a
// resize.
unsigned int size_allocated;
ASSERT_DEATH(transfer_buffer_->AllocUpTo(transfer_buffer_->GetFreeSize() + 1,
&size_allocated),
"outstanding_result_pointer_");
}
#if DCHECK_IS_ON()
TEST_F(TransferBufferTest, AllocDuringScopedResultPtr) {
Initialize();
ScopedResultPtr<int> ptr(transfer_buffer_.get());
// If an attempt is made to allocate any amount in the transfer buffer while
// a result pointer exists, we should hit a DCHECK, even when the allocation
// would not force a resize.
unsigned int size_allocated;
ASSERT_DEATH(transfer_buffer_->AllocUpTo(1, &size_allocated),
"outstanding_result_pointer_");
}
TEST_F(TransferBufferTest, TwoScopedResultPtrs) {
Initialize();
// Attempting to create two ScopedResultPtrs at the same time should DCHECK.
ScopedResultPtr<int> ptr(transfer_buffer_.get());
ASSERT_DEATH(ScopedResultPtr<int>(transfer_buffer_.get()),
"outstanding_result_pointer_");
}
#endif // DCHECK_IS_ON()
#endif // defined(GTEST_HAS_DEATH_TEST)
} // namespace gpu