Commit 53fd1291 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Add debug allocator

This allocator is adapted from the GuardedPageAllocator in tcmalloc.
The allocator is intended to be a 'debug' allocator to help catch memory
errors like buffer overflows and use-after-frees. It does so by giving
each allocation an entire page and left- or right-aligning it between
guard pages (to catch both underflows and overflows). It also marks
pages inaccessible once they have been freed, to detect use-after-frees,
and can detect double frees as well.
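
A minimal usage sketch (illustrative only; the singleton API below is
introduced by this change, and the page count of 16 is an arbitrary
example):

  using gwp_asan::internal::GuardedPageAllocator;
  GuardedPageAllocator& gpa = GuardedPageAllocator::InitializeSingleton(16);
  char* buf = static_cast<char*>(gpa.Allocate(16));
  if (buf) {
    buf[0] = 'A';      // In-bounds write succeeds.
    // For a right-aligned allocation, buf[16] lands on the trailing guard
    // page, so writing past the end faults immediately.
    gpa.Deallocate(buf);
    // buf[0] = 'B';   // Use-after-free: the page is now inaccessible.
  }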

Forthcoming changes add an allocator shim to sample allocations to this
allocator and to add additional debugging context when crashes due to
GWP-ASan occur.

GWP-ASan is going to be launched on Windows first; however, if it’s
successful it will be ported to other platforms (and therefore also be
used by different embedders).

Bug: 896019
Change-Id: I358e1b6f91569d1d448fc7ee16d5df27ed2467aa
Reviewed-on: https://chromium-review.googlesource.com/c/1284699
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Commit-Queue: Vitaly Buka <vitalybuka@chromium.org>
Reviewed-by: Cait Phillips <caitkp@chromium.org>
Reviewed-by: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Cr-Commit-Position: refs/heads/master@{#603616}
parent c9a52dc6
components/BUILD.gn
@@ -339,7 +339,10 @@ test("components_unittests") {
   }
   if (is_win) {
-    deps += [ "//components/browser_watcher:unit_tests" ]
+    deps += [
+      "//components/browser_watcher:unit_tests",
+      "//components/gwp_asan/common:unit_tests",
+    ]
   }
   if (enable_basic_printing) {
...

components/gwp_asan/OWNERS
vitalybuka@chromium.org
vtsyrklevich@chromium.org

components/gwp_asan/README.md
GWP-ASan is a sampling debug allocator, similar to ElectricFence or Page Heap,
intended to detect heap memory errors in the wild. When crashes in a GWP-ASan
allocation are detected in the crash handler, additional debug information about
the allocation/deallocation context is added to the crash minidump.
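
A hypothetical illustration of the class of bug GWP-ASan is designed to catch
(not code from this component):

    char* p = new char[16];
    p[16] = 'x';   // Heap buffer overflow: if this allocation is sampled into
                   // GWP-ASan, the write lands on a guard page and crashes
                   // with allocation context recorded for the minidump.
    delete[] p;
    p[0] = 'y';    // Use-after-free: the freed page has been made inaccessible.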

components/gwp_asan/common/BUILD.gn
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
assert(is_win, "GWP-ASan currently only supports Windows.")
static_library("common") {
sources = [
"guarded_page_allocator.cc",
"guarded_page_allocator.h",
"guarded_page_allocator_win.cc",
]
deps = [
"//base",
]
}
source_set("unit_tests") {
testonly = true
sources = [
"guarded_page_allocator_unittest.cc",
]
deps = [
":common",
"//base/test:test_support",
"//testing/gtest",
]
}

components/gwp_asan/common/guarded_page_allocator.cc
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/gwp_asan/common/guarded_page_allocator.h"
#include "base/bits.h"
#include "base/no_destructor.h"
#include "base/process/process_metrics.h"
#include "base/rand_util.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
using base::debug::StackTrace;
namespace gwp_asan {
namespace internal {
// TODO: Delete out-of-line constexpr definitions once C++17 is in use.
constexpr size_t GuardedPageAllocator::kGpaMaxPages;
constexpr size_t GuardedPageAllocator::kGpaAllocAlignment;
constexpr size_t GuardedPageAllocator::kFreePagesNumBits;
GuardedPageAllocator& GuardedPageAllocator::InitializeSingleton(
size_t num_pages) {
static base::NoDestructor<GuardedPageAllocator> gpa(num_pages);
return *gpa;
}
GuardedPageAllocator& GuardedPageAllocator::Get() {
// The constructor will fail if it is called with num_pages = 0, forcing
// InitializeSingleton() to be called first.
return InitializeSingleton(0);
}
GuardedPageAllocator::GuardedPageAllocator(size_t num_pages) {
CHECK_GT(num_pages, 0U);
CHECK_LE(num_pages, kFreePagesNumBits);
num_pages_ = num_pages;
page_size_ = base::GetPageSize();
CHECK(MapPages());
free_pages_ =
(num_pages_ == kFreePagesNumBits) ? ~0ULL : (1ULL << num_pages_) - 1;
for (size_t i = 0; i < num_pages_; i++)
data_[i].Init();
}
GuardedPageAllocator::~GuardedPageAllocator() {
UnmapPages();
}
void* GuardedPageAllocator::Allocate(size_t size) {
size_t free_slot = ReserveSlot();
if (free_slot == SIZE_MAX)
return nullptr; // All slots are reserved.
uintptr_t free_page = SlotToAddr(free_slot);
MarkPageReadWrite(reinterpret_cast<void*>(free_page));
size_t offset;
if (base::RandInt(0, 1)) {
// Return right-aligned allocation to detect overflows.
size_t alignment =
std::min(size_t{1} << base::bits::Log2Floor(size), kGpaAllocAlignment);
offset = page_size_ - base::bits::Align(size, alignment);
} else {
// Return left-aligned allocation to detect underflows.
offset = 0;
}
// Initialize slot metadata.
data_[free_slot].RecordAllocation(size, offset);
return reinterpret_cast<void*>(free_page + offset);
}
void GuardedPageAllocator::Deallocate(void* ptr) {
CHECK(PointerIsMine(ptr));
const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
MarkPageInaccessible(reinterpret_cast<void*>(GetPageAddr(addr)));
size_t slot = AddrToSlot(GetPageAddr(addr));
DCHECK_EQ(addr, GetPageAddr(addr) + data_[slot].alloc_offset);
// Check for double free.
if (data_[slot].dealloc_trace_addr) {
double_free_detected_ = true;
*reinterpret_cast<char*>(ptr) = 'X'; // Trigger exception.
__builtin_trap();
}
// Record deallocation stack trace/thread id.
data_[slot].RecordDeallocation();
FreeSlot(slot);
}
size_t GuardedPageAllocator::GetRequestedSize(const void* ptr) const {
DCHECK(PointerIsMine(ptr));
const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
size_t slot = AddrToSlot(GetPageAddr(addr));
DCHECK_EQ(addr, GetPageAddr(addr) + data_[slot].alloc_offset);
return data_[slot].alloc_size;
}
bool GuardedPageAllocator::PointerIsMine(const void* ptr) const {
uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
return pages_base_addr_ <= addr && addr < pages_end_addr_;
}
// Selects a random slot in O(1) time by rotating the free_pages bitmap by a
// random amount, using an intrinsic to get the least-significant 1-bit after
// the rotation, and then computing the position of the bit before the rotation.
// Picking a random slot is useful for randomizing allocator behavior across
// different runs, so certain bits being more heavily biased is not a concern.
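// Worked example: with free_pages_ = 0b101 (slots 0 and 2 free) and rot = 63,
// the rotated bitmap is 0x8000000000000002, its least-significant set bit is
// at position 1, and (1 - 63 + 64) % 64 = 2, so slot 2 is selected.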
size_t GuardedPageAllocator::ReserveSlot() {
base::AutoLock lock(lock_);
if (!free_pages_)
return SIZE_MAX;
// Disable allocations after a double free is detected so that the double
// freed allocation is not reallocated while the crash handler could be
// concurrently inspecting the metadata.
if (double_free_detected_)
return SIZE_MAX;
uint64_t rot = base::RandGenerator(kFreePagesNumBits);
// Mask the right-shift amount so that rot == 0 does not trigger an undefined
// 64-bit shift.
BitMap rotated_bitmap = (free_pages_ << rot) |
(free_pages_ >> ((kFreePagesNumBits - rot) % kFreePagesNumBits));
int rotated_selection = CountTrailingZeroBits64(rotated_bitmap);
size_t selection =
(rotated_selection - rot + kFreePagesNumBits) % kFreePagesNumBits;
DCHECK_LT(selection, kFreePagesNumBits);
DCHECK(free_pages_ & (1ULL << selection));
free_pages_ &= ~(1ULL << selection);
return selection;
}
void GuardedPageAllocator::FreeSlot(size_t slot) {
DCHECK_LT(slot, kFreePagesNumBits);
BitMap bit = 1ULL << slot;
base::AutoLock lock(lock_);
DCHECK_EQ((free_pages_ & bit), 0ULL);
free_pages_ |= bit;
}
uintptr_t GuardedPageAllocator::GetPageAddr(uintptr_t addr) const {
const uintptr_t addr_mask = ~(page_size_ - 1ULL);
return addr & addr_mask;
}
uintptr_t GuardedPageAllocator::GetNearestValidPage(uintptr_t addr) const {
if (addr < first_page_addr_)
return first_page_addr_;
const uintptr_t last_page_addr = pages_end_addr_ - 2 * page_size_;
if (addr > last_page_addr)
return last_page_addr;
uintptr_t offset = addr - first_page_addr_;
// If addr is already on a valid page, just return addr.
if ((offset / page_size_) % 2 == 0)
return addr;
// ptr points to a guard page, so get nearest valid page.
const size_t kHalfPageSize = page_size_ / 2;
if ((offset / kHalfPageSize) % 2 == 0) {
return addr - kHalfPageSize; // Round down.
}
return addr + kHalfPageSize; // Round up.
}
size_t GuardedPageAllocator::GetNearestSlot(uintptr_t addr) const {
return AddrToSlot(GetPageAddr(GetNearestValidPage(addr)));
}
GuardedPageAllocator::ErrorType GuardedPageAllocator::GetErrorType(
uintptr_t addr,
bool allocated,
bool deallocated) const {
if (!allocated)
return ErrorType::kUnknown;
if (double_free_detected_)
return ErrorType::kDoubleFree;
if (deallocated)
return ErrorType::kUseAfterFree;
if (addr < first_page_addr_)
return ErrorType::kBufferUnderflow;
const uintptr_t last_page_addr = pages_end_addr_ - 2 * page_size_;
if (addr > last_page_addr)
return ErrorType::kBufferOverflow;
const uintptr_t offset = addr - first_page_addr_;
DCHECK_NE((offset / page_size_) % 2, 0ULL);
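// addr lies on a guard page. The first half of a guard page borders the
// preceding allocation page, so a fault there is most likely an overflow off
// the end of that allocation; the second half borders the following
// allocation page, indicating an underflow.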
const size_t kHalfPageSize = page_size_ / 2;
return (offset / kHalfPageSize) % 2 == 0 ? ErrorType::kBufferOverflow
: ErrorType::kBufferUnderflow;
}
uintptr_t GuardedPageAllocator::SlotToAddr(size_t slot) const {
DCHECK_LT(slot, kFreePagesNumBits);
return first_page_addr_ + 2 * slot * page_size_;
}
size_t GuardedPageAllocator::AddrToSlot(uintptr_t addr) const {
DCHECK_EQ(addr % page_size_, 0ULL);
uintptr_t offset = addr - first_page_addr_;
DCHECK_EQ((offset / page_size_) % 2, 0ULL);
size_t slot = offset / page_size_ / 2;
DCHECK_LT(slot, kFreePagesNumBits);
return slot;
}
GuardedPageAllocator::SlotMetadata::SlotMetadata() {}
GuardedPageAllocator::SlotMetadata::~SlotMetadata() {
if (!stacktrace_alloc)
return;
Reset();
free(stacktrace_alloc);
free(stacktrace_dealloc);
}
void GuardedPageAllocator::SlotMetadata::Init() {
// new is not used so that we can explicitly call the constructor when we
// want to collect a stack trace.
stacktrace_alloc =
static_cast<StackTrace*>(malloc(sizeof(*stacktrace_alloc)));
CHECK(stacktrace_alloc);
stacktrace_dealloc =
static_cast<StackTrace*>(malloc(sizeof(*stacktrace_dealloc)));
CHECK(stacktrace_dealloc);
}
void GuardedPageAllocator::SlotMetadata::Reset() {
// Destruct previous allocation/deallocation traces. The constructor was only
// called if (de)alloc_trace_addr is non-null.
if (alloc_trace_addr)
stacktrace_alloc->~StackTrace();
if (dealloc_trace_addr)
stacktrace_dealloc->~StackTrace();
}
void GuardedPageAllocator::SlotMetadata::RecordAllocation(size_t size,
size_t offset) {
Reset();
alloc_size = size;
alloc_offset = offset;
alloc_tid = base::PlatformThread::CurrentId();
new (stacktrace_alloc) StackTrace();
alloc_trace_addr = stacktrace_alloc->Addresses(&alloc_trace_len);
dealloc_tid = base::kInvalidThreadId;
dealloc_trace_addr = nullptr;
dealloc_trace_len = 0;
}
void GuardedPageAllocator::SlotMetadata::RecordDeallocation() {
dealloc_tid = base::PlatformThread::CurrentId();
new (stacktrace_dealloc) StackTrace();
dealloc_trace_addr = stacktrace_dealloc->Addresses(&dealloc_trace_len);
}
} // namespace internal
} // namespace gwp_asan

components/gwp_asan/common/guarded_page_allocator.h
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_GWP_ASAN_COMMON_GUARDED_PAGE_ALLOCATOR_H_
#define COMPONENTS_GWP_ASAN_COMMON_GUARDED_PAGE_ALLOCATOR_H_
#include <atomic>
#include "base/debug/stack_trace.h"
#include "base/gtest_prod_util.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
namespace gwp_asan {
namespace internal {
// Method to count trailing zero bits in a uint64_t (identical to
// base::bits::CountTrailingZeroBits64 except that it also works on 32-bit
// platforms.)
unsigned CountTrailingZeroBits64(uint64_t x);
class GuardedPageAllocator {
public:
// Maximum number of pages this class can allocate.
static constexpr size_t kGpaMaxPages = 64;
// Maximum alignment for all returned allocations.
static constexpr size_t kGpaAllocAlignment = 16;
enum class ErrorType {
kUseAfterFree = 0,
kBufferUnderflow = 1,
kBufferOverflow = 2,
kDoubleFree = 3,
kUnknown = 4,
};
// Initialize the singleton. Used to configure the allocator to map memory
// for num_pages pages (excluding guard pages). num_pages must be in the range
// [1, kGpaMaxPages].
static GuardedPageAllocator& InitializeSingleton(size_t num_pages);
// Returns the global allocator singleton.
static GuardedPageAllocator& Get();
// On success, returns a pointer to size bytes of page-guarded memory. On
// failure, returns nullptr. The allocation is not guaranteed to be
// zero-filled. Failure can occur if memory could not be mapped or protected,
// or if all guarded pages are already allocated.
//
// Precondition: size <= page_size_
void* Allocate(size_t size);
// Deallocates memory pointed to by ptr. ptr must have been previously
// returned by a call to Allocate.
void Deallocate(void* ptr);
// Returns the size requested when ptr was allocated. ptr must have been
// previously returned by a call to Allocate.
size_t GetRequestedSize(const void* ptr) const;
// Returns true if ptr points to memory managed by this class.
bool PointerIsMine(const void* ptr) const;
private:
using BitMap = uint64_t;
// Structure for storing data about a slot.
struct SlotMetadata {
SlotMetadata();
~SlotMetadata();
// Allocate internal data (StackTraces) for this slot. StackTrace objects
// are large so we only allocate them if they're required (instead of
// having them be statically allocated in the SlotMetadata itself.)
void Init();
// Update slot metadata on an allocation with the given size and offset.
void RecordAllocation(size_t size, size_t offset);
// Update slot metadata on a deallocation.
void RecordDeallocation();
// Size of the allocation
size_t alloc_size = 0;
// How far into the page is the returned allocation.
size_t alloc_offset = 0;
// (De)allocation thread id or base::kInvalidThreadId if no (de)allocation
// occurred.
base::PlatformThreadId alloc_tid = base::kInvalidThreadId;
base::PlatformThreadId dealloc_tid = base::kInvalidThreadId;
// Pointer to stack trace addresses or null if no (de)allocation occurred.
const void* const* alloc_trace_addr = nullptr;
const void* const* dealloc_trace_addr = nullptr;
// Stack trace length or 0 if no (de)allocation occurred.
size_t alloc_trace_len = 0;
size_t dealloc_trace_len = 0;
private:
// Call destructors on stacktrace_alloc and stacktrace_dealloc if
// constructors for them have previously been called.
void Reset();
// StackTrace objects for this slot, they are allocated by Init() and only
// used internally, (de)alloc_trace_addr/len should be used by external
// consumers of the stack trace data.
base::debug::StackTrace* stacktrace_alloc = nullptr;
base::debug::StackTrace* stacktrace_dealloc = nullptr;
};
// Number of bits in the free_pages_ bitmap.
static constexpr size_t kFreePagesNumBits = sizeof(BitMap) * 8;
// Configures this allocator to map memory for num_pages pages (excluding
// guard pages). num_pages must be in the range [1, kGpaMaxPages].
//
// Marked private so that the singleton Get() method is the only way to obtain
// an instance.
explicit GuardedPageAllocator(size_t num_pages);
// Unmaps memory allocated by this class.
//
// This method should be called only once to complete destruction.
~GuardedPageAllocator();
// Maps pages into memory and sets pages_base_addr_, first_page_addr_, and
// pages_end_addr_ on success. Returns true on success, false on failure.
bool MapPages();
// Unmaps pages.
void UnmapPages();
// Mark page read-write or inaccessible.
void MarkPageReadWrite(void*);
void MarkPageInaccessible(void*);
// Reserves and returns a slot randomly selected from the free slots in
// free_pages_. Returns SIZE_MAX if no slots available.
size_t ReserveSlot() LOCKS_EXCLUDED(lock_);
// Marks the specified slot as unreserved.
void FreeSlot(size_t slot) LOCKS_EXCLUDED(lock_);
// Returns the address of the page that addr resides on.
uintptr_t GetPageAddr(uintptr_t addr) const;
// Returns an address somewhere on the valid page nearest to addr.
uintptr_t GetNearestValidPage(uintptr_t addr) const;
// Returns the slot number for the page nearest to addr.
size_t GetNearestSlot(uintptr_t addr) const;
// Returns the likely error type given an exception address and whether it
// has previously been allocated and deallocated.
ErrorType GetErrorType(uintptr_t addr,
bool allocated,
bool deallocated) const;
uintptr_t SlotToAddr(size_t slot) const;
size_t AddrToSlot(uintptr_t addr) const;
// Allocator lock that protects free_pages_.
base::Lock lock_;
// Maps each bit to one page.
// Bit=1: Free. Bit=0: Reserved.
BitMap free_pages_ GUARDED_BY(lock_) = 0;
// Information about every allocation, including its size, offset, and
// pointers to the allocation/deallocation stack traces (if present.)
SlotMetadata data_[kFreePagesNumBits] = {};
uintptr_t pages_base_addr_ = 0; // Points to start of mapped region.
uintptr_t pages_end_addr_ = 0; // Points to the end of mapped region.
uintptr_t first_page_addr_ = 0; // Points to first allocatable page.
size_t num_pages_ = 0; // Number of pages mapped (excluding guard pages).
size_t page_size_ = 0; // Page size.
// Set to true if a double free has occurred.
std::atomic<bool> double_free_detected_{false};
// Required to access the constructor in Get().
friend base::NoDestructor<GuardedPageAllocator>;
DISALLOW_COPY_AND_ASSIGN(GuardedPageAllocator);
friend class GuardedPageAllocatorTest;
FRIEND_TEST_ALL_PREFIXES(GuardedPageAllocatorTest,
GetNearestValidPageEdgeCases);
FRIEND_TEST_ALL_PREFIXES(GuardedPageAllocatorTest, GetErrorTypeEdgeCases);
};
} // namespace internal
} // namespace gwp_asan
#endif // COMPONENTS_GWP_ASAN_COMMON_GUARDED_PAGE_ALLOCATOR_H_

components/gwp_asan/common/guarded_page_allocator_unittest.cc
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/gwp_asan/common/guarded_page_allocator.h"
#include <array>
#include <set>
#include "base/bits.h"
#include "base/process/process_metrics.h"
#include "base/test/gtest_util.h"
#include "base/threading/simple_thread.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace gwp_asan {
namespace internal {
static constexpr size_t kGpaMaxPages = GuardedPageAllocator::kGpaMaxPages;
class GuardedPageAllocatorTest : public testing::Test {
protected:
explicit GuardedPageAllocatorTest(size_t num_pages = kGpaMaxPages)
: gpa_(num_pages) {}
// Get a left- or right-aligned allocation (or nullptr on error).
char* GetAlignedAllocation(bool left_aligned, size_t sz) {
for (size_t i = 0; i < 100; i++) {
void* alloc = gpa_.Allocate(sz);
if (!alloc)
return nullptr;
uintptr_t addr = reinterpret_cast<uintptr_t>(alloc);
bool is_left_aligned =
(base::bits::Align(addr, base::GetPageSize()) == addr);
if (is_left_aligned == left_aligned)
return reinterpret_cast<char*>(addr);
gpa_.Deallocate(alloc);
}
return nullptr;
}
GuardedPageAllocator gpa_;
};
TEST_F(GuardedPageAllocatorTest, SingleAllocDealloc) {
char* buf = reinterpret_cast<char*>(gpa_.Allocate(base::GetPageSize()));
EXPECT_NE(buf, nullptr);
EXPECT_TRUE(gpa_.PointerIsMine(buf));
memset(buf, 'A', base::GetPageSize());
EXPECT_DEATH(buf[base::GetPageSize()] = 'A', "");
gpa_.Deallocate(buf);
EXPECT_DEATH(buf[0] = 'B', "");
EXPECT_DEATH(gpa_.Deallocate(buf), "");
}
TEST_F(GuardedPageAllocatorTest, PointerIsMine) {
void* buf = gpa_.Allocate(1);
auto malloc_ptr = std::make_unique<char>();
EXPECT_TRUE(gpa_.PointerIsMine(buf));
gpa_.Deallocate(buf);
EXPECT_TRUE(gpa_.PointerIsMine(buf));
int stack_var;
EXPECT_FALSE(gpa_.PointerIsMine(&stack_var));
EXPECT_FALSE(gpa_.PointerIsMine(malloc_ptr.get()));
}
TEST_F(GuardedPageAllocatorTest, LeftAlignedAllocation) {
char* buf = GetAlignedAllocation(true, 16);
ASSERT_NE(buf, nullptr);
EXPECT_DEATH(buf[-1] = 'A', "");
buf[0] = 'A';
buf[base::GetPageSize() - 1] = 'A';
gpa_.Deallocate(buf);
}
TEST_F(GuardedPageAllocatorTest, RightAlignedAllocation) {
char* buf =
GetAlignedAllocation(false, GuardedPageAllocator::kGpaAllocAlignment);
ASSERT_NE(buf, nullptr);
buf[-1] = 'A';
buf[0] = 'A';
EXPECT_DEATH(buf[GuardedPageAllocator::kGpaAllocAlignment] = 'A', "");
gpa_.Deallocate(buf);
}
TEST_F(GuardedPageAllocatorTest, GetNearestValidPageEdgeCases) {
EXPECT_EQ(gpa_.GetPageAddr(gpa_.GetNearestValidPage(gpa_.pages_base_addr_)),
gpa_.first_page_addr_);
EXPECT_EQ(
gpa_.GetPageAddr(gpa_.GetNearestValidPage(gpa_.pages_end_addr_ - 1)),
gpa_.pages_end_addr_ - (2 * gpa_.page_size_));
}
TEST_F(GuardedPageAllocatorTest, GetErrorTypeEdgeCases) {
EXPECT_EQ(gpa_.GetErrorType(gpa_.pages_base_addr_, true, false),
GuardedPageAllocator::ErrorType::kBufferUnderflow);
EXPECT_EQ(gpa_.GetErrorType(gpa_.pages_end_addr_ - 1, true, false),
GuardedPageAllocator::ErrorType::kBufferOverflow);
}
class GuardedPageAllocatorParamTest
: public GuardedPageAllocatorTest,
public testing::WithParamInterface<size_t> {
protected:
GuardedPageAllocatorParamTest() : GuardedPageAllocatorTest(GetParam()) {}
};
TEST_P(GuardedPageAllocatorParamTest, AllocDeallocAllPages) {
size_t num_pages = GetParam();
char* bufs[kGpaMaxPages];
for (size_t i = 0; i < num_pages; i++) {
bufs[i] = reinterpret_cast<char*>(gpa_.Allocate(1));
EXPECT_NE(bufs[i], nullptr);
EXPECT_TRUE(gpa_.PointerIsMine(bufs[i]));
}
EXPECT_EQ(gpa_.Allocate(1), nullptr);
gpa_.Deallocate(bufs[0]);
bufs[0] = reinterpret_cast<char*>(gpa_.Allocate(1));
EXPECT_NE(bufs[0], nullptr);
EXPECT_TRUE(gpa_.PointerIsMine(bufs[0]));
// Ensure that no allocation is returned twice.
std::set<char*> ptr_set;
for (size_t i = 0; i < num_pages; i++)
ptr_set.insert(bufs[i]);
EXPECT_EQ(ptr_set.size(), num_pages);
for (size_t i = 0; i < num_pages; i++) {
SCOPED_TRACE(i);
// Ensure all allocations are valid and writable.
bufs[i][0] = 'A';
gpa_.Deallocate(bufs[i]);
// Performing death tests post-allocation times out on Windows.
}
}
INSTANTIATE_TEST_CASE_P(VaryNumPages,
GuardedPageAllocatorParamTest,
testing::Values(1, kGpaMaxPages / 2, kGpaMaxPages));
class ThreadedAllocCountDelegate : public base::DelegateSimpleThread::Delegate {
public:
explicit ThreadedAllocCountDelegate(
GuardedPageAllocator* gpa,
std::array<void*, kGpaMaxPages>* allocations)
: gpa_(gpa), allocations_(allocations) {}
void Run() override {
for (size_t i = 0; i < kGpaMaxPages; i++) {
(*allocations_)[i] = gpa_->Allocate(1);
}
}
private:
GuardedPageAllocator* gpa_;
std::array<void*, kGpaMaxPages>* allocations_;
DISALLOW_COPY_AND_ASSIGN(ThreadedAllocCountDelegate);
};
// Test that no pages are double-allocated or left unallocated, and that no
// extra pages are allocated when there's concurrent calls to Allocate().
TEST_F(GuardedPageAllocatorTest, ThreadedAllocCount) {
constexpr size_t num_threads = 2;
std::array<void*, kGpaMaxPages> allocations[num_threads];
{
base::DelegateSimpleThreadPool threads("alloc_threads", num_threads);
threads.Start();
std::vector<std::unique_ptr<ThreadedAllocCountDelegate>> delegates;
for (size_t i = 0; i < num_threads; i++) {
auto delegate =
std::make_unique<ThreadedAllocCountDelegate>(&gpa_, &allocations[i]);
threads.AddWork(delegate.get());
delegates.push_back(std::move(delegate));
}
threads.JoinAll();
}
std::set<void*> allocations_set;
for (size_t i = 0; i < num_threads; i++) {
for (size_t j = 0; j < kGpaMaxPages; j++) {
allocations_set.insert(allocations[i][j]);
}
}
allocations_set.erase(nullptr);
EXPECT_EQ(allocations_set.size(), kGpaMaxPages);
}
class ThreadedHighContentionDelegate
: public base::DelegateSimpleThread::Delegate {
public:
explicit ThreadedHighContentionDelegate(GuardedPageAllocator* gpa)
: gpa_(gpa) {}
void Run() override {
char* buf;
while ((buf = reinterpret_cast<char*>(gpa_->Allocate(1))) == nullptr) {
base::PlatformThread::Sleep(base::TimeDelta::FromNanoseconds(5000));
}
// Verify that no other thread has access to this page.
EXPECT_EQ(buf[0], 0);
// Mark this page and allow some time for another thread to potentially
// gain access to this page.
buf[0] = 'A';
base::PlatformThread::Sleep(base::TimeDelta::FromNanoseconds(10000));
EXPECT_EQ(buf[0], 'A');
// Unmark this page and deallocate.
buf[0] = 0;
gpa_->Deallocate(buf);
}
private:
GuardedPageAllocator* gpa_;
DISALLOW_COPY_AND_ASSIGN(ThreadedHighContentionDelegate);
};
// Test that allocator remains in consistent state under high contention and
// doesn't double-allocate pages or fail to deallocate pages.
TEST_F(GuardedPageAllocatorTest, ThreadedHighContention) {
constexpr size_t num_threads = 1000;
{
base::DelegateSimpleThreadPool threads("page_writers", num_threads);
threads.Start();
std::vector<std::unique_ptr<ThreadedHighContentionDelegate>> delegates;
for (size_t i = 0; i < num_threads; i++) {
auto delegate = std::make_unique<ThreadedHighContentionDelegate>(&gpa_);
threads.AddWork(delegate.get());
delegates.push_back(std::move(delegate));
}
threads.JoinAll();
}
// Verify all pages have been deallocated now that all threads are done.
for (size_t i = 0; i < kGpaMaxPages; i++)
EXPECT_NE(gpa_.Allocate(1), nullptr);
}
} // namespace internal
} // namespace gwp_asan

components/gwp_asan/common/guarded_page_allocator_win.cc
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <windows.h>
#include "components/gwp_asan/common/guarded_page_allocator.h"
#include "base/bits.h"
#include "base/logging.h"
#include "build/build_config.h"
namespace gwp_asan {
namespace internal {
unsigned CountTrailingZeroBits64(uint64_t x) {
#if defined(ARCH_CPU_64_BITS)
return base::bits::CountTrailingZeroBits(x);
#else
// Windows 32-bit builds do not support CountTrailingZeroBits on a uint64_t.
// TODO(vtsyrklevich): Fix this in base::bits instead.
uint32_t right = static_cast<uint32_t>(x);
unsigned right_trailing = base::bits::CountTrailingZeroBits(right);
if (right_trailing < 32)
return right_trailing;
uint32_t left = static_cast<uint32_t>(x >> 32);
unsigned left_trailing = base::bits::CountTrailingZeroBits(left);
return left_trailing + right_trailing;
#endif
}
// TODO(vtsyrklevich): See if the platform-specific memory allocation and
// protection routines can be broken out into base/ and merged with those used
// for PartitionAlloc/ProtectedMemory.
bool GuardedPageAllocator::MapPages() {
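// The mapped region is laid out as
//   | guard | slot 0 | guard | slot 1 | ... | slot num_pages_-1 | guard |
// i.e. allocation pages at odd page offsets from the base, each bracketed by
// inaccessible guard pages.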
size_t len = (2 * num_pages_ + 1) * page_size_;
void* base_ptr = VirtualAlloc(nullptr, len, MEM_RESERVE, PAGE_NOACCESS);
if (!base_ptr) {
DPLOG(ERROR) << "Failed to reserve guarded allocator region";
return false;
}
uintptr_t base_addr = reinterpret_cast<uintptr_t>(base_ptr);
// Commit the pages used for allocations.
for (size_t i = 0; i < num_pages_; i++) {
uintptr_t address = base_addr + page_size_ * ((2 * i) + 1);
LPVOID ret = VirtualAlloc(reinterpret_cast<LPVOID>(address), page_size_,
MEM_COMMIT, PAGE_NOACCESS);
if (!ret) {
PLOG(ERROR) << "Failed to commit allocation page";
UnmapPages();
return false;
}
}
pages_base_addr_ = base_addr;
first_page_addr_ = pages_base_addr_ + page_size_;
pages_end_addr_ = pages_base_addr_ + len;
return true;
}
void GuardedPageAllocator::UnmapPages() {
DCHECK(pages_base_addr_);
BOOL err =
VirtualFree(reinterpret_cast<void*>(pages_base_addr_), 0, MEM_RELEASE);
DCHECK(err);
(void)err;
}
void GuardedPageAllocator::MarkPageReadWrite(void* ptr) {
DWORD old_prot;
BOOL err = VirtualProtect(ptr, page_size_, PAGE_READWRITE, &old_prot);
PCHECK(err != 0) << "VirtualProtect";
}
void GuardedPageAllocator::MarkPageInaccessible(void* ptr) {
DWORD old_prot;
BOOL err = VirtualProtect(ptr, page_size_, PAGE_NOACCESS, &old_prot);
PCHECK(err != 0) << "VirtualProtect";
}
} // namespace internal
} // namespace gwp_asan