Commit 380a7a60 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Allow specifying allocation alignment

Allow specifying an allocation alignment to Allocate() that works
similarly to the alignment parameter to aligned_alloc(), i.e. the
alignment parameter must be a power of two no larger than the size.
Unlike aligned_alloc(), it doesn't verify that the size is a multiple
of the alignment, since that's not enforced by some functions like
posix_memalign().

Default alignment behavior stays the same if no alignment is specified.
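
As a reference only, here is a minimal standalone sketch (not part of this
change) of how the default alignment is derived when align is zero; it
mirrors the Log2Floor-based computation in Allocate(), with the 16-byte cap
standing in for kGpaAllocAlignment from the header:

  #include <algorithm>
  #include <cstddef>

  // Hypothetical stand-in for kGpaAllocAlignment (16 bytes in the header).
  constexpr size_t kDefaultAlignmentCap = 16;

  // Largest power of two <= size, capped at 16; equivalent to
  // std::min(size_t{1} << base::bits::Log2Floor(size), kGpaAllocAlignment).
  size_t DefaultAlignment(size_t size) {
    size_t pow2 = 1;
    while (pow2 * 2 <= size)
      pow2 *= 2;
    return std::min(pow2, kDefaultAlignmentCap);
  }

  // e.g. DefaultAlignment(5) == 4, DefaultAlignment(9) == 8,
  //      DefaultAlignment(513) == 16.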

Bug: 896019
Change-Id: I542314aa2405265af03cd6519f5c997ed1e95152
Reviewed-on: https://chromium-review.googlesource.com/c/1310563
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Cr-Commit-Position: refs/heads/master@{#605063}
parent b50ac5aa
@@ -53,7 +53,20 @@ GuardedPageAllocator::~GuardedPageAllocator() {
UnmapPages();
}
void* GuardedPageAllocator::Allocate(size_t size) {
void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
CHECK_LE(size, page_size_);
if (!size)
return nullptr;
// Default alignment is size's next smallest power-of-two, up to
// kGpaAllocAlignment.
if (!align) {
align =
std::min(size_t{1} << base::bits::Log2Floor(size), kGpaAllocAlignment);
}
CHECK_LE(align, size);
CHECK(base::bits::IsPowerOfTwo(align));
size_t free_slot = ReserveSlot();
if (free_slot == SIZE_MAX)
return nullptr; // All slots are reserved.
@@ -62,15 +75,12 @@ void* GuardedPageAllocator::Allocate(size_t size) {
MarkPageReadWrite(reinterpret_cast<void*>(free_page));
size_t offset;
if (base::RandInt(0, 1)) {
if (base::RandInt(0, 1))
// Return right-aligned allocation to detect overflows.
size_t alignment =
std::min(size_t{1} << base::bits::Log2Floor(size), kGpaAllocAlignment);
offset = page_size_ - base::bits::Align(size, alignment);
} else {
offset = page_size_ - base::bits::Align(size, align);
else
// Return left-aligned allocation to detect underflows.
offset = 0;
}
// Initialize slot metadata.
data_[free_slot].RecordAllocation(size, offset);
......
@@ -28,7 +28,7 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// Maximum number of pages this class can allocate.
static constexpr size_t kGpaMaxPages = 64;
// Maximum alignment for all returned allocations.
// Default maximum alignment for all returned allocations.
static constexpr size_t kGpaAllocAlignment = 16;
enum class ErrorType {
@@ -52,8 +52,12 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// zero-filled. Failure can occur if memory could not be mapped or protected,
// or if all guarded pages are already allocated.
//
// Precondition: size <= page_size_
void* Allocate(size_t size);
// The align parameter specifies a power of two to align the allocation up to.
// It must be less than or equal to the allocation size. If it's left as zero,
// the allocator chooses a default alignment.
//
// Precondition: align <= size <= page_size_
void* Allocate(size_t size, size_t align = 0);
// Deallocates memory pointed to by ptr. ptr must have been previously
// returned by a call to Allocate.
......
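
To show what the new signature looks like at a call site, a brief usage
sketch (not from the change itself; the helper function is hypothetical and
the allocator instance is assumed to be already constructed and initialized):

  // Hypothetical example; header include and allocator construction omitted.
  void ExampleUsage(GuardedPageAllocator& gpa) {
    void* a = gpa.Allocate(/*size=*/9, /*align=*/8);  // explicitly 8-byte aligned
    void* b = gpa.Allocate(/*size=*/9);               // align defaults to 0, so the
                                                      // allocator picks 8 (largest
                                                      // power of two <= 9, capped at 16)
    // gpa.Allocate(5, 8) would CHECK-fail: align must be <= size.
    // gpa.Allocate(5, 3) would CHECK-fail: align must be a power of two.
    if (a)
      gpa.Deallocate(a);
    if (b)
      gpa.Deallocate(b);
  }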
@@ -24,9 +24,9 @@ class GuardedPageAllocatorTest : public testing::Test {
: gpa_(num_pages) {}
// Get a left- or right-aligned allocation (or nullptr on error).
char* GetAlignedAllocation(bool left_aligned, size_t sz) {
char* GetAlignedAllocation(bool left_aligned, size_t sz, size_t align = 0) {
for (size_t i = 0; i < 100; i++) {
void* alloc = gpa_.Allocate(sz);
void* alloc = gpa_.Allocate(sz, align);
if (!alloc)
return nullptr;
@@ -42,6 +42,18 @@ class GuardedPageAllocatorTest : public testing::Test {
return nullptr;
}
// Helper that returns the offset of a right-aligned allocation in the
// allocation's page.
uintptr_t GetRightAlignedAllocationOffset(size_t size, size_t align) {
const uintptr_t page_mask = base::GetPageSize() - 1;
void* buf = GetAlignedAllocation(false, size, align);
CHECK(buf);
gpa_.Deallocate(buf);
return reinterpret_cast<uintptr_t>(buf) & page_mask;
}
GuardedPageAllocator gpa_;
};
@@ -86,6 +98,26 @@ TEST_F(GuardedPageAllocatorTest, RightAlignedAllocation) {
gpa_.Deallocate(buf);
}
TEST_F(GuardedPageAllocatorTest, AllocationAlignment) {
const uintptr_t page_size = base::GetPageSize();
EXPECT_EQ(GetRightAlignedAllocationOffset(9, 1), page_size - 9);
EXPECT_EQ(GetRightAlignedAllocationOffset(9, 2), page_size - 10);
EXPECT_EQ(GetRightAlignedAllocationOffset(9, 4), page_size - 12);
EXPECT_EQ(GetRightAlignedAllocationOffset(9, 8), page_size - 16);
EXPECT_EQ(GetRightAlignedAllocationOffset(513, 512), page_size - 1024);
// Default alignment aligns up to the next lowest power of two.
EXPECT_EQ(GetRightAlignedAllocationOffset(5, 0), page_size - 8);
EXPECT_EQ(GetRightAlignedAllocationOffset(9, 0), page_size - 16);
// But only up to 16 bytes.
EXPECT_EQ(GetRightAlignedAllocationOffset(513, 0), page_size - (512 + 16));
EXPECT_DEATH(GetRightAlignedAllocationOffset(5, 8), "");
EXPECT_DEATH(GetRightAlignedAllocationOffset(5, 3), "");
}
TEST_F(GuardedPageAllocatorTest, GetNearestValidPageEdgeCases) {
EXPECT_EQ(gpa_.GetPageAddr(gpa_.GetNearestValidPage(gpa_.pages_base_addr_)),
gpa_.first_page_addr_);
......