Commit b2fe4f85 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Rename metadata member variables

The slots_ member was always misnamed, as it is really an array of
metadata _about_ slots, not the slots (pages) themselves. The
slot_metadata member variable would be clearer as 'metadata_addr' to
denote that it's an address. These renames are in anticipation of an
upcoming change that will make slots/metadata more distinct (no 1:1
mapping.)

Bug: 939142
Change-Id: I58e5f8035ea571fe7c0b0a4fb6bb0114580984e8
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1507217
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Auto-Submit: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Cr-Commit-Position: refs/heads/master@{#638659}
parent 7a61389f
...@@ -76,8 +76,8 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages, size_t total_pages) { ...@@ -76,8 +76,8 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages, size_t total_pages) {
std::next(free_slot_ring_buffer_.begin(), total_pages)); std::next(free_slot_ring_buffer_.begin(), total_pages));
} }
slots_ = std::make_unique<AllocatorState::SlotMetadata[]>(total_pages); metadata_ = std::make_unique<AllocatorState::SlotMetadata[]>(total_pages);
state_.slot_metadata = reinterpret_cast<uintptr_t>(slots_.get()); state_.metadata_addr = reinterpret_cast<uintptr_t>(metadata_.get());
} }
GuardedPageAllocator::~GuardedPageAllocator() { GuardedPageAllocator::~GuardedPageAllocator() {
...@@ -128,13 +128,13 @@ void GuardedPageAllocator::Deallocate(void* ptr) { ...@@ -128,13 +128,13 @@ void GuardedPageAllocator::Deallocate(void* ptr) {
// Check for a call to free() with an incorrect pointer (e.g. the pointer does // Check for a call to free() with an incorrect pointer (e.g. the pointer does
// not match the allocated pointer.) // not match the allocated pointer.)
if (addr != slots_[slot].alloc_ptr) { if (addr != metadata_[slot].alloc_ptr) {
state_.free_invalid_address = addr; state_.free_invalid_address = addr;
__builtin_trap(); __builtin_trap();
} }
// Check for double free. // Check for double free.
if (slots_[slot].deallocation_occurred.exchange(true)) { if (metadata_[slot].deallocation_occurred.exchange(true)) {
state_.double_free_address = addr; state_.double_free_address = addr;
// TODO(https://crbug.com/925447): The other thread may not be done writing // TODO(https://crbug.com/925447): The other thread may not be done writing
// a stack trace so we could spin here until it's read; however, it's also // a stack trace so we could spin here until it's read; however, it's also
...@@ -156,8 +156,8 @@ size_t GuardedPageAllocator::GetRequestedSize(const void* ptr) const { ...@@ -156,8 +156,8 @@ size_t GuardedPageAllocator::GetRequestedSize(const void* ptr) const {
CHECK(PointerIsMine(ptr)); CHECK(PointerIsMine(ptr));
const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr); const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
size_t slot = state_.AddrToSlot(state_.GetPageAddr(addr)); size_t slot = state_.AddrToSlot(state_.GetPageAddr(addr));
DCHECK_EQ(addr, slots_[slot].alloc_ptr); DCHECK_EQ(addr, metadata_[slot].alloc_ptr);
return slots_[slot].alloc_size; return metadata_[slot].alloc_size;
} }
size_t GuardedPageAllocator::RegionSize() const { size_t GuardedPageAllocator::RegionSize() const {
...@@ -194,34 +194,35 @@ void GuardedPageAllocator::FreeSlot(size_t slot) { ...@@ -194,34 +194,35 @@ void GuardedPageAllocator::FreeSlot(size_t slot) {
void GuardedPageAllocator::RecordAllocationMetadata(size_t slot, void GuardedPageAllocator::RecordAllocationMetadata(size_t slot,
size_t size, size_t size,
void* ptr) { void* ptr) {
slots_[slot].alloc_size = size; metadata_[slot].alloc_size = size;
slots_[slot].alloc_ptr = reinterpret_cast<uintptr_t>(ptr); metadata_[slot].alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
void* trace[AllocatorState::kMaxStackFrames]; void* trace[AllocatorState::kMaxStackFrames];
size_t len = size_t len =
base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames); base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames);
slots_[slot].alloc.trace_len = Pack(reinterpret_cast<uintptr_t*>(trace), len, metadata_[slot].alloc.trace_len =
slots_[slot].alloc.packed_trace, Pack(reinterpret_cast<uintptr_t*>(trace), len,
sizeof(slots_[slot].alloc.packed_trace)); metadata_[slot].alloc.packed_trace,
slots_[slot].alloc.tid = ReportTid(); sizeof(metadata_[slot].alloc.packed_trace));
slots_[slot].alloc.trace_collected = true; metadata_[slot].alloc.tid = ReportTid();
metadata_[slot].alloc.trace_collected = true;
slots_[slot].dealloc.tid = base::kInvalidThreadId;
slots_[slot].dealloc.trace_len = 0; metadata_[slot].dealloc.tid = base::kInvalidThreadId;
slots_[slot].dealloc.trace_collected = false; metadata_[slot].dealloc.trace_len = 0;
slots_[slot].deallocation_occurred = false; metadata_[slot].dealloc.trace_collected = false;
metadata_[slot].deallocation_occurred = false;
} }
void GuardedPageAllocator::RecordDeallocationMetadata(size_t slot) { void GuardedPageAllocator::RecordDeallocationMetadata(size_t slot) {
void* trace[AllocatorState::kMaxStackFrames]; void* trace[AllocatorState::kMaxStackFrames];
size_t len = size_t len =
base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames); base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames);
slots_[slot].dealloc.trace_len = metadata_[slot].dealloc.trace_len =
Pack(reinterpret_cast<uintptr_t*>(trace), len, Pack(reinterpret_cast<uintptr_t*>(trace), len,
slots_[slot].dealloc.packed_trace, metadata_[slot].dealloc.packed_trace,
sizeof(slots_[slot].dealloc.packed_trace)); sizeof(metadata_[slot].dealloc.packed_trace));
slots_[slot].dealloc.tid = ReportTid(); metadata_[slot].dealloc.tid = ReportTid();
slots_[slot].dealloc.trace_collected = true; metadata_[slot].dealloc.trace_collected = true;
} }
uintptr_t GuardedPageAllocator::GetCrashKeyAddress() const { uintptr_t GuardedPageAllocator::GetCrashKeyAddress() const {
......
...@@ -124,7 +124,7 @@ class GWP_ASAN_EXPORT GuardedPageAllocator { ...@@ -124,7 +124,7 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
// We dynamically allocate the SlotMetadata array to avoid allocating // We dynamically allocate the SlotMetadata array to avoid allocating
// extraneous memory for when total_pages < kGpaMaxPages. // extraneous memory for when total_pages < kGpaMaxPages.
std::unique_ptr<AllocatorState::SlotMetadata[]> slots_; std::unique_ptr<AllocatorState::SlotMetadata[]> metadata_;
// Required for a singleton to access the constructor. // Required for a singleton to access the constructor.
friend base::NoDestructor<GuardedPageAllocator>; friend base::NoDestructor<GuardedPageAllocator>;
......
...@@ -31,7 +31,7 @@ AllocatorState::GetMetadataReturnType AllocatorState::GetMetadataForAddress( ...@@ -31,7 +31,7 @@ AllocatorState::GetMetadataReturnType AllocatorState::GetMetadataForAddress(
if (slot_idx >= kGpaMaxPages) if (slot_idx >= kGpaMaxPages)
return GetMetadataReturnType::kErrorBadSlot; return GetMetadataReturnType::kErrorBadSlot;
*slot_address = slot_metadata + (slot_idx * sizeof(SlotMetadata)); *slot_address = metadata_addr + (slot_idx * sizeof(SlotMetadata));
return GetMetadataReturnType::kGwpAsanCrash; return GetMetadataReturnType::kGwpAsanCrash;
} }
...@@ -53,7 +53,7 @@ bool AllocatorState::IsValid() const { ...@@ -53,7 +53,7 @@ bool AllocatorState::IsValid() const {
pages_end_addr - pages_base_addr != page_size * (total_pages * 2 + 1)) pages_end_addr - pages_base_addr != page_size * (total_pages * 2 + 1))
return false; return false;
if (!slot_metadata) if (!metadata_addr)
return false; return false;
return true; return true;
......
...@@ -140,7 +140,7 @@ class AllocatorState { ...@@ -140,7 +140,7 @@ class AllocatorState {
// Pointer to an array of metadata about every allocation, including its size, // Pointer to an array of metadata about every allocation, including its size,
// offset, and pointers to the allocation/deallocation stack traces (if // offset, and pointers to the allocation/deallocation stack traces (if
// present.) // present.)
uintptr_t slot_metadata = 0; uintptr_t metadata_addr = 0;
// Set to the address of a double freed allocation if a double free occurred. // Set to the address of a double freed allocation if a double free occurred.
uintptr_t double_free_address = 0; uintptr_t double_free_address = 0;
......
...@@ -33,7 +33,7 @@ class AllocatorStateTest : public testing::Test { ...@@ -33,7 +33,7 @@ class AllocatorStateTest : public testing::Test {
end_addr_offset + base + page_size * (total_pages * 2 + 1); end_addr_offset + base + page_size * (total_pages * 2 + 1);
// An invalid address, but it's never dereferenced in AllocatorState. // An invalid address, but it's never dereferenced in AllocatorState.
state_.slot_metadata = 0x1234; state_.metadata_addr = 0x1234;
} }
AllocatorState state_; AllocatorState state_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment