Commit 08f74b8d authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Refactor SlotMetadata methods

In an upcoming change, I will split the GuardedPageAllocator into two
objects. This change moves the SlotMetadata methods into the
GuardedPageAllocator itself, because SlotMetadata will live in the base
object but these methods should not.

Bug: 896019
Change-Id: Ia98e966ce6cc8aa843bcaa4ce322c1b355e508a1
Reviewed-on: https://chromium-review.googlesource.com/c/1341062
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Cr-Commit-Position: refs/heads/master@{#609352}
parent fe8ca988
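For illustration, here is a minimal sketch (not the actual Chromium code; class and member names are simplified stand-ins) of the shape of this refactor: per-slot behavior moves from the nested metadata struct onto the allocator itself, keyed by slot index, leaving SlotMetadata as plain data that can later live in a base object.

#include <cstddef>

// Before: behavior lives on the nested per-slot struct.
struct AllocatorBefore {
  struct SlotMetadata {
    size_t alloc_size = 0;
    void RecordAllocation(size_t size) { alloc_size = size; }
  };
  SlotMetadata data_[4];
  void Allocate(size_t slot, size_t size) {
    data_[slot].RecordAllocation(size);
  }
};

// After: SlotMetadata is plain data; the allocator owns the behavior and
// addresses slots by index, so the metadata can move to a base object
// without dragging allocator-specific methods along.
struct AllocatorAfter {
  struct SlotMetadata {
    size_t alloc_size = 0;
  };
  SlotMetadata data_[4];
  void RecordAllocationInSlot(size_t slot, size_t size) {
    data_[slot].alloc_size = size;
  }
};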
guarded_page_allocator.cc:

@@ -39,16 +39,13 @@ void GuardedPageAllocator::Init(size_t num_pages) {
         (num_pages_ == kGpaMaxPages) ? ~0ULL : (1ULL << num_pages_) - 1;
   }
-  for (size_t i = 0; i < num_pages_; i++)
-    data_[i].Init();
+  AllocateStackTraces();
 }
 
 GuardedPageAllocator::~GuardedPageAllocator() {
   if (num_pages_) {
     UnmapPages();
-    for (size_t i = 0; i < num_pages_; i++)
-      data_[i].Destroy();
+    DeallocateStackTraces();
   }
 }
@@ -83,7 +80,7 @@ void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
   void* alloc = reinterpret_cast<void*>(free_page + offset);
 
   // Initialize slot metadata.
-  data_[free_slot].RecordAllocation(size, alloc);
+  RecordAllocationInSlot(free_slot, size, alloc);
 
   return alloc;
 }
@@ -104,7 +101,7 @@ void GuardedPageAllocator::Deallocate(void* ptr) {
   }
 
   // Record deallocation stack trace/thread id.
-  data_[slot].RecordDeallocation();
+  RecordDeallocationInSlot(slot);
 
   FreeSlot(slot);
 }
@@ -218,56 +215,60 @@ size_t GuardedPageAllocator::AddrToSlot(uintptr_t addr) const {
   return slot;
 }
 
-void GuardedPageAllocator::SlotMetadata::Init() {
+void GuardedPageAllocator::AllocateStackTraces() {
   // new is not used so that we can explicitly call the constructor when we
   // want to collect a stack trace.
-  alloc.stacktrace =
-      static_cast<StackTrace*>(malloc(sizeof(*alloc.stacktrace)));
-  CHECK(alloc.stacktrace);
-  dealloc.stacktrace =
-      static_cast<StackTrace*>(malloc(sizeof(*dealloc.stacktrace)));
-  CHECK(dealloc.stacktrace);
+  for (size_t i = 0; i < num_pages_; i++) {
+    alloc_traces[i] =
+        static_cast<StackTrace*>(malloc(sizeof(*alloc_traces[i])));
+    CHECK(alloc_traces[i]);
+    dealloc_traces[i] =
+        static_cast<StackTrace*>(malloc(sizeof(*dealloc_traces[i])));
+    CHECK(dealloc_traces[i]);
+  }
 }
 
-void GuardedPageAllocator::SlotMetadata::Destroy() {
-  CHECK(alloc.stacktrace);
-  CHECK(dealloc.stacktrace);
-  Reset();
-  free(alloc.stacktrace);
-  free(dealloc.stacktrace);
+void GuardedPageAllocator::DeallocateStackTraces() {
+  for (size_t i = 0; i < num_pages_; i++) {
+    DestructStackTrace(i);
+    free(alloc_traces[i]);
+    alloc_traces[i] = nullptr;
+    free(dealloc_traces[i]);
+    dealloc_traces[i] = nullptr;
+  }
 }
 
-void GuardedPageAllocator::SlotMetadata::Reset() {
+void GuardedPageAllocator::DestructStackTrace(size_t slot) {
   // Destruct previous allocation/deallocation traces. The constructor was only
   // called if trace_addr is non-null.
-  if (alloc.trace_addr)
-    alloc.stacktrace->~StackTrace();
-  if (dealloc.trace_addr)
-    dealloc.stacktrace->~StackTrace();
+  if (data_[slot].alloc.trace_addr)
+    alloc_traces[slot]->~StackTrace();
+  if (data_[slot].dealloc.trace_addr)
+    dealloc_traces[slot]->~StackTrace();
 }
 
-void GuardedPageAllocator::SlotMetadata::RecordAllocation(size_t size,
-                                                          void* ptr) {
-  Reset();
-
-  alloc_size = size;
-  alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
-
-  alloc.tid = base::PlatformThread::CurrentId();
-  new (alloc.stacktrace) StackTrace();
-  alloc.trace_addr = alloc.stacktrace->Addresses(&alloc.trace_len);
-
-  dealloc.tid = base::kInvalidThreadId;
-  dealloc.trace_addr = nullptr;
-  dealloc.trace_len = 0;
+void GuardedPageAllocator::RecordAllocationInSlot(size_t slot,
+                                                  size_t size,
+                                                  void* ptr) {
+  data_[slot].alloc_size = size;
+  data_[slot].alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
+
+  data_[slot].alloc.tid = base::PlatformThread::CurrentId();
+  new (alloc_traces[slot]) StackTrace();
+  data_[slot].alloc.trace_addr = reinterpret_cast<uintptr_t>(
+      alloc_traces[slot]->Addresses(&data_[slot].alloc.trace_len));
+
+  data_[slot].dealloc.tid = base::kInvalidThreadId;
+  data_[slot].dealloc.trace_addr = 0;
+  data_[slot].dealloc.trace_len = 0;
 }
 
-void GuardedPageAllocator::SlotMetadata::RecordDeallocation() {
-  dealloc.tid = base::PlatformThread::CurrentId();
-  new (dealloc.stacktrace) StackTrace();
-  dealloc.trace_addr = dealloc.stacktrace->Addresses(&dealloc.trace_len);
+void GuardedPageAllocator::RecordDeallocationInSlot(size_t slot) {
+  data_[slot].dealloc.tid = base::PlatformThread::CurrentId();
+  new (dealloc_traces[slot]) StackTrace();
+  data_[slot].dealloc.trace_addr = reinterpret_cast<uintptr_t>(
+      dealloc_traces[slot]->Addresses(&data_[slot].dealloc.trace_len));
 }
 
 }  // namespace internal
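The AllocateStackTraces()/DestructStackTrace() pair above relies on separating storage allocation from object lifetime: malloc reserves the bytes once, placement new runs the StackTrace constructor only when a slot actually records a trace, and the destructor is invoked explicitly so the storage can be reused. Below is a minimal standalone sketch of that pattern, using a stand-in Trace type rather than base::debug::StackTrace.

#include <cassert>
#include <cstdlib>
#include <new>

struct Trace {
  Trace() { /* capture stack frames here */ }
  ~Trace() { /* release per-trace bookkeeping */ }
};

int main() {
  // Reserve raw storage without running the constructor.
  Trace* slot = static_cast<Trace*>(malloc(sizeof(Trace)));
  assert(slot);

  new (slot) Trace();  // construct in place when the slot is first used
  slot->~Trace();      // destroy explicitly; the storage stays allocated

  new (slot) Trace();  // the same storage can be reused for the next trace
  slot->~Trace();

  free(slot);          // finally release the raw storage
  return 0;
}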
guarded_page_allocator.h:
@@ -82,36 +82,12 @@ class GuardedPageAllocator {
     // (De)allocation thread id or base::kInvalidThreadId if no (de)allocation
     // occurred.
     base::PlatformThreadId tid = base::kInvalidThreadId;
 
-    // Pointer to stack trace addresses or null if no (de)allocation occurred.
-    const void* const* trace_addr = nullptr;
+    // Address of stack trace addresses or null if no (de)allocation occurred.
+    uintptr_t trace_addr = 0;
 
     // Stack trace length or 0 if no (de)allocation occurred.
     size_t trace_len = 0;
-
-   private:
-    // StackTrace object for this slot, it's allocated in
-    // SlotMetadata()::Init() and only used internally, trace_addr/len should
-    // be used by external consumers of the stack trace data.
-    base::debug::StackTrace* stacktrace = nullptr;
-
-    friend struct SlotMetadata;
   };
 
-  // Allocate internal data (StackTraces) for this slot. StackTrace objects
-  // are large so we only allocate them if they're required (instead of
-  // having them be statically allocated in the SlotMetadata itself.)
-  void Init();
-
-  // Frees internal data. May only be called after Init().
-  void Destroy();
-
-  // Update slot metadata on an allocation with the given size and pointer.
-  void RecordAllocation(size_t size, void* ptr);
-
-  // Update slot metadata on a deallocation.
-  void RecordDeallocation();
-
   // Size of the allocation
   size_t alloc_size = 0;
   // The allocation address.
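Note the type change in the hunk above: trace_addr goes from const void* const* to uintptr_t, so AllocationInfo now holds only plain integral data. Here is a hedged sketch of how a consumer might recover the frame array from those two fields; the struct mirrors the diff, but the printing loop is illustrative only.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct AllocationInfo {
  uintptr_t trace_addr = 0;  // address of an array of return addresses
  size_t trace_len = 0;      // number of frames, 0 if nothing was recorded
};

void PrintTrace(const AllocationInfo& info) {
  if (!info.trace_addr)
    return;  // no (de)allocation recorded for this slot
  const void* const* frames =
      reinterpret_cast<const void* const*>(info.trace_addr);
  for (size_t i = 0; i < info.trace_len; i++)
    printf("#%zu %p\n", i, frames[i]);
}

int main() {
  PrintTrace(AllocationInfo{});  // prints nothing for an empty slot
  return 0;
}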
@@ -119,11 +95,6 @@ class GuardedPageAllocator {
   AllocationInfo alloc;
   AllocationInfo dealloc;
-
- private:
-  // Call destructors on (de)alloc.stacktrace if constructors for them have
-  // previously been called.
-  void Reset();
 };
 
 // Does not allocate any memory for the allocator, to finish initializing call
@@ -169,6 +140,23 @@ class GuardedPageAllocator {
   uintptr_t SlotToAddr(size_t slot) const;
   size_t AddrToSlot(uintptr_t addr) const;
 
+  // Allocate num_pages_ stack traces.
+  void AllocateStackTraces();
+
+  // Deallocate stack traces. May only be called after AllocateStackTraces().
+  void DeallocateStackTraces();
+
+  // Call the destructor on the allocation and deallocation stack traces for
+  // a given slot index if the constructors for those stack traces have been
+  // called.
+  void DestructStackTrace(size_t slot);
+
+  // Record an allocation or deallocation for a given slot index. This
+  // encapsulates the logic for updating the stack traces and metadata for a
+  // given slot.
+  void RecordAllocationInSlot(size_t slot, size_t size, void* ptr);
+  void RecordDeallocationInSlot(size_t slot);
+
   // Allocator lock that protects free_pages_.
   base::Lock lock_;
@@ -189,6 +177,12 @@ class GuardedPageAllocator {
   // Set to true if a double free has occurred.
   std::atomic<bool> double_free_detected_{false};
 
+  // StackTrace objects for every slot in AllocateStateBase::data_. We avoid
+  // statically allocating the StackTrace objects because they are large and
+  // the allocator may be initialized with num_pages_ < kGpaMaxPages.
+  base::debug::StackTrace* alloc_traces[kGpaMaxPages];
+  base::debug::StackTrace* dealloc_traces[kGpaMaxPages];
+
   // Required for a singleton to access the constructor.
   friend base::NoDestructor<GuardedPageAllocator>;
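The comment in the hunk above explains why the traces are heap-allocated pointers rather than members embedded in each slot. A small sketch of the space trade-off, using an assumed frame capacity (base::debug::StackTrace stores a fixed-size array of return addresses, so its real size is similarly dominated by that array):

#include <cstddef>
#include <cstdio>

constexpr size_t kMaxPages = 64;  // stand-in for kGpaMaxPages

// Stand-in for base::debug::StackTrace: dominated by a fixed frame array.
struct BigTrace {
  const void* frames[62];
  size_t count;
};

struct InlineSlot  { BigTrace alloc_trace, dealloc_trace; };   // embedded
struct PointerSlot { BigTrace *alloc_trace, *dealloc_trace; }; // on demand

int main() {
  // Embedding traces pays for kMaxPages slots even when far fewer pages
  // are in use; pointers keep the static footprint to two words per slot.
  printf("embedded: %zu bytes\n", sizeof(InlineSlot) * kMaxPages);
  printf("pointers: %zu bytes\n", sizeof(PointerSlot) * kMaxPages);
  return 0;
}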