Commit 08f74b8d authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Refactor SlotMetadata methods

In an upcoming change, I split the GuardedPageAllocator into two
objects. I'm moving these methods into the GuardedPageAllocator because
SlotMetadata lives in the base object, but these methods should not live there.

Bug: 896019
Change-Id: Ia98e966ce6cc8aa843bcaa4ce322c1b355e508a1
Reviewed-on: https://chromium-review.googlesource.com/c/1341062
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Cr-Commit-Position: refs/heads/master@{#609352}
parent fe8ca988
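Before the diff, a minimal sketch of the interface move this change makes (simplified declarations only; the method names come from the diff below, while the two class names and everything else are illustrative, not the real Chromium headers): per-slot setup and recording logic leaves SlotMetadata and becomes slot-indexed methods on the allocator.

// Sketch only: simplified declarations illustrating the refactor.
#include <stddef.h>

class AllocatorBefore {
  struct SlotMetadata {
    void Init();                                    // allocate StackTrace storage
    void Destroy();                                 // free it again
    void RecordAllocation(size_t size, void* ptr);  // fill allocation info
    void RecordDeallocation();                      // fill deallocation info
  };
};

class AllocatorAfter {
  // Same responsibilities, now keyed by slot index so SlotMetadata can stay a
  // plain data holder in the base object.
  void AllocateStackTraces();
  void DeallocateStackTraces();
  void DestructStackTrace(size_t slot);
  void RecordAllocationInSlot(size_t slot, size_t size, void* ptr);
  void RecordDeallocationInSlot(size_t slot);
};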
@@ -39,16 +39,13 @@ void GuardedPageAllocator::Init(size_t num_pages) {
(num_pages_ == kGpaMaxPages) ? ~0ULL : (1ULL << num_pages_) - 1;
}
for (size_t i = 0; i < num_pages_; i++)
data_[i].Init();
AllocateStackTraces();
}
GuardedPageAllocator::~GuardedPageAllocator() {
if (num_pages_) {
UnmapPages();
for (size_t i = 0; i < num_pages_; i++)
data_[i].Destroy();
DeallocateStackTraces();
}
}
@@ -83,7 +80,7 @@ void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
void* alloc = reinterpret_cast<void*>(free_page + offset);
// Initialize slot metadata.
data_[free_slot].RecordAllocation(size, alloc);
RecordAllocationInSlot(free_slot, size, alloc);
return alloc;
}
@@ -104,7 +101,7 @@ void GuardedPageAllocator::Deallocate(void* ptr) {
}
// Record deallocation stack trace/thread id.
data_[slot].RecordDeallocation();
RecordDeallocationInSlot(slot);
FreeSlot(slot);
}
@@ -218,56 +215,60 @@ size_t GuardedPageAllocator::AddrToSlot(uintptr_t addr) const {
return slot;
}
void GuardedPageAllocator::SlotMetadata::Init() {
void GuardedPageAllocator::AllocateStackTraces() {
// new is not used so that we can explicitly call the constructor when we
// want to collect a stack trace.
alloc.stacktrace =
static_cast<StackTrace*>(malloc(sizeof(*alloc.stacktrace)));
CHECK(alloc.stacktrace);
dealloc.stacktrace =
static_cast<StackTrace*>(malloc(sizeof(*dealloc.stacktrace)));
CHECK(dealloc.stacktrace);
for (size_t i = 0; i < num_pages_; i++) {
alloc_traces[i] =
static_cast<StackTrace*>(malloc(sizeof(*alloc_traces[i])));
CHECK(alloc_traces[i]);
dealloc_traces[i] =
static_cast<StackTrace*>(malloc(sizeof(*dealloc_traces[i])));
CHECK(dealloc_traces[i]);
}
}
void GuardedPageAllocator::SlotMetadata::Destroy() {
CHECK(alloc.stacktrace);
CHECK(dealloc.stacktrace);
Reset();
void GuardedPageAllocator::DeallocateStackTraces() {
for (size_t i = 0; i < num_pages_; i++) {
DestructStackTrace(i);
free(alloc.stacktrace);
free(dealloc.stacktrace);
free(alloc_traces[i]);
alloc_traces[i] = nullptr;
free(dealloc_traces[i]);
dealloc_traces[i] = nullptr;
}
}
void GuardedPageAllocator::SlotMetadata::Reset() {
void GuardedPageAllocator::DestructStackTrace(size_t slot) {
// Destruct previous allocation/deallocation traces. The constructor was only
// called if trace_addr is non-null.
if (alloc.trace_addr)
alloc.stacktrace->~StackTrace();
if (dealloc.trace_addr)
dealloc.stacktrace->~StackTrace();
if (data_[slot].alloc.trace_addr)
alloc_traces[slot]->~StackTrace();
if (data_[slot].dealloc.trace_addr)
dealloc_traces[slot]->~StackTrace();
}
void GuardedPageAllocator::SlotMetadata::RecordAllocation(size_t size,
void GuardedPageAllocator::RecordAllocationInSlot(size_t slot,
size_t size,
void* ptr) {
Reset();
alloc_size = size;
alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
data_[slot].alloc_size = size;
data_[slot].alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
alloc.tid = base::PlatformThread::CurrentId();
new (alloc.stacktrace) StackTrace();
alloc.trace_addr = alloc.stacktrace->Addresses(&alloc.trace_len);
data_[slot].alloc.tid = base::PlatformThread::CurrentId();
new (alloc_traces[slot]) StackTrace();
data_[slot].alloc.trace_addr = reinterpret_cast<uintptr_t>(
alloc_traces[slot]->Addresses(&data_[slot].alloc.trace_len));
dealloc.tid = base::kInvalidThreadId;
dealloc.trace_addr = nullptr;
dealloc.trace_len = 0;
data_[slot].dealloc.tid = base::kInvalidThreadId;
data_[slot].dealloc.trace_addr = 0;
data_[slot].dealloc.trace_len = 0;
}
void GuardedPageAllocator::SlotMetadata::RecordDeallocation() {
dealloc.tid = base::PlatformThread::CurrentId();
new (dealloc.stacktrace) StackTrace();
dealloc.trace_addr = dealloc.stacktrace->Addresses(&dealloc.trace_len);
void GuardedPageAllocator::RecordDeallocationInSlot(size_t slot) {
data_[slot].dealloc.tid = base::PlatformThread::CurrentId();
new (dealloc_traces[slot]) StackTrace();
data_[slot].dealloc.trace_addr = reinterpret_cast<uintptr_t>(
dealloc_traces[slot]->Addresses(&data_[slot].dealloc.trace_len));
}
} // namespace internal
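The relocated functions above keep the original storage idiom: raw malloc'd buffers allocated up front, constructed lazily with placement new only when a trace is actually captured, and destroyed explicitly (guarded by the trace_addr check). A standalone sketch of that idiom, with a trivial Trace type standing in for base::debug::StackTrace and every other name being illustrative:

// Sketch of the storage idiom used above: allocate raw storage up front,
// run the constructor lazily with placement new, destroy it explicitly.
#include <new>
#include <stdlib.h>

struct Trace {
  Trace() {}   // the real StackTrace captures the current stack here
  ~Trace() {}
};

struct LazyTrace {
  Trace* storage = nullptr;  // raw storage; no constructor has run yet
  bool constructed = false;  // stands in for the trace_addr != 0 check

  void AllocateStorage() {
    storage = static_cast<Trace*>(malloc(sizeof(*storage)));
  }
  void Capture() {
    if (constructed)
      storage->~Trace();     // destroy the previous capture first
    new (storage) Trace();   // placement new into the existing buffer
    constructed = true;
  }
  void Release() {
    if (constructed)
      storage->~Trace();
    free(storage);
    storage = nullptr;
    constructed = false;
  }
};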
@@ -82,36 +82,12 @@ class GuardedPageAllocator {
// (De)allocation thread id or base::kInvalidThreadId if no (de)allocation
// occurred.
base::PlatformThreadId tid = base::kInvalidThreadId;
// Pointer to stack trace addresses or null if no (de)allocation occurred.
const void* const* trace_addr = nullptr;
// Address of stack trace addresses or null if no (de)allocation occurred.
uintptr_t trace_addr = 0;
// Stack trace length or 0 if no (de)allocation occurred.
size_t trace_len = 0;
private:
// StackTrace object for this slot, it's allocated in
// SlotMetadata()::Init() and only used internally, trace_addr/len should
// be used by external consumers of the stack trace data.
base::debug::StackTrace* stacktrace = nullptr;
friend struct SlotMetadata;
};
// Allocate internal data (StackTraces) for this slot. StackTrace objects
// are large so we only allocate them if they're required (instead of
// having them be statically allocated in the SlotMetadata itself.)
void Init();
// Frees internal data. May only be called after Init().
void Destroy();
// Update slot metadata on an allocation with the given size and pointer.
void RecordAllocation(size_t size, void* ptr);
// Update slot metadata on a deallocation.
void RecordDeallocation();
// Size of the allocation
size_t alloc_size = 0;
// The allocation address.
@@ -119,11 +95,6 @@ class GuardedPageAllocator {
AllocationInfo alloc;
AllocationInfo dealloc;
private:
// Call destructors on (de)alloc.stacktrace if constructors for them have
// previously been called.
void Reset();
};
// Does not allocate any memory for the allocator, to finish initializing call
@@ -169,6 +140,23 @@ class GuardedPageAllocator {
uintptr_t SlotToAddr(size_t slot) const;
size_t AddrToSlot(uintptr_t addr) const;
// Allocate num_pages_ stack traces.
void AllocateStackTraces();
// Deallocate stack traces. May only be called after AllocateStackTraces().
void DeallocateStackTraces();
// Call the destructor on the allocation and deallocation stack traces for
// a given slot index if the constructors for those stack traces have been
// called.
void DestructStackTrace(size_t slot);
// Record an allocation or deallocation for a given slot index. This
// encapsulates the logic for updating the stack traces and metadata for a
// given slot.
void RecordAllocationInSlot(size_t slot, size_t size, void* ptr);
void RecordDeallocationInSlot(size_t slot);
// Allocator lock that protects free_pages_.
base::Lock lock_;
@@ -189,6 +177,12 @@ class GuardedPageAllocator {
// Set to true if a double free has occurred.
std::atomic<bool> double_free_detected_{false};
// StackTrace objects for every slot in AllocateStateBase::data_. We avoid
// statically allocating the StackTrace objects because they are large and
// the allocator may be initialized with num_pages_ < kGpaMaxPages.
base::debug::StackTrace* alloc_traces[kGpaMaxPages];
base::debug::StackTrace* dealloc_traces[kGpaMaxPages];
// Required for a singleton to access the constructor.
friend base::NoDestructor<GuardedPageAllocator>;
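One header change worth noting: AllocationInfo::trace_addr is now stored as a uintptr_t instead of a const void* const*, which is why the .cc side wraps StackTrace::Addresses() in reinterpret_cast<uintptr_t>. A hypothetical consumer-side sketch (not part of this change; AllocationInfo is pared down to the two fields used here) of recovering the frames:

// Hypothetical reader of the slot metadata after this change.
#include <stddef.h>
#include <stdint.h>
#include <iostream>

struct AllocationInfo {
  uintptr_t trace_addr = 0;  // address of the frame array, or 0 if unset
  size_t trace_len = 0;      // number of frames, or 0 if unset
};

void PrintTrace(const AllocationInfo& info) {
  if (!info.trace_addr)
    return;  // no trace was ever captured for this slot
  // Cast the stored integer back to the array returned by
  // base::debug::StackTrace::Addresses().
  const void* const* frames =
      reinterpret_cast<const void* const*>(info.trace_addr);
  for (size_t i = 0; i < info.trace_len; i++)
    std::cout << "#" << i << " " << frames[i] << "\n";
}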