Commit d0213f19 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Switch to new stack trace API

Switch to the new stack trace collection API:
base::debug::CollectStackTrace() collects stack traces directly into a
caller-supplied buffer, allowing us to remove the cumbersome logic for
StackTrace construction/destruction and to move the stack traces into
the SlotMetadata, which simplifies the crash handler logic.

Bug: 896019
Change-Id: I36f525c242c5a6d24d9ded9d1bfa7949c7cab30c
Reviewed-on: https://chromium-review.googlesource.com/c/1429624
Auto-Submit: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#625826}
parent 132683a2
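To make the change concrete before the diff: a minimal sketch of the collection pattern being adopted. The struct below is a pared-down, hypothetical stand-in for AllocatorState::SlotMetadata::AllocationInfo; only base::debug::CollectStackTrace() and the field/constant names are taken from the patch itself.

#include <cstddef>
#include <cstdint>

#include "base/debug/stack_trace.h"

// Mirrors AllocatorState::kMaxStackFrames from the patch.
constexpr size_t kMaxFrames = 60;

// Pared-down stand-in for AllocatorState::SlotMetadata::AllocationInfo.
struct AllocationInfo {
  uintptr_t trace[kMaxFrames];  // Frames now live inline in the metadata.
  size_t trace_len = 0;
  bool trace_collected = false;
};

void RecordTrace(AllocationInfo* info) {
  // CollectStackTrace() writes return addresses straight into the supplied
  // buffer and returns the number of frames captured, so no heap-allocated
  // StackTrace object needs to be constructed, tracked, and destroyed.
  info->trace_len = base::debug::CollectStackTrace(
      reinterpret_cast<void**>(info->trace), kMaxFrames);
  info->trace_collected = true;
}

Because the frames are plain integers stored inline, the crash handler can read them directly out of the shared metadata instead of chasing a pointer into the crashed process's heap.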
@@ -19,8 +19,6 @@
 #include "components/gwp_asan/common/allocator_state.h"
 #include "components/gwp_asan/common/crash_key_name.h"

-using base::debug::StackTrace;
-
 namespace gwp_asan {
 namespace internal {
@@ -56,17 +54,13 @@ void GuardedPageAllocator::Init(size_t max_alloced_pages, size_t total_pages) {
         std::next(free_slot_ring_buffer_.begin(), total_pages));
   }

-  AllocateStackTraces();
-
   slots_ = std::make_unique<AllocatorState::SlotMetadata[]>(total_pages);
   state_.slot_metadata = reinterpret_cast<uintptr_t>(slots_.get());
 }

 GuardedPageAllocator::~GuardedPageAllocator() {
-  if (state_.total_pages) {
+  if (state_.total_pages)
     UnmapRegion();
-    DeallocateStackTraces();
-  }
 }

 void* GuardedPageAllocator::Allocate(size_t size, size_t align) {
@@ -111,7 +105,7 @@ void GuardedPageAllocator::Deallocate(void* ptr) {
   size_t slot = state_.AddrToSlot(state_.GetPageAddr(addr));
   DCHECK_EQ(addr, slots_[slot].alloc_ptr);
   // Check for double free.
-  if (slots_[slot].dealloc.trace_addr) {
+  if (slots_[slot].dealloc.trace_collected) {
     state_.double_free_detected = true;
     base::subtle::MemoryBarrier();
     // Trigger an exception by writing to the inaccessible free()d allocation.
@@ -168,39 +162,6 @@ void GuardedPageAllocator::FreeSlot(size_t slot) {
                             free_slot_start_idx_);
 }

-void GuardedPageAllocator::AllocateStackTraces() {
-  // new is not used so that we can explicitly call the constructor when we
-  // want to collect a stack trace.
-  for (size_t i = 0; i < state_.total_pages; i++) {
-    alloc_traces[i] =
-        static_cast<StackTrace*>(malloc(sizeof(*alloc_traces[i])));
-    CHECK(alloc_traces[i]);
-    dealloc_traces[i] =
-        static_cast<StackTrace*>(malloc(sizeof(*dealloc_traces[i])));
-    CHECK(dealloc_traces[i]);
-  }
-}
-
-void GuardedPageAllocator::DeallocateStackTraces() {
-  for (size_t i = 0; i < state_.total_pages; i++) {
-    DestructStackTrace(i);
-    free(alloc_traces[i]);
-    alloc_traces[i] = nullptr;
-    free(dealloc_traces[i]);
-    dealloc_traces[i] = nullptr;
-  }
-}
-
-void GuardedPageAllocator::DestructStackTrace(size_t slot) {
-  // Destruct previous allocation/deallocation traces. The constructor was only
-  // called if trace_addr is non-null.
-  if (slots_[slot].alloc.trace_addr)
-    alloc_traces[slot]->~StackTrace();
-  if (slots_[slot].dealloc.trace_addr)
-    dealloc_traces[slot]->~StackTrace();
-}
-
 void GuardedPageAllocator::RecordAllocationInSlot(size_t slot,
                                                   size_t size,
                                                   void* ptr) {
@@ -208,20 +169,22 @@ void GuardedPageAllocator::RecordAllocationInSlot(size_t slot,
   slots_[slot].alloc_ptr = reinterpret_cast<uintptr_t>(ptr);
   slots_[slot].alloc.tid = base::PlatformThread::CurrentId();
-  new (alloc_traces[slot]) StackTrace();
-  slots_[slot].alloc.trace_addr = reinterpret_cast<uintptr_t>(
-      alloc_traces[slot]->Addresses(&slots_[slot].alloc.trace_len));
+  slots_[slot].alloc.trace_len = base::debug::CollectStackTrace(
+      reinterpret_cast<void**>(&slots_[slot].alloc.trace),
+      AllocatorState::kMaxStackFrames);
+  slots_[slot].alloc.trace_collected = true;

   slots_[slot].dealloc.tid = base::kInvalidThreadId;
-  slots_[slot].dealloc.trace_addr = 0;
   slots_[slot].dealloc.trace_len = 0;
+  slots_[slot].dealloc.trace_collected = false;
 }

 void GuardedPageAllocator::RecordDeallocationInSlot(size_t slot) {
   slots_[slot].dealloc.tid = base::PlatformThread::CurrentId();
-  new (dealloc_traces[slot]) StackTrace();
-  slots_[slot].dealloc.trace_addr = reinterpret_cast<uintptr_t>(
-      dealloc_traces[slot]->Addresses(&slots_[slot].dealloc.trace_len));
+  slots_[slot].dealloc.trace_len = base::debug::CollectStackTrace(
+      reinterpret_cast<void**>(&slots_[slot].dealloc.trace),
+      AllocatorState::kMaxStackFrames);
+  slots_[slot].dealloc.trace_collected = true;
 }

 uintptr_t GuardedPageAllocator::GetCrashKeyAddress() const {
......
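A sketch of how the double-free check above plays out inside Deallocate(): the metadata names come from the diff, while the final faulting store is a hypothetical reconstruction of the line the hunk truncates.

// Inside GuardedPageAllocator::Deallocate() (sketch, not verbatim source).
if (slots_[slot].dealloc.trace_collected) {
  // A deallocation was already recorded for this slot: double free.
  state_.double_free_detected = true;
  // Publish the flag before the deliberate crash below.
  base::subtle::MemoryBarrier();
  // Trigger an exception by writing to the inaccessible free()d allocation.
  *reinterpret_cast<volatile char*>(ptr) = 0;  // Hypothetical faulting write.
}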
@@ -10,7 +10,6 @@
 #include <memory>

 #include "base/compiler_specific.h"
-#include "base/debug/stack_trace.h"
 #include "base/gtest_prod_util.h"
 #include "base/no_destructor.h"
 #include "base/synchronization/lock.h"
@@ -97,17 +96,6 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   // Marks the specified slot as unreserved.
   void FreeSlot(size_t slot) LOCKS_EXCLUDED(lock_);

-  // Allocate total_pages stack traces.
-  void AllocateStackTraces();
-
-  // Deallocate stack traces. May only be called after AllocateStackTraces().
-  void DeallocateStackTraces();
-
-  // Call the destructor on the allocation and deallocation stack traces for
-  // a given slot index if the constructors for those stack traces have been
-  // called.
-  void DestructStackTrace(size_t slot);
-
   // Record an allocation or deallocation for a given slot index. This
   // encapsulates the logic for updating the stack traces and metadata for a
   // given slot.
@@ -137,12 +125,6 @@ class GWP_ASAN_EXPORT GuardedPageAllocator {
   // extraneous memory for when total_pages < kGpaMaxPages.
   std::unique_ptr<AllocatorState::SlotMetadata[]> slots_;

-  // StackTrace objects for every slot in AllocatorState::data_. We avoid
-  // statically allocating the StackTrace objects because they are large and
-  // the allocator may be initialized with total_pages < kGpaMaxPages.
-  base::debug::StackTrace* alloc_traces[AllocatorState::kGpaMaxPages];
-  base::debug::StackTrace* dealloc_traces[AllocatorState::kGpaMaxPages];
-
   // Required for a singleton to access the constructor.
   friend base::NoDestructor<GuardedPageAllocator>;
......
@@ -9,13 +9,12 @@
 #include "base/threading/platform_thread.h"
 #include "build/build_config.h"

-using base::debug::StackTrace;
-
 namespace gwp_asan {
 namespace internal {

 // TODO: Delete out-of-line constexpr definitions once C++17 is in use.
 constexpr size_t AllocatorState::kGpaMaxPages;
+constexpr size_t AllocatorState::kMaxStackFrames;

 AllocatorState::GetMetadataReturnType AllocatorState::GetMetadataForAddress(
     uintptr_t exception_address,
......
@@ -25,7 +25,6 @@
 #ifndef COMPONENTS_GWP_ASAN_COMMON_ALLOCATOR_STATE_H_
 #define COMPONENTS_GWP_ASAN_COMMON_ALLOCATOR_STATE_H_

-#include "base/debug/stack_trace.h"
 #include "base/threading/platform_thread.h"

 namespace gwp_asan {
@@ -37,6 +36,8 @@ class AllocatorState {
  public:
   // Maximum number of pages this class can allocate.
   static constexpr size_t kGpaMaxPages = 128;
+  // Maximum number of stack trace frames to collect.
+  static constexpr size_t kMaxStackFrames = 60;

   enum class ErrorType {
     kUseAfterFree = 0,
@@ -59,10 +60,12 @@ class AllocatorState {
     // (De)allocation thread id or base::kInvalidThreadId if no (de)allocation
     // occurred.
     base::PlatformThreadId tid = base::kInvalidThreadId;
-    // Pointer to stack trace addresses or null if no (de)allocation occurred.
-    uintptr_t trace_addr = 0;
-    // Stack trace length or 0 if no (de)allocation occurred.
+    // Stack trace contents.
+    uintptr_t trace[kMaxStackFrames];
+    // Stack trace length.
     size_t trace_len = 0;
+    // Whether a stack trace has been collected for this (de)allocation.
+    bool trace_collected = false;
   };

   // Size of the allocation
......
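One property the new layout relies on, stated here as a hypothetical guard (not part of the patch): the crash handler reads SlotMetadata out of the crashed process's memory, so the struct must stay trivially copyable and self-contained; inline trace arrays satisfy that where heap-allocated StackTrace objects could not.

#include <type_traits>

// Illustrative only; not in the patch. Documents the cross-process
// constraint that motivated moving the traces inline.
static_assert(
    std::is_trivially_copyable<AllocatorState::SlotMetadata>::value,
    "SlotMetadata is copied out of another process's address space");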
@@ -27,9 +27,6 @@ namespace internal {
 using GwpAsanCrashAnalysisResult = CrashAnalyzer::GwpAsanCrashAnalysisResult;

-// TODO: Delete out-of-line constexpr definitions once C++17 is in use.
-constexpr size_t CrashAnalyzer::kMaxStackTraceLen;
-
 GwpAsanCrashAnalysisResult CrashAnalyzer::GetExceptionInfo(
     const crashpad::ProcessSnapshot& process_snapshot,
     gwp_asan::Crash* proto) {
@@ -125,14 +122,14 @@ GwpAsanCrashAnalysisResult CrashAnalyzer::AnalyzeCrashedAllocator(
   }

   AllocatorState::ErrorType error = valid_state.GetErrorType(
-      exception_addr, slot.alloc.trace_addr != 0, slot.dealloc.trace_addr != 0);
+      exception_addr, slot.alloc.trace_collected, slot.dealloc.trace_collected);
   proto->set_error_type(static_cast<Crash_ErrorType>(error));
   proto->set_allocation_address(slot.alloc_ptr);
   proto->set_allocation_size(slot.alloc_size);
   if (slot.alloc.tid != base::kInvalidThreadId || slot.alloc.trace_len)
-    ReadAllocationInfo(memory, slot.alloc, proto->mutable_allocation());
+    ReadAllocationInfo(slot.alloc, proto->mutable_allocation());
   if (slot.dealloc.tid != base::kInvalidThreadId || slot.dealloc.trace_len)
-    ReadAllocationInfo(memory, slot.dealloc, proto->mutable_deallocation());
+    ReadAllocationInfo(slot.dealloc, proto->mutable_deallocation());
   proto->set_region_start(valid_state.pages_base_addr);
   proto->set_region_size(valid_state.pages_end_addr -
                          valid_state.pages_base_addr);
@@ -140,45 +137,26 @@
   return GwpAsanCrashAnalysisResult::kGwpAsanCrash;
 }

-bool CrashAnalyzer::ReadAllocationInfo(
-    const crashpad::ProcessMemory& memory,
+void CrashAnalyzer::ReadAllocationInfo(
     const SlotMetadata::AllocationInfo& slot_info,
     gwp_asan::Crash_AllocationInfo* proto_info) {
   if (slot_info.tid != base::kInvalidThreadId)
     proto_info->set_thread_id(slot_info.tid);

-  if (!slot_info.trace_len)
-    return true;
-
-  size_t trace_len = std::min(slot_info.trace_len, kMaxStackTraceLen);
-  // On 32-bit platforms we can't read directly to
-  // proto_info->mutable_stack_trace()->mutable_data(), so we read to an
-  // intermediate uintptr_t array.
-  uintptr_t trace[kMaxStackTraceLen];
-  if (!ReadStackTrace(memory,
-                      static_cast<crashpad::VMAddress>(slot_info.trace_addr),
-                      trace, trace_len))
-    return false;
-
-  proto_info->mutable_stack_trace()->Resize(trace_len, 0);
-  uint64_t* output = proto_info->mutable_stack_trace()->mutable_data();
-  for (size_t i = 0; i < trace_len; i++)
-    output[i] = trace[i];
-
-  return true;
-}
+  if (!slot_info.trace_len || !slot_info.trace_collected)
+    return;

-bool CrashAnalyzer::ReadStackTrace(const crashpad::ProcessMemory& memory,
-                                   crashpad::VMAddress crashing_trace_addr,
-                                   uintptr_t* trace_output,
-                                   size_t trace_len) {
-  if (!memory.Read(crashing_trace_addr, sizeof(uintptr_t) * trace_len,
-                   trace_output)) {
-    DLOG(ERROR) << "Memory read should always succeed.";
-    return false;
+  if (slot_info.trace_len > AllocatorState::kMaxStackFrames) {
+    DLOG(ERROR) << "Stack trace length is corrupted: " << slot_info.trace_len;
+    return;
   }
-  return true;
-}

+  // On 32-bit platforms we can't copy directly to
+  // proto_info->mutable_stack_trace()->mutable_data().
+  proto_info->mutable_stack_trace()->Resize(slot_info.trace_len, 0);
+  uint64_t* output = proto_info->mutable_stack_trace()->mutable_data();
+  for (size_t i = 0; i < slot_info.trace_len; i++)
+    output[i] = slot_info.trace[i];
 }

 }  // namespace internal
......
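The element-wise copy at the end of ReadAllocationInfo() is deliberate. A standalone illustration of why (the function name is invented for the example):

#include <cstddef>
#include <cstdint>

// On a 32-bit build sizeof(uintptr_t) == 4 while the proto's stack_trace
// field stores 64-bit values, so memcpy()ing the raw trace buffer would
// misplace every entry; each element must be widened individually.
void WidenTrace(const uintptr_t* trace, size_t len, uint64_t* out) {
  for (size_t i = 0; i < len; i++)
    out[i] = trace[i];  // Implicit zero-extension to 64 bits.
}

Note also that trace_len originates in the crashed (potentially attacker-influenced) process, which is why the hunk above validates it against kMaxStackFrames before the copy.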
@@ -64,10 +64,6 @@ class CrashAnalyzer {
  private:
   using SlotMetadata = AllocatorState::SlotMetadata;

-  // The maximum stack trace size the analyzer will extract from a crashing
-  // process.
-  static constexpr size_t kMaxStackTraceLen = 64;
-
   // Given an ExceptionSnapshot, return the address of where the exception
   // occurred (or null if it was not a data access exception.)
   static crashpad::VMAddress GetAccessAddress(
@@ -90,16 +86,8 @@ class CrashAnalyzer {
   // This method fills out an AllocationInfo protobuf from a
   // SlotMetadata::AllocationInfo struct.
-  static bool ReadAllocationInfo(const crashpad::ProcessMemory& memory,
-                                 const SlotMetadata::AllocationInfo& slot_info,
+  static void ReadAllocationInfo(const SlotMetadata::AllocationInfo& slot_info,
                                  gwp_asan::Crash_AllocationInfo* proto_info);
-
-  // Read a stack trace from a crashing process with address crashing_trace_addr
-  // and length trace_len into trace_output. On failure returns false.
-  static bool ReadStackTrace(const crashpad::ProcessMemory& memory,
-                             crashpad::VMAddress crashing_trace_addr,
-                             uintptr_t* trace_output,
-                             size_t trace_len);
 };

 }  // namespace internal
......