Commit 8971b614 authored by Vlad Tsyrklevich, committed by Commit Bot

GWP-ASan: Store stack traces more compactly

Currently, the allocation and deallocation stack traces are stored in
separate 200-byte buffers. This change stores them together in a single
400-byte buffer so that the deallocation trace can use any space the
allocation trace left unused. This improves memory density for
deallocation traces (usually the ones we care most about in
use-after-free reports) without reducing the space available to
allocation traces.
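
To make the layout concrete, here is a minimal standalone sketch of the
shared-pool scheme. The struct and function names (SlotSketch, PackSketch,
RecordAllocSketch, RecordDeallocSketch) are illustrative stand-ins, not the
actual GWP-ASan API, and the real Pack() emits a variable-length encoding
rather than a raw copy:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr size_t kMaxPackedTraceLength = 400;  // Shared by both traces.

struct SlotSketch {
  uint8_t stack_trace_pool[kMaxPackedTraceLength];
  uint16_t alloc_trace_len = 0;
  uint16_t dealloc_trace_len = 0;
};

// Stand-in for the real variable-length packer: copies as many bytes as
// fit and returns the number of bytes written.
size_t PackSketch(const uint8_t* src, size_t src_len, uint8_t* dst,
                  size_t dst_capacity) {
  size_t n = std::min(src_len, dst_capacity);
  std::memcpy(dst, src, n);
  return n;
}

void RecordAllocSketch(SlotSketch& slot, const uint8_t* trace, size_t len) {
  // The allocation trace is capped at half the pool so a later
  // deallocation trace is always left at least 200 bytes.
  slot.alloc_trace_len = static_cast<uint16_t>(
      PackSketch(trace, len, slot.stack_trace_pool,
                 sizeof(slot.stack_trace_pool) / 2));
}

void RecordDeallocSketch(SlotSketch& slot, const uint8_t* trace, size_t len) {
  // The deallocation trace starts immediately after the allocation trace
  // and may use every byte the allocation trace left unused.
  slot.dealloc_trace_len = static_cast<uint16_t>(
      PackSketch(trace, len, slot.stack_trace_pool + slot.alloc_trace_len,
                 sizeof(slot.stack_trace_pool) - slot.alloc_trace_len));
}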

Bug: 951410
Change-Id: I6fa36a80a605b16eb2d92919b3fd4a5ff60dfc84
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1566455
Auto-Submit: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#650539}
parent f3e75ab6
@@ -269,8 +269,8 @@ void GuardedPageAllocator::RecordAllocationMetadata(
   base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames);
   metadata_[metadata_idx].alloc.trace_len =
       Pack(reinterpret_cast<uintptr_t*>(trace), len,
-           metadata_[metadata_idx].alloc.packed_trace,
-           sizeof(metadata_[metadata_idx].alloc.packed_trace));
+           metadata_[metadata_idx].stack_trace_pool,
+           sizeof(metadata_[metadata_idx].stack_trace_pool) / 2);
   metadata_[metadata_idx].alloc.tid = ReportTid();
   metadata_[metadata_idx].alloc.trace_collected = true;
@@ -287,8 +287,10 @@ void GuardedPageAllocator::RecordDeallocationMetadata(
   base::debug::CollectStackTrace(trace, AllocatorState::kMaxStackFrames);
   metadata_[metadata_idx].dealloc.trace_len =
       Pack(reinterpret_cast<uintptr_t*>(trace), len,
-           metadata_[metadata_idx].dealloc.packed_trace,
-           sizeof(metadata_[metadata_idx].dealloc.packed_trace));
+           metadata_[metadata_idx].stack_trace_pool +
+               metadata_[metadata_idx].alloc.trace_len,
+           sizeof(metadata_[metadata_idx].stack_trace_pool) -
+               metadata_[metadata_idx].alloc.trace_len);
   metadata_[metadata_idx].dealloc.tid = ReportTid();
   metadata_[metadata_idx].dealloc.trace_collected = true;
 }
...
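For example, if an allocation trace packs to 120 bytes, the deallocation
trace now has 400 - 120 = 280 bytes available, compared to the fixed
200 bytes it had in its separate buffer before this change.
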
@@ -49,11 +49,12 @@ class AllocatorState {
   // Invalid metadata index.
   static constexpr MetadataIdx kInvalidMetadataIdx = kMaxMetadata;

-  // Maximum number of stack trace frames to collect.
-  static constexpr size_t kMaxStackFrames = 60;
-  // Number of bytes to allocate for packed stack traces. This can hold
-  // approximately kMaxStackFrames under normal conditions.
-  static constexpr size_t kMaxPackedTraceLength = 200;
+  // Maximum number of stack trace frames to collect for an allocation or
+  // deallocation.
+  static constexpr size_t kMaxStackFrames = 100;
+  // Number of bytes to allocate for both allocation and deallocation packed
+  // stack traces. (Stack trace entries take ~3.5 bytes on average.)
+  static constexpr size_t kMaxPackedTraceLength = 400;

   static_assert(std::numeric_limits<SlotIdx>::max() >= kMaxSlots - 1,
                 "SlotIdx can hold all possible slot index values");
@@ -88,12 +89,14 @@ class AllocatorState {
     // (De)allocation thread id or base::kInvalidThreadId if no (de)allocation
     // occurred.
     uint64_t tid = base::kInvalidThreadId;
-    // Packed stack trace.
-    uint8_t packed_trace[kMaxPackedTraceLength];
     // Length used to encode the packed stack trace.
-    size_t trace_len = 0;
+    uint16_t trace_len = 0;
     // Whether a stack trace has been collected for this (de)allocation.
     bool trace_collected = false;
+
+    static_assert(std::numeric_limits<decltype(trace_len)>::max() >=
+                      kMaxPackedTraceLength - 1,
+                  "trace_len can hold all possible length values.");
   };

   // Size of the allocation
@@ -103,6 +106,10 @@ class AllocatorState {
   // Used to synchronize whether a deallocation has occurred (e.g. whether a
   // double free has occurred) between threads.
   std::atomic<bool> deallocation_occurred{false};
+  // Holds the combined allocation/deallocation stack traces. The deallocation
+  // stack trace is stored immediately after the allocation stack trace to
+  // optimize on space.
+  uint8_t stack_trace_pool[kMaxPackedTraceLength];

   AllocationInfo alloc;
   AllocationInfo dealloc;
...
@@ -169,10 +169,12 @@ GwpAsanCrashAnalysisResult CrashAnalyzer::AnalyzeCrashedAllocator(
     proto->set_allocation_size(metadata.alloc_size);
     if (metadata.alloc.tid != base::kInvalidThreadId ||
         metadata.alloc.trace_len)
-      ReadAllocationInfo(metadata.alloc, proto->mutable_allocation());
+      ReadAllocationInfo(metadata.stack_trace_pool, 0, metadata.alloc,
+                         proto->mutable_allocation());
     if (metadata.dealloc.tid != base::kInvalidThreadId ||
         metadata.dealloc.trace_len)
-      ReadAllocationInfo(metadata.dealloc, proto->mutable_deallocation());
+      ReadAllocationInfo(metadata.stack_trace_pool, metadata.alloc.trace_len,
+                         metadata.dealloc, proto->mutable_deallocation());
   }

   proto->set_region_start(valid_state.pages_base_addr);
@@ -185,6 +187,8 @@ GwpAsanCrashAnalysisResult CrashAnalyzer::AnalyzeCrashedAllocator(
 }

 void CrashAnalyzer::ReadAllocationInfo(
+    const uint8_t* stack_trace,
+    size_t stack_trace_offset,
     const SlotMetadata::AllocationInfo& slot_info,
     gwp_asan::Crash_AllocationInfo* proto_info) {
   if (slot_info.tid != base::kInvalidThreadId)
@@ -193,15 +197,17 @@ void CrashAnalyzer::ReadAllocationInfo(
   if (!slot_info.trace_len || !slot_info.trace_collected)
     return;

-  if (slot_info.trace_len > AllocatorState::kMaxPackedTraceLength) {
+  if (slot_info.trace_len > AllocatorState::kMaxPackedTraceLength ||
+      stack_trace_offset + slot_info.trace_len >
+          AllocatorState::kMaxPackedTraceLength) {
     DLOG(ERROR) << "Stack trace length is corrupted: " << slot_info.trace_len;
     return;
   }

   uintptr_t unpacked_stack_trace[AllocatorState::kMaxPackedTraceLength];
   size_t unpacked_len =
-      Unpack(slot_info.packed_trace, slot_info.trace_len, unpacked_stack_trace,
-             AllocatorState::kMaxPackedTraceLength);
+      Unpack(stack_trace + stack_trace_offset, slot_info.trace_len,
+             unpacked_stack_trace, AllocatorState::kMaxPackedTraceLength);
   if (!unpacked_len) {
     DLOG(ERROR) << "Failed to unpack stack trace.";
     return;
...
@@ -91,9 +91,11 @@ class CrashAnalyzer {
                                 crashpad::VMAddress gpa_addr,
                                 gwp_asan::Crash* proto);

-  // This method fills out an AllocationInfo protobuf from a
-  // SlotMetadata::AllocationInfo struct.
-  static void ReadAllocationInfo(const SlotMetadata::AllocationInfo& slot_info,
-                                 gwp_asan::Crash_AllocationInfo* proto_info);
+  // This method fills out an AllocationInfo protobuf from a stack trace
+  // and a SlotMetadata::AllocationInfo struct.
+  static void ReadAllocationInfo(const uint8_t* stack_trace,
+                                 size_t stack_trace_offset,
+                                 const SlotMetadata::AllocationInfo& slot_info,
+                                 gwp_asan::Crash_AllocationInfo* proto_info);

   FRIEND_TEST_ALL_PREFIXES(CrashAnalyzerTest, StackTraceCollection);
...
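
Since the trace length and offset are read out of a potentially corrupted
crash dump, the analyzer validates both against the fixed pool size before
unpacking. A minimal sketch of that check follows; TraceRangeIsValid is an
illustrative name, not the actual Chromium function:

#include <cstddef>

constexpr size_t kMaxPackedTraceLength = 400;

// Returns true if [stack_trace_offset, stack_trace_offset + trace_len) lies
// entirely within the shared stack_trace_pool. Both values come from the
// crashed process and must not be trusted.
bool TraceRangeIsValid(size_t stack_trace_offset, size_t trace_len) {
  if (trace_len > kMaxPackedTraceLength)
    return false;  // The length alone exceeds the pool.
  if (stack_trace_offset + trace_len > kMaxPackedTraceLength)
    return false;  // The trace would run past the end of the pool.
  return true;
}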