Commit 0fead47b authored by Sergei Glazunov, committed by Chromium LUCI CQ

[CheckedPtr] Improve the BackupRefPtr design

Use a dedicated bit to track the liveness state of an allocation.
This change allows us to detect double-free bugs and improves
the overall stability.

Additionally, use a more appropriate inlining strategy.

Bug: 1073933
Change-Id: I62584e48e85edfe5ecbc1213cde99c7228ecd3e0
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2469569
Commit-Queue: Sergei Glazunov <glazunov@google.com>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#837188}
parent a45f52c7
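To make the new counting scheme concrete before diving into the diff: the sketch below mirrors the logic this change adds to partition_ref_count.h (bit 0 of the count is the liveness bit, references are counted in steps of 2, and the allocator's free path clears the liveness bit while checking that it was still set). It is a simplified, standalone illustration rather than the Chromium class itself; assert() stands in for PA_CHECK, and the acquire fences of the real implementation are omitted.

#include <atomic>
#include <cassert>
#include <cstdint>

class RefCountSketch {
 public:
  // A freshly created allocation starts at 1: zero references, liveness bit set.
  RefCountSketch() : count_(1) {}

  // Called when a BackupRefPtr starts pointing at the allocation.
  void Acquire() {
    int32_t old_count = count_.fetch_add(2, std::memory_order_relaxed);
    assert(old_count > 0);
    (void)old_count;
  }

  // Called when a BackupRefPtr stops pointing at the allocation.
  // Returns true if the slot can now be reclaimed (the count reached 0).
  bool Release() {
    return count_.fetch_sub(2, std::memory_order_release) == 2;
  }

  // Called by the allocator on Free(): clears the liveness bit. A second
  // Free() would observe an even count here, which is the double-free check.
  bool ReleaseFromAllocator() {
    int32_t old_count = count_.fetch_sub(1, std::memory_order_release);
    assert(old_count & 1);  // double-free detection
    return old_count == 1;  // no dangling references left: reclaim immediately
  }

  bool IsAlive() const { return count_.load(std::memory_order_relaxed) & 1; }

 private:
  std::atomic<int32_t> count_;
};

Starting from 1, the sequence Acquire(), then Free() (ReleaseFromAllocator), then Release() walks the count 1 -> 3 -> 2 -> 0: after Free() the count is still non-zero, so the slot stays quarantined, and it only becomes reusable once the last dangling reference is released.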
@@ -311,6 +311,7 @@ component("base") {
     "macros.h",
     "memory/aligned_memory.cc",
     "memory/aligned_memory.h",
+    "memory/checked_ptr.cc",
     "memory/checked_ptr.h",
     "memory/discardable_memory.cc",
     "memory/discardable_memory.h",
@@ -1807,7 +1808,6 @@ component("base") {
     "allocator/partition_allocator/partition_oom.h",
     "allocator/partition_allocator/partition_page.cc",
     "allocator/partition_allocator/partition_page.h",
-    "allocator/partition_allocator/partition_ref_count.cc",
     "allocator/partition_allocator/partition_ref_count.h",
     "allocator/partition_allocator/partition_root.cc",
     "allocator/partition_allocator/partition_root.h",
......
@@ -107,39 +107,11 @@ template void PartitionAllocator<internal::NotThreadSafe>::init(
     PartitionOptions);

 #if DCHECK_IS_ON()
-void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr) {
-  PA_DCHECK(IsManagedByPartitionAllocNormalBuckets(ptr));
+void DCheckGetSlotOffsetIsZero(void* ptr) {
+  PA_DCHECK(PartitionAllocGetSlotOffset(ptr) == 0);
 }
 #endif
-
-// Gets the offset from the beginning of the allocated slot, adjusted for cookie
-// (if any).
-// CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
-// lead to undefined behavior.
-//
-// This function is not a template, and can be used on either variant
-// (thread-safe or not) of the allocator. This relies on the two PartitionRoot<>
-// having the same layout, which is enforced by static_assert().
-BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr) {
-  internal::DCheckIfManagedByPartitionAllocNormalBuckets(ptr);
-  auto* slot_span =
-      internal::PartitionAllocGetSlotSpanForSizeQuery<internal::ThreadSafe>(
-          ptr);
-  auto* root = PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span);
-  // The only allocations that don't use ref-count are allocated outside of
-  // GigaCage, hence we'd never get here in the `allow_extras = false` case.
-  PA_DCHECK(root->allow_extras);
-  ptr = root->AdjustPointerForExtrasSubtract(ptr);
-
-  // Get the offset from the beginning of the slot span.
-  uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
-  uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
-      internal::SlotSpanMetadata<internal::ThreadSafe>::ToPointer(slot_span));
-  size_t offset_in_slot_span = ptr_addr - slot_span_start;
-
-  return slot_span->bucket->GetSlotOffset(offset_in_slot_span);
-}

 } // namespace internal
 } // namespace base
@@ -69,22 +69,6 @@ namespace base {
 BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
 BASE_EXPORT void PartitionAllocGlobalUninitForTesting();

-// This file may end up getting included even when PartitionAlloc isn't used,
-// but the .cc file won't be linked. Exclude the code that relies on it.
-#if BUILDFLAG(USE_PARTITION_ALLOC)
-namespace internal {
-
-// Avoid including partition_address_space.h from this .h file, by moving the
-// call to IfManagedByPartitionAllocNormalBuckets into the .cc file.
-#if DCHECK_IS_ON()
-BASE_EXPORT void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr);
-#else
-ALWAYS_INLINE void DCheckIfManagedByPartitionAllocNormalBuckets(const void*) {}
-#endif
-
-} // namespace internal
-#endif // BUILDFLAG(USE_PARTITION_ALLOC)
-
 namespace internal {

 template <bool thread_safe>
 struct BASE_EXPORT PartitionAllocator {
......
@@ -269,6 +269,8 @@ static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1 GiB
 static const unsigned char kUninitializedByte = 0xAB;
 static const unsigned char kFreedByte = 0xCD;
+static const unsigned char kQuarantinedByte = 0xEF;

 // Flags for `PartitionAllocFlags`.
 enum PartitionAllocFlags {
   PartitionAllocReturnNull = 1 << 0,
......
@@ -6,6 +6,8 @@
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_

 #include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"

 namespace base {
 namespace internal {
@@ -13,11 +15,14 @@ namespace internal {
 template <bool thread_safe>
 struct SlotSpanMetadata;

-BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr);
-
 constexpr bool ThreadSafe = true;
 constexpr bool NotThreadSafe = false;

+#if DCHECK_IS_ON()
+BASE_EXPORT void DCheckGetSlotOffsetIsZero(void* ptr);
+#else
+ALWAYS_INLINE void DCheckGetSlotOffsetIsZero(void*) {}
+#endif
+
 } // namespace internal

 template <bool thread_safe>
......
@@ -852,7 +852,7 @@ TEST_F(PartitionAllocTest, AllocSizes) {
 }

 // Test that we can fetch the real allocated size after an allocation.
-TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
+TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   void* ptr;
   size_t requested_size, actual_size, predicted_size;
@@ -869,6 +869,8 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
   for (size_t offset = 0; offset < requested_size; ++offset) {
     EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
              offset);
+    EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
+             ptr);
   }
 }
 #endif
@@ -888,6 +890,8 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
   for (size_t offset = 0; offset < requested_size; offset += 877) {
     EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
              offset);
+    EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
+             ptr);
   }
 }
 #endif
@@ -911,6 +915,8 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
   for (size_t offset = 0; offset < requested_size; offset += 4999) {
     EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
              offset);
+    EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
+             ptr);
   }
 }
 #endif
@@ -928,6 +934,8 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
   for (size_t offset = 0; offset < requested_size; offset += 4999) {
     EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
              offset);
+    EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
+             ptr);
   }
 }
 #endif
@@ -1632,9 +1640,6 @@ TEST_F(PartitionAllocDeathTest, LargeAllocs) {
   EXPECT_DEATH(allocator.root()->Alloc(MaxDirectMapped() + 1, type_name), "");
 }

-// TODO(glazunov): make BackupRefPtr compatible with the double-free detection.
-#if !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-
 // Check that our immediate double-free detection works.
 TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
   void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
@@ -1657,8 +1662,6 @@ TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
   EXPECT_DEATH(allocator.root()->Free(ptr), "");
 }

-#endif // !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-
 // Check that guard pages are present where expected.
 TEST_F(PartitionAllocDeathTest, DirectMapGuardPages) {
   const size_t kSizes[] = {
@@ -2713,6 +2716,7 @@ TEST_F(PartitionAllocTest, MAYBE_Bookkeeping) {
 TEST_F(PartitionAllocTest, RefCountBasic) {
   constexpr uint64_t kCookie = 0x1234567890ABCDEF;
+  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;

   size_t alloc_size = 64 - kExtraAllocSize;
   uint64_t* ptr1 = reinterpret_cast<uint64_t*>(
@@ -2722,17 +2726,20 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   *ptr1 = kCookie;

   auto* ref_count = PartitionRefCountPointer(ptr1);
+  EXPECT_TRUE(ref_count->HasOneRef());

-  ref_count->AddRef();
-  ref_count->Release();
+  ref_count->Acquire();
+  EXPECT_FALSE(ref_count->Release());
   EXPECT_TRUE(ref_count->HasOneRef());
   EXPECT_EQ(*ptr1, kCookie);

-  ref_count->AddRef();
+  ref_count->Acquire();
   EXPECT_FALSE(ref_count->HasOneRef());

   allocator.root()->Free(ptr1);
+  // The allocation shouldn't be reclaimed, and its contents should be zapped.
   EXPECT_NE(*ptr1, kCookie);
+  EXPECT_EQ(*ptr1, kQuarantined);

   // The allocator should not reuse the original slot since its reference count
   // doesn't equal zero.
@@ -2742,7 +2749,8 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   allocator.root()->Free(ptr2);

   // When the last reference is released, the slot should become reusable.
-  ref_count->Release();
+  EXPECT_TRUE(ref_count->Release());
+  PartitionAllocFreeForRefCounting(ptr1);
   uint64_t* ptr3 = reinterpret_cast<uint64_t*>(
       allocator.root()->Alloc(alloc_size, type_name));
   EXPECT_EQ(ptr1, ptr3);
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/partition_ref_count.h"

#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_page.h"

namespace base {
namespace internal {

#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR

// TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
// is no longer used.
void PartitionRefCount::Free() {
  auto* slot_span =
      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(this);
  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);

#ifdef ADDRESS_SANITIZER
  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
  // supports extras.
  PA_DCHECK(root->allow_extras);
  size_t usable_size = root->AdjustSizeForExtrasSubtract(utilized_slot_size);
  ASAN_UNPOISON_MEMORY_REGION(this, usable_size);
#endif

  if (root->is_thread_safe) {
    root->RawFree(this, slot_span);
    return;
  }

  auto* non_thread_safe_slot_span =
      reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
  auto* non_thread_safe_root =
      reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
  non_thread_safe_root->RawFree(this, non_thread_safe_slot_span);
}

#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR

} // namespace internal
} // namespace base
@@ -22,12 +22,20 @@ namespace internal {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR

+// Special-purpose atomic reference count class used by BackupRefPtrImpl.
+// The least significant bit of the count is reserved for tracking the liveness
+// state of an allocation: it's set when the allocation is created and cleared
+// on free(). So the count can be:
+//
+// 1 for an allocation that is just returned from Alloc()
+// 2 * k + 1 for a "live" allocation with k references
+// 2 * k for an allocation with k dangling references after Free()
+//
+// This protects against double-free's, as we check whether the reference count
+// is odd in |ReleaseFromAllocator()|, and if not we have a double-free.
 class BASE_EXPORT PartitionRefCount {
  public:
-  // PartitionRefCount should never be constructed directly.
-  PartitionRefCount() = delete;
-
-  ALWAYS_INLINE void Init() { count_.store(1, std::memory_order_relaxed); }
+  PartitionRefCount();

   // Incrementing the counter doesn't imply any visibility about modified
   // memory, hence relaxed atomics. For decrement, visibility is required before
@@ -36,32 +44,52 @@ class BASE_EXPORT PartitionRefCount {
   // For details, see base::AtomicRefCount, which has the same constraints and
   // characteristics.
-  ALWAYS_INLINE void AddRef() {
-    CHECK_GT(count_.fetch_add(1, std::memory_order_relaxed), 0);
+  ALWAYS_INLINE void Acquire() {
+    PA_CHECK(count_.fetch_add(2, std::memory_order_relaxed) > 0);
   }

-  ALWAYS_INLINE void Release() {
-    if (count_.fetch_sub(1, std::memory_order_release) == 1) {
+  // Returns true if the allocation should be reclaimed.
+  ALWAYS_INLINE bool Release() {
+    if (count_.fetch_sub(2, std::memory_order_release) == 2) {
       // In most thread-safe reference count implementations, an acquire
       // barrier is required so that all changes made to an object from other
       // threads are visible to its destructor. In our case, the destructor
       // finishes before the final `Release` call, so it shouldn't be a problem.
       // However, we will keep it as a precautionary measure.
       std::atomic_thread_fence(std::memory_order_acquire);
-      Free();
+      return true;
+    }
+    return false;
+  }
+
+  // Returns true if the allocation should be reclaimed.
+  // This function should be called by the allocator during Free().
+  ALWAYS_INLINE bool ReleaseFromAllocator() {
+    int32_t old_count = count_.fetch_sub(1, std::memory_order_release);
+    PA_CHECK(old_count & 1);  // double-free detection
+    if (old_count == 1) {
+      std::atomic_thread_fence(std::memory_order_acquire);
+      return true;
     }
+    return false;
   }

   ALWAYS_INLINE bool HasOneRef() {
     return count_.load(std::memory_order_acquire) == 1;
   }

-  private:
-  void Free();
+  ALWAYS_INLINE bool IsAlive() {
+    return count_.load(std::memory_order_relaxed) & 1;
+  }

-  std::atomic<int32_t> count_;
+ private:
+  std::atomic<int32_t> count_{1};
 };

+ALWAYS_INLINE PartitionRefCount::PartitionRefCount() = default;
+
 // Allocate extra space for the reference count to satisfy the alignment
 // requirement.
 static constexpr size_t kInSlotRefCountBufferSize = kAlignment;
@@ -77,14 +105,7 @@ static constexpr size_t kPartitionRefCountOffset = kInSlotRefCountBufferSize;

 ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* ptr) {
-  return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
-                                              PartitionAllocGetSlotOffset(ptr) -
-                                              kPartitionRefCountOffset);
-}
-
-// This function can only be used when we are certain that `ptr` points to the
-// beginning of the allocation slot.
-ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointerNoOffset(void* ptr) {
+  DCheckGetSlotOffsetIsZero(ptr);
   return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
                                               kPartitionRefCountOffset);
 }
......
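For how the two release paths defined above are meant to cooperate, here is a hedged sketch built on the simplified RefCountSketch shown just after the commit message (in the real patch these paths live in PartitionRoot<>::FreeNoHooksImmediate() and BackupRefPtrImpl::ReleaseInternal(); the helper names below are illustrative only):

// The allocator's free path and the smart pointer's release path each drop
// their share of the count; whichever side brings it to zero reclaims the slot.
void OnPartitionFree(RefCountSketch& ref_count) {
  if (ref_count.ReleaseFromAllocator()) {
    // No BackupRefPtr is outstanding: the slot can be reused right away.
  } else {
    // Dangling BackupRefPtrs remain: zap the contents (kQuarantinedByte in the
    // real code) and leave the slot quarantined until the last Release().
  }
}

void OnBackupRefPtrDestroyed(RefCountSketch& ref_count) {
  if (ref_count.Release()) {
    // This was the last dangling reference after Free(): reclaim the slot now
    // (PartitionAllocFreeForRefCounting() in the real code).
  }
}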
@@ -301,6 +301,12 @@ static void PartitionDumpBucketStats(
   }
 }

+#if DCHECK_IS_ON()
+void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr) {
+  PA_DCHECK(IsManagedByPartitionAllocNormalBuckets(ptr));
+}
+#endif
+
 } // namespace internal

 template <bool thread_safe>
......
@@ -69,6 +69,22 @@
 namespace base {

+// This file may end up getting included even when PartitionAlloc isn't used,
+// but the .cc file won't be linked. Exclude the code that relies on it.
+#if BUILDFLAG(USE_PARTITION_ALLOC)
+namespace internal {
+
+// Avoid including partition_address_space.h from this .h file, by moving the
+// call to IfManagedByPartitionAllocNormalBuckets into the .cc file.
+#if DCHECK_IS_ON()
+BASE_EXPORT void DCheckIfManagedByPartitionAllocNormalBuckets(const void* ptr);
+#else
+ALWAYS_INLINE void DCheckIfManagedByPartitionAllocNormalBuckets(const void*) {}
+#endif
+
+} // namespace internal
+#endif // BUILDFLAG(USE_PARTITION_ALLOC)
+
 enum PartitionPurgeFlags {
   // Decommitting the ring list of empty slot spans is reasonably fast.
   PartitionPurgeDecommitEmptySlotSpans = 1 << 0,
@@ -292,6 +308,8 @@ struct BASE_EXPORT PartitionRoot {
   ALWAYS_INLINE void RawFree(void* ptr);
   ALWAYS_INLINE void RawFree(void* ptr, SlotSpan* slot_span);

+  ALWAYS_INLINE void RawFreeWithThreadCache(void* ptr, SlotSpan* slot_span);
+
   internal::ThreadCache* thread_cache_for_testing() const {
     return with_thread_cache ? internal::ThreadCache::Get() : nullptr;
   }
@@ -613,6 +631,99 @@ PartitionAllocGetSlotSpanForSizeQuery(void* ptr) {
   return slot_span;
 }

+// This file may end up getting included even when PartitionAlloc isn't used,
+// but the .cc file won't be linked. Exclude the code that relies on it.
+#if BUILDFLAG(USE_PARTITION_ALLOC)
+
+// Gets the offset from the beginning of the allocated slot, adjusted for cookie
+// (if any).
+// CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
+// lead to undefined behavior.
+//
+// This function is not a template, and can be used on either variant
+// (thread-safe or not) of the allocator. This relies on the two PartitionRoot<>
+// having the same layout, which is enforced by static_assert().
+ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
+  internal::DCheckIfManagedByPartitionAllocNormalBuckets(ptr);
+  auto* slot_span =
+      internal::PartitionAllocGetSlotSpanForSizeQuery<internal::ThreadSafe>(
+          ptr);
+  auto* root = PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span);
+  // The only allocations that don't use ref-count are allocated outside of
+  // GigaCage, hence we'd never get here in the `allow_extras = false` case.
+  PA_DCHECK(root->allow_extras);
+  ptr = root->AdjustPointerForExtrasSubtract(ptr);
+
+  // Get the offset from the beginning of the slot span.
+  uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
+  uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
+      internal::SlotSpanMetadata<internal::ThreadSafe>::ToPointer(slot_span));
+  size_t offset_in_slot_span = ptr_addr - slot_span_start;
+
+  return slot_span->bucket->GetSlotOffset(offset_in_slot_span);
+}
+
+ALWAYS_INLINE void* PartitionAllocGetSlotStart(void* ptr) {
+  internal::DCheckIfManagedByPartitionAllocNormalBuckets(ptr);
+  auto* slot_span =
+      internal::PartitionAllocGetSlotSpanForSizeQuery<internal::ThreadSafe>(
+          ptr);
+  auto* root = PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span);
+  // The only allocations that don't use ref-count are allocated outside of
+  // GigaCage, hence we'd never get here in the `allow_extras = false` case.
+  PA_DCHECK(root->allow_extras);
+
+  // Get the offset from the beginning of the slot span.
+  uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
+  uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
+      internal::SlotSpanMetadata<internal::ThreadSafe>::ToPointer(slot_span));
+  size_t offset_in_slot_span = ptr_addr - slot_span_start;
+
+  auto* bucket = slot_span->bucket;
+  return root->AdjustPointerForExtrasAdd(reinterpret_cast<void*>(
+      slot_span_start +
+      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span)));
+}
+
+#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
+// TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
+// is no longer used.
+ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* ptr) {
+  PA_DCHECK(!internal::PartitionRefCountPointer(ptr)->IsAlive());
+
+  auto* slot_span =
+      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(ptr);
+  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
+  PA_DCHECK(root->allow_extras);
+
+#ifdef ADDRESS_SANITIZER
+  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
+  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
+  // supports extras.
+  size_t usable_size = root->AdjustSizeForExtrasSubtract(utilized_slot_size);
+  ASAN_UNPOISON_MEMORY_REGION(ptr, usable_size);
+#endif
+
+  void* slot_start = root->AdjustPointerForExtrasSubtract(ptr);
+
+#if DCHECK_IS_ON()
+  memset(slot_start, kFreedByte, slot_span->GetUtilizedSlotSize());
+#endif
+
+  if (root->is_thread_safe) {
+    root->RawFreeWithThreadCache(slot_start, slot_span);
+    return;
+  }
+
+  auto* non_thread_safe_slot_span =
+      reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
+  auto* non_thread_safe_root =
+      reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
+  non_thread_safe_root->RawFreeWithThreadCache(slot_start,
+                                               non_thread_safe_slot_span);
+}
+#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
+#endif // BUILDFLAG(USE_PARTITION_ALLOC)
+
 } // namespace internal

 template <bool thread_safe>
@@ -756,8 +867,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   if (!slot_span->bucket->is_direct_mapped()) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-    internal::PartitionRefCount* ref_count =
-        internal::PartitionRefCountPointerNoOffset(ptr);
+    auto* ref_count = internal::PartitionRefCountPointer(ptr);
     // If we are holding the last reference to the allocation, it can be freed
     // immediately. Otherwise, defer the operation and zap the memory to turn
     // potential use-after-free issues into unexploitable crashes.
@@ -765,12 +875,13 @@
 #ifdef ADDRESS_SANITIZER
       ASAN_POISON_MEMORY_REGION(ptr, usable_size);
 #else
-      memset(ptr, kFreedByte, usable_size);
+      memset(ptr, kQuarantinedByte, usable_size);
 #endif
-      ref_count->Release();
-      return;
     }
-#endif
+
+    if (UNLIKELY(!(ref_count->ReleaseFromAllocator())))
+      return;
+#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   }

   // Shift ptr to the beginning of the slot.
@@ -788,21 +899,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
   }
 #endif

-  // TLS access can be expensive, do a cheap local check first.
-  //
-  // Also the thread-unsafe variant doesn't have a use for a thread cache, so
-  // make it statically known to the compiler.
-  if (thread_safe && with_thread_cache &&
-      !slot_span->bucket->is_direct_mapped()) {
-    PA_DCHECK(slot_span->bucket >= this->buckets &&
-              slot_span->bucket <= &this->sentinel_bucket);
-    size_t bucket_index = slot_span->bucket - this->buckets;
-    auto* thread_cache = internal::ThreadCache::Get();
-    if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
-      return;
-  }
-
-  RawFree(ptr, slot_span);
+  RawFreeWithThreadCache(ptr, slot_span);
 }

 template <bool thread_safe>
@@ -822,6 +919,27 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFree(void* ptr,
   deferred_unmap.Run();
 }

+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
+    void* ptr,
+    SlotSpan* slot_span) {
+  // TLS access can be expensive, do a cheap local check first.
+  //
+  // Also the thread-unsafe variant doesn't have a use for a thread cache, so
+  // make it statically known to the compiler.
+  if (thread_safe && with_thread_cache &&
+      !slot_span->bucket->is_direct_mapped()) {
+    PA_DCHECK(slot_span->bucket >= this->buckets &&
+              slot_span->bucket <= &this->sentinel_bucket);
+    size_t bucket_index = slot_span->bucket - this->buckets;
+    auto* thread_cache = internal::ThreadCache::Get();
+    if (thread_cache && thread_cache->MaybePutInCache(ptr, bucket_index))
+      return;
+  }
+
+  RawFree(ptr, slot_span);
+}
+
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeLocked(void* ptr) {
   SlotSpan* slot_span = SlotSpan::FromPointerNoAlignmentCheck(ptr);
@@ -1103,7 +1221,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
   bool is_direct_mapped = raw_size > kMaxBucketed;
   if (allow_extras && !is_direct_mapped) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-    internal::PartitionRefCountPointerNoOffset(ret)->Init();
+    new (internal::PartitionRefCountPointer(ret)) internal::PartitionRefCount();
 #endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   }
   return ret;
......
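A quick worked example of the slot-start arithmetic in PartitionAllocGetSlotStart() above, with hypothetical numbers: if the bucket's slot_size is 64 and the pointer lies 130 bytes past the start of its slot span, GetSlotNumber() yields slot 2, so the slot itself begins 64 * 2 = 128 bytes into the span. AdjustPointerForExtrasAdd() then moves that slot start past the per-slot extras so the returned address matches what Alloc() originally handed out, which is exactly what the updated AllocGetSizeAndOffsetAndStart test checks with EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset), ptr).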
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/checked_ptr.h"

#include "base/allocator/partition_allocator/partition_alloc.h"

namespace base {
namespace internal {

#if ENABLE_BACKUP_REF_PTR_IMPL

void BackupRefPtrImpl::AcquireInternal(void* ptr) {
  void* slot_start = PartitionAllocGetSlotStart(ptr);
  PartitionRefCountPointer(slot_start)->Acquire();
}

void BackupRefPtrImpl::ReleaseInternal(void* ptr) {
  void* slot_start = PartitionAllocGetSlotStart(ptr);
  if (PartitionRefCountPointer(slot_start)->Release())
    PartitionAllocFreeForRefCounting(slot_start);
}

bool BackupRefPtrImpl::IsPointeeAlive(void* ptr) {
  void* slot_start = PartitionAllocGetSlotStart(ptr);
  return PartitionRefCountPointer(slot_start)->IsAlive();
}

#endif // ENABLE_BACKUP_REF_PTR_IMPL

} // namespace internal
} // namespace base
@@ -116,8 +116,9 @@ struct BackupRefPtrImpl {
     void* ptr = const_cast<void*>(cv_ptr);
     uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
-    if (LIKELY(IsManagedByPartitionAllocNormalBuckets(ptr)))
-      PartitionRefCountPointer(ptr)->AddRef();
+    // This check already covers the nullptr case.
+    if (IsManagedByPartitionAllocNormalBuckets(ptr))
+      AcquireInternal(ptr);
     return addr;
   }
@@ -127,8 +128,8 @@
     void* ptr = reinterpret_cast<void*>(wrapped_ptr);
     // This check already covers the nullptr case.
-    if (LIKELY(IsManagedByPartitionAllocNormalBuckets(ptr)))
-      PartitionRefCountPointer(ptr)->Release();
+    if (IsManagedByPartitionAllocNormalBuckets(ptr))
+      ReleaseInternal(ptr);
   }

   // Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a
@@ -143,6 +144,11 @@
   // hasn't been freed. The function is allowed to crash on nullptr.
   static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
       uintptr_t wrapped_ptr) {
+#if DCHECK_IS_ON()
+    void* ptr = reinterpret_cast<void*>(wrapped_ptr);
+    if (IsManagedByPartitionAllocNormalBuckets(ptr))
+      DCHECK(IsPointeeAlive(ptr));
+#endif
     return reinterpret_cast<void*>(wrapped_ptr);
   }
@@ -183,6 +189,17 @@
   // This is for accounting only, used by unit tests.
   static ALWAYS_INLINE void IncrementSwapCountForTest() {}

+ private:
+  // We've evaluated several strategies (inline nothing, various parts, or
+  // everything in |Wrap()| and |Release()|) using the Speedometer2 benchmark
+  // to measure performance. The best results were obtained when only the
+  // lightweight |IsManagedByPartitionAllocNormalBuckets()| check was inlined.
+  // Therefore, we've extracted the rest into the functions below and marked
+  // them as NOINLINE to prevent unintended LTO effects.
+  static BASE_EXPORT NOINLINE void AcquireInternal(void* ptr);
+  static BASE_EXPORT NOINLINE void ReleaseInternal(void* ptr);
+  static BASE_EXPORT NOINLINE bool IsPointeeAlive(void* ptr);
 };

 #endif // ENABLE_BACKUP_REF_PTR_IMPL
......
@@ -735,8 +735,12 @@ TEST(BackupRefPtrImpl, Basic) {
   *raw_ptr1 = 42;
   EXPECT_EQ(*raw_ptr1, *checked_ptr1);

-  // The allocation should be poisoned since there's a CheckedPtr alive.
   allocator.root()->Free(raw_ptr1);
+#if DCHECK_IS_ON()
+  // In debug builds, the use-after-free should be caught immediately.
+  EXPECT_DEATH_IF_SUPPORTED(if (*checked_ptr1 == 42) return, "");
+#else // DCHECK_IS_ON()
+  // The allocation should be poisoned since there's a CheckedPtr alive.
   EXPECT_NE(*checked_ptr1, 42ul);

   // The allocator should not be able to reuse the slot at this point.
@@ -749,6 +753,7 @@
   void* raw_ptr3 = allocator.root()->Alloc(sizeof(uint64_t), "");
   EXPECT_EQ(raw_ptr1, raw_ptr3);
   allocator.root()->Free(raw_ptr3);
+#endif // DCHECK_IS_ON()
 }

 #endif // BUILDFLAG(USE_PARTITION_ALLOC) && ENABLE_BACKUP_REF_PTR_IMPL &&
......