Commit 946e27ec authored by Sergei Glazunov, committed by Commit Bot

[CheckedPtr] Initial BackupRefPtr implementation

With this patch, PartitionAlloc holds the reference count (i.e. the
number of associated CheckedPtrs) for every non-direct-mapped
allocation. On free(), if the count doesn't equal zero, the allocation
is poisoned and quarantined until there are no references left;
therefore, use-after-free issues affecting CheckedPtr-protected
pointers become unexploitable.

This is a highly experimental feature, which is disabled by default.
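
As a rough illustration of the free path described above (a minimal sketch with hypothetical names such as SlotRefCount, ReturnSlotToFreeList and kZapByte, not the actual PartitionAlloc code):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

struct SlotRefCount {
  std::atomic<std::int32_t> count{1};  // the allocation itself holds one reference
};

constexpr unsigned char kZapByte = 0xCD;             // assumed poison value
inline void ReturnSlotToFreeList(void* /*slot*/) {}  // stand-in for the allocator

void FreeWithBackupRefPtr(SlotRefCount* rc, void* data, std::size_t data_size,
                          void* slot_start) {
  if (rc->count.load(std::memory_order_acquire) == 1) {
    // No CheckedPtr references the slot: release it immediately.
    ReturnSlotToFreeList(slot_start);
    return;
  }
  // Dangling CheckedPtrs still exist: zap the payload so a use-after-free
  // reads poison instead of attacker-controlled data, then drop the
  // allocation's own reference. The slot is reused only once the last
  // CheckedPtr lets go.
  std::memset(data, kZapByte, data_size);
  if (rc->count.fetch_sub(1, std::memory_order_release) == 1) {
    std::atomic_thread_fence(std::memory_order_acquire);
    ReturnSlotToFreeList(slot_start);
  }
}

In the patch itself, this logic is spread across PartitionRoot::FreeNoHooks() and PartitionRefCount::Release()/Free(), shown in the diff below.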

Bug: 1073933
Change-Id: Icda4b6e0c303e472df4222e1418bf0fdfe9aeef1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2385456
Commit-Queue: Sergei Glazunov <glazunov@google.com>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#805392}
parent 3fe858fe
......@@ -1772,6 +1772,8 @@ component("base") {
"allocator/partition_allocator/partition_oom.h",
"allocator/partition_allocator/partition_page.cc",
"allocator/partition_allocator/partition_page.h",
"allocator/partition_allocator/partition_ref_count.cc",
"allocator/partition_allocator/partition_ref_count.h",
"allocator/partition_allocator/partition_tag.h",
"allocator/partition_allocator/partition_tag_bitmap.h",
"allocator/partition_allocator/random.cc",
......
......@@ -9,6 +9,14 @@
#define ENABLE_TAG_FOR_MTE_CHECKED_PTR 0
#define ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR 0
#define ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR 0
static_assert(!ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR ||
!ENABLE_TAG_FOR_CHECKED_PTR2,
"ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR and "
"ENABLE_TAG_FOR_CHECKED_PTR2 aren't compatible and can't be both "
"used at the same time");
// This is a sub-variant of ENABLE_TAG_FOR_MTE_CHECKED_PTR
#define MTE_CHECKED_PTR_SET_TAG_AT_FREE 1
......
......@@ -66,6 +66,7 @@
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_lock.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/base_export.h"
#include "base/bits.h"
......@@ -84,6 +85,10 @@
#include <stdlib.h>
#endif
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif // defined(ADDRESS_SANITIZER)
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
......@@ -173,6 +178,7 @@ ALWAYS_INLINE void* PartitionPointerAdjustSubtract(bool allow_extras,
if (allow_extras) {
ptr = PartitionTagPointerAdjustSubtract(ptr);
ptr = PartitionCookiePointerAdjustSubtract(ptr);
ptr = PartitionRefCountPointerAdjustSubtract(ptr);
}
return ptr;
}
......@@ -181,6 +187,7 @@ ALWAYS_INLINE void* PartitionPointerAdjustAdd(bool allow_extras, void* ptr) {
if (allow_extras) {
ptr = PartitionTagPointerAdjustAdd(ptr);
ptr = PartitionCookiePointerAdjustAdd(ptr);
ptr = PartitionRefCountPointerAdjustAdd(ptr);
}
return ptr;
}
......@@ -189,6 +196,7 @@ ALWAYS_INLINE size_t PartitionSizeAdjustAdd(bool allow_extras, size_t size) {
if (allow_extras) {
size = PartitionTagSizeAdjustAdd(size);
size = PartitionCookieSizeAdjustAdd(size);
size = PartitionRefCountSizeAdjustAdd(size);
}
return size;
}
......@@ -198,6 +206,7 @@ ALWAYS_INLINE size_t PartitionSizeAdjustSubtract(bool allow_extras,
if (allow_extras) {
size = PartitionTagSizeAdjustSubtract(size);
size = PartitionCookieSizeAdjustSubtract(size);
size = PartitionRefCountSizeAdjustSubtract(size);
}
return size;
}
......@@ -369,6 +378,7 @@ struct BASE_EXPORT PartitionRoot {
size_t total_size_of_committed_pages = 0;
size_t total_size_of_super_pages = 0;
size_t total_size_of_direct_mapped_pages = 0;
bool is_thread_safe = thread_safe;
// TODO(bartekn): Consider size of added extras (cookies and/or tag, or
// nothing) instead of true|false, so that we can just add or subtract the
// size instead of having an if branch on the hot paths.
......@@ -482,6 +492,9 @@ struct BASE_EXPORT PartitionRoot {
internal::PartitionBucket<thread_safe>* SizeToBucket(size_t size) const;
// Frees memory, with |ptr| as returned by |RawAlloc()|.
ALWAYS_INLINE void RawFree(void* ptr, Page* page);
private:
// Allocates memory, without any cookies / tags.
//
......@@ -492,8 +505,6 @@ struct BASE_EXPORT PartitionRoot {
size_t size,
size_t* allocated_size,
bool* is_already_zeroed);
// Frees memory, with |ptr| as returned by |RawAlloc()|.
ALWAYS_INLINE void RawFree(void* ptr, Page* page);
ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket,
int flags,
size_t size,
......@@ -614,12 +625,11 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
size_t allocated_size = page->GetAllocatedSize();
// |ptr| points after the tag and the cookie.
// The layout is | tag | cookie | data | cookie |
//               ^                ^
//               |                ptr
//               allocation_start_ptr
// The layout is | tag or ref count | cookie | data | cookie |
//               ^                             ^
//               allocation_start_ptr          ptr
//
// Note: tag and cookie can be 0-sized.
// Note: tag, reference count and cookie can be 0-sized.
void* allocation_start_ptr =
internal::PartitionPointerAdjustSubtract(true /* allow_extras */, ptr);
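
(For concreteness, a worked example of the adjustment arithmetic implied by the layout above, using assumed sizes of 16 bytes for the tag/ref-count buffer and 16 bytes per cookie on DCHECK builds; the authoritative constants live in partition_tag.h, partition_cookie.h and partition_ref_count.h.)

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kAssumedTagOrRefCountBuffer = 16;  // assumed
constexpr std::size_t kAssumedCookie = 16;               // DCHECK builds, assumed

int main() {
  // PartitionSizeAdjustAdd(): room for the leading buffer plus both cookies.
  std::size_t requested = 32;
  std::size_t adjusted =
      requested + kAssumedTagOrRefCountBuffer + 2 * kAssumedCookie;
  assert(adjusted == 80);

  // PartitionPointerAdjustAdd()/Subtract(): hop over the leading extras only,
  // since the trailing cookie sits after the data.
  std::uintptr_t allocation_start = 0x1000;
  std::uintptr_t user_ptr =
      allocation_start + kAssumedTagOrRefCountBuffer + kAssumedCookie;
  assert(user_ptr - kAssumedCookie - kAssumedTagOrRefCountBuffer ==
         allocation_start);

  (void)adjusted;
  (void)user_ptr;
  return 0;
}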
......@@ -642,6 +652,23 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* ptr) {
#else
internal::PartitionTagClearValue(ptr, size_with_no_extras);
#endif
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
internal::PartitionRefCount* ref_count =
internal::PartitionRefCountPointerNoOffset(ptr);
// If we are holding the last reference to the allocation, it can be freed
// immediately. Otherwise, defer the operation and zap the memory to turn
// potential use-after-free issues into unexploitable crashes.
if (UNLIKELY(!ref_count->HasOneRef())) {
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr, size_with_no_extras);
#else
memset(ptr, kFreedByte, size_with_no_extras);
#endif
ref_count->Release();
return;
}
#endif
}
ptr = allocation_start_ptr;
......@@ -878,17 +905,21 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(int flags,
memset(ret, 0, size_with_no_extras);
}
// Do not set tag for MTECheckedPtr in the set-tag-at-free case.
// It is set only at Free() time and at slot span allocation time.
#if !ENABLE_TAG_FOR_MTE_CHECKED_PTR || !MTE_CHECKED_PTR_SET_TAG_AT_FREE
bool is_direct_mapped = size > kMaxBucketed;
if (allow_extras && !is_direct_mapped) {
// Do not set tag for MTECheckedPtr in the set-tag-at-free case.
// It is set only at Free() time and at slot span allocation time.
#if !ENABLE_TAG_FOR_MTE_CHECKED_PTR || !MTE_CHECKED_PTR_SET_TAG_AT_FREE
size_t slot_size_with_no_extras =
internal::PartitionSizeAdjustSubtract(allow_extras, allocated_size);
internal::PartitionTagSetValue(ret, slot_size_with_no_extras,
GetNewPartitionTag());
}
#endif // !ENABLE_TAG_FOR_MTE_CHECKED_PTR || !MTE_CHECKED_PTR_SET_TAG_AT_FREE
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
internal::PartitionRefCountPointerNoOffset(ret)->Init();
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
}
return ret;
}
......
......@@ -18,6 +18,7 @@
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/logging.h"
......@@ -134,11 +135,13 @@ namespace internal {
const size_t kTestAllocSize = 16;
#if !DCHECK_IS_ON()
const size_t kPointerOffset = kInSlotTagBufferSize;
const size_t kExtraAllocSize = kInSlotTagBufferSize;
const size_t kPointerOffset = kInSlotTagBufferSize + kInSlotRefCountBufferSize;
const size_t kExtraAllocSize = kInSlotTagBufferSize + kInSlotRefCountBufferSize;
#else
const size_t kPointerOffset = kCookieSize + kInSlotTagBufferSize;
const size_t kExtraAllocSize = kCookieSize * 2 + kInSlotTagBufferSize;
const size_t kPointerOffset =
kCookieSize + kInSlotTagBufferSize + kInSlotRefCountBufferSize;
const size_t kExtraAllocSize =
kCookieSize * 2 + kInSlotTagBufferSize + kInSlotRefCountBufferSize;
#endif
const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
......@@ -1548,12 +1551,14 @@ TEST_F(PartitionAllocDeathTest, LargeAllocs) {
EXPECT_DEATH(allocator.root()->Alloc(kMaxDirectMapped + 1, type_name), "");
}
// TODO(glazunov): make BackupRefPtr compatible with the double-free detection.
#if !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
// Check that our immediate double-free detection works.
TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
allocator.root()->Free(ptr);
EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
......@@ -1571,6 +1576,8 @@ TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
#endif // !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
// Check that guard pages are present where expected.
TEST_F(PartitionAllocDeathTest, GuardPages) {
// PartitionAlloc adds kPartitionPageSize to the requested size
......@@ -2417,11 +2424,12 @@ TEST_F(PartitionAllocTest, Alignment) {
// cookie.
expected_alignment = std::min(expected_alignment, kCookieSize);
#endif
#if ENABLE_TAG_FOR_CHECKED_PTR2
#if ENABLE_TAG_FOR_CHECKED_PTR2 || ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
// When ENABLE_TAG_FOR_CHECKED_PTR2 or ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR is
// on, the in-slot tag and/or reference count buffer is added before rounding
// up the allocation size. The returned pointer points after those extras.
expected_alignment = std::min({expected_alignment, kInSlotTagBufferSize});
expected_alignment = std::min(
{expected_alignment, kInSlotTagBufferSize + kInSlotRefCountBufferSize});
#endif
for (int index = 0; index < 3; index++) {
void* ptr = allocator.root()->Alloc(size, "");
......@@ -2579,6 +2587,48 @@ TEST_F(PartitionAllocTest, GetAllocatedSize) {
}
}
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
TEST_F(PartitionAllocTest, RefCountBasic) {
constexpr uint64_t kCookie = 0x1234567890ABCDEF;
size_t alloc_size = 64 - kExtraAllocSize;
uint64_t* ptr1 = reinterpret_cast<uint64_t*>(
allocator.root()->Alloc(alloc_size, type_name));
EXPECT_TRUE(ptr1);
*ptr1 = kCookie;
auto* ref_count = PartitionRefCountPointer(ptr1);
ref_count->AddRef();
ref_count->Release();
EXPECT_TRUE(ref_count->HasOneRef());
EXPECT_EQ(*ptr1, kCookie);
ref_count->AddRef();
EXPECT_FALSE(ref_count->HasOneRef());
allocator.root()->Free(ptr1);
EXPECT_NE(*ptr1, kCookie);
// The allocator should not reuse the original slot since its reference count
// doesn't equal zero.
uint64_t* ptr2 = reinterpret_cast<uint64_t*>(
allocator.root()->Alloc(alloc_size, type_name));
EXPECT_NE(ptr1, ptr2);
allocator.root()->Free(ptr2);
// When the last reference is released, the slot should become reusable.
ref_count->Release();
uint64_t* ptr3 = reinterpret_cast<uint64_t*>(
allocator.root()->Alloc(alloc_size, type_name));
EXPECT_EQ(ptr1, ptr3);
allocator.root()->Free(ptr3);
}
#endif
} // namespace internal
} // namespace base
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_page.h"
namespace base {
namespace internal {
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
// TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
// is no longer used.
void PartitionRefCount::Free() {
auto* page = PartitionPage<ThreadSafe>::FromPointerNoAlignmentCheck(this);
auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
#ifdef ADDRESS_SANITIZER
size_t allocated_size = page->GetAllocatedSize();
// PartitionRefCount is required to be allocated inside a `PartitionRoot` that
// supports extras.
PA_DCHECK(root->allow_extras);
size_t size_with_no_extras = internal::PartitionSizeAdjustSubtract(
/* allow_extras= */ true, allocated_size);
ASAN_UNPOISON_MEMORY_REGION(this, size_with_no_extras);
#endif
if (root->is_thread_safe) {
root->RawFree(this, page);
return;
}
auto* non_thread_safe_page =
reinterpret_cast<PartitionPage<NotThreadSafe>*>(page);
auto* non_thread_safe_root =
reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
non_thread_safe_root->RawFree(this, non_thread_safe_page);
}
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
} // namespace internal
} // namespace base
// Copyright (c) 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
#include <atomic>
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/base_export.h"
#include "base/check_op.h"
#include "base/notreached.h"
#include "build/build_config.h"
namespace base {
namespace internal {
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
class BASE_EXPORT PartitionRefCount {
public:
// PartitionRefCount should never be constructed directly.
PartitionRefCount() = delete;
ALWAYS_INLINE void Init() { count_.store(1, std::memory_order_relaxed); }
// Incrementing the counter doesn't imply any visibility about modified
// memory, hence relaxed atomics. For decrement, all previous writes must
// become visible before the memory is freed, hence the release on the
// decrement and the acquire fence on the path that frees the memory.
// For details, see base::AtomicRefCount, which has the same constraints and
// characteristics.
ALWAYS_INLINE void AddRef() {
CHECK_GT(count_.fetch_add(1, std::memory_order_relaxed), 0);
}
ALWAYS_INLINE void Release() {
if (count_.fetch_sub(1, std::memory_order_release) == 1) {
// In most thread-safe reference count implementations, an acquire
// barrier is required so that all changes made to an object from other
// threads are visible to its destructor. In our case, the destructor
// finishes before the final `Release` call, so it shouldn't be a problem.
// However, we will keep it as a precautionary measure.
std::atomic_thread_fence(std::memory_order_acquire);
Free();
}
}
ALWAYS_INLINE bool HasOneRef() {
return count_.load(std::memory_order_acquire) == 1;
}
private:
void Free();
std::atomic<int32_t> count_;
};
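
(Illustration only: the CheckedPtr implementation that actually drives this class lives elsewhere in the patch. A hypothetical consumer might look roughly like the sketch below, with GetRefCount() as a stand-in helper, presumably backed by PartitionRefCountPointer() defined further down, and with copy/move support and null handling omitted.)

template <typename T>
class BackupRefPtrSketch {
 public:
  explicit BackupRefPtrSketch(T* ptr) : ptr_(ptr) {
    if (ptr_)
      GetRefCount(ptr_)->AddRef();  // pin the slot while this pointer exists
  }
  ~BackupRefPtrSketch() {
    if (ptr_)
      GetRefCount(ptr_)->Release();  // may free a quarantined slot
  }
  T* get() const { return ptr_; }

 private:
  static PartitionRefCount* GetRefCount(void* ptr);  // hypothetical helper
  T* ptr_;
};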
// Allocate extra space for the reference count to satisfy the alignment
// requirement.
static constexpr size_t kInSlotRefCountBufferSize = alignof(std::max_align_t);
static_assert(sizeof(PartitionRefCount) <= kInSlotRefCountBufferSize,
"PartitionRefCount should fit into the in-slot buffer.");
#if DCHECK_IS_ON()
static constexpr size_t kPartitionRefCountOffset =
kInSlotRefCountBufferSize + kCookieSize;
#else
static constexpr size_t kPartitionRefCountOffset = kInSlotRefCountBufferSize;
#endif
ALWAYS_INLINE size_t PartitionRefCountSizeAdjustAdd(size_t size) {
PA_DCHECK(size + kInSlotRefCountBufferSize > size);
return size + kInSlotRefCountBufferSize;
}
ALWAYS_INLINE size_t PartitionRefCountSizeAdjustSubtract(size_t size) {
PA_DCHECK(size >= kInSlotRefCountBufferSize);
return size - kInSlotRefCountBufferSize;
}
ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* ptr) {
return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
PartitionAllocGetSlotOffset(ptr) -
kPartitionRefCountOffset);
}
// This function can only be used when we are certain that `ptr` points to the
// beginning of the allocation slot.
ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointerNoOffset(void* ptr) {
return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
kPartitionRefCountOffset);
}
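
(A worked example of the offset under assumed sizes, namely alignof(std::max_align_t) == 16 and a 16-byte cookie on DCHECK builds; the constants defined above are the authoritative ones.)

#include <cassert>
#include <cstddef>

constexpr std::size_t kAssumedRefCountBuffer = 16;  // assumed
constexpr std::size_t kAssumedCookieSize = 16;      // DCHECK builds, assumed

int main() {
  // DCHECK-build slot: | ref count (16) | cookie (16) | data (32) | cookie |
  alignas(16) unsigned char slot[16 + 16 + 32 + 16] = {};
  unsigned char* user_ptr = slot + kAssumedRefCountBuffer + kAssumedCookieSize;
  // PartitionRefCountPointerNoOffset(): subtracting the buffer-plus-cookie
  // offset from the user pointer lands on the ref count at the slot start.
  unsigned char* ref_count =
      user_ptr - (kAssumedRefCountBuffer + kAssumedCookieSize);
  assert(ref_count == slot);
  (void)ref_count;
  return 0;
}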
ALWAYS_INLINE void* PartitionRefCountPointerAdjustSubtract(void* ptr) {
return reinterpret_cast<void*>(reinterpret_cast<char*>(ptr) -
kInSlotRefCountBufferSize);
}
ALWAYS_INLINE void* PartitionRefCountPointerAdjustAdd(void* ptr) {
return reinterpret_cast<void*>(reinterpret_cast<char*>(ptr) +
kInSlotRefCountBufferSize);
}
#else // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
static constexpr size_t kInSlotRefCountBufferSize = 0;
ALWAYS_INLINE size_t PartitionRefCountSizeAdjustAdd(size_t size) {
return size;
}
ALWAYS_INLINE size_t PartitionRefCountSizeAdjustSubtract(size_t size) {
return size;
}
ALWAYS_INLINE void* PartitionRefCountPointerAdjustSubtract(void* ptr) {
return ptr;
}
ALWAYS_INLINE void* PartitionRefCountPointerAdjustAdd(void* ptr) {
return ptr;
}
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_REF_COUNT_H_
......@@ -25,9 +25,11 @@ namespace internal {
// TODO(tasak): add a description about the partition tag.
using PartitionTag = uint8_t;
// Allocate extra 16 bytes for the partition tag. 14 bytes are unused
// (reserved).
static constexpr size_t kInSlotTagBufferSize = 16;
// Allocate extra space for the partition tag to satisfy the alignment
// requirement.
static constexpr size_t kInSlotTagBufferSize = alignof(std::max_align_t);
static_assert(sizeof(PartitionTag) <= kInSlotTagBufferSize,
"PartitionTag should fit into the in-slot buffer.");
#if DCHECK_IS_ON()
// The layout inside the slot is |tag|cookie|object|(empty)|cookie|.
......
This diff is collapsed.
......@@ -29,6 +29,7 @@ static_assert(sizeof(CheckedPtr<int>) == sizeof(int*),
static_assert(sizeof(CheckedPtr<std::string>) == sizeof(std::string*),
"CheckedPtr shouldn't add memory overhead");
#if !ENABLE_BACKUP_REF_PTR_IMPL
// |is_trivially_copyable| assertion means that arrays/vectors of CheckedPtr can
// be copied by memcpy.
static_assert(std::is_trivially_copyable<CheckedPtr<void>>::value,
......@@ -57,6 +58,7 @@ static_assert(std::is_trivially_default_constructible<CheckedPtr<int>>::value,
static_assert(
std::is_trivially_default_constructible<CheckedPtr<std::string>>::value,
"CheckedPtr should be trivially default constructible");
#endif // !ENABLE_BACKUP_REF_PTR_IMPL
// Don't use base::internal for testing CheckedPtr API, to test if code outside
// this namespace calls the correct functions from this namespace.
......@@ -534,20 +536,20 @@ TEST_F(CheckedPtrTest, UpcastConvertible) {
{
Derived derived_val(42, 84, 1024);
CheckedPtr<Derived> checked_derived_ptr = &derived_val;
CheckedPtr<Derived> checked_derived_ptr1 = &derived_val;
CheckedPtr<Derived> checked_derived_ptr2 = &derived_val;
CheckedPtr<Derived> checked_derived_ptr3 = &derived_val;
CheckedPtr<Derived> checked_derived_ptr4 = &derived_val;
CheckedPtr<Base1> checked_base1_ptr(std::move(checked_derived_ptr));
CheckedPtr<Base1> checked_base1_ptr(std::move(checked_derived_ptr1));
EXPECT_EQ(checked_base1_ptr->b1, 42);
CheckedPtr<Base2> checked_base2_ptr(std::move(checked_derived_ptr));
CheckedPtr<Base2> checked_base2_ptr(std::move(checked_derived_ptr2));
EXPECT_EQ(checked_base2_ptr->b2, 84);
checked_base1_ptr = std::move(checked_derived_ptr);
checked_base1_ptr = std::move(checked_derived_ptr3);
EXPECT_EQ(checked_base1_ptr->b1, 42);
checked_base2_ptr = std::move(checked_derived_ptr);
checked_base2_ptr = std::move(checked_derived_ptr4);
EXPECT_EQ(checked_base2_ptr->b2, 84);
EXPECT_EQ(checked_base1_ptr, checked_derived_ptr);
EXPECT_EQ(checked_base2_ptr, checked_derived_ptr);
}
}
......@@ -815,7 +817,7 @@ TEST(CheckedPtr2OrMTEImpl, CrashOnGenerationMismatch) {
// pointer and points to the tag appropriately.
char bytes[] = {0xBA, 0x42, 0x78, 0x89};
CheckedPtr<char, CheckedPtr2OrMTEImplForTest> ptr = bytes + kTagOffsetForTest;
EXPECT_TRUE(*ptr == 0x78);
EXPECT_EQ(*ptr, 0x78);
// Clobber the generation associated with the fake allocation.
bytes[0] = 0;
EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 0x78) return, "");
......@@ -847,7 +849,7 @@ TEST(CheckedPtr2OrMTEImpl, CrashOnUseAfterFree) {
// exercise real PartitionAlloc paths.
CheckedPtr<int> ptr = static_cast<int*>(raw_ptr);
*ptr = 42;
EXPECT_TRUE(*ptr == 42);
EXPECT_EQ(*ptr, 42);
allocator.root()->Free(raw_ptr);
EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 42) return, "");
}
......@@ -881,10 +883,48 @@ TEST(CheckedPtr2OrMTEImpl, CrashOnUseAfterFree_WithOffset) {
}
}
#endif // ENABLE_TAG_FOR_MTE_CHECKED_PTR
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && ENABLE_CHECKED_PTR2_OR_MTE_IMPL &&
// !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#if BUILDFLAG(USE_PARTITION_ALLOC) && ENABLE_BACKUP_REF_PTR_IMPL && \
!defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
TEST(BackupRefPtrImpl, Basic) {
// This test works only if GigaCage is enabled. Bail out otherwise.
if (!IsPartitionAllocGigaCageEnabled())
return;
// TODO(bartekn): Avoid using PartitionAlloc API directly. Switch to
// new/delete once PartitionAlloc Everywhere is fully enabled.
PartitionAllocGlobalInit(HandleOOM);
PartitionAllocator<ThreadSafe> allocator;
allocator.init();
uint64_t* raw_ptr1 = reinterpret_cast<uint64_t*>(
allocator.root()->Alloc(sizeof(uint64_t), ""));
// Use the actual CheckedPtr implementation, not a test substitute, to
// exercise real PartitionAlloc paths.
CheckedPtr<uint64_t> checked_ptr1 = raw_ptr1;
*raw_ptr1 = 42;
EXPECT_EQ(*raw_ptr1, *checked_ptr1);
// The allocation should be poisoned since there's a CheckedPtr alive.
allocator.root()->Free(raw_ptr1);
EXPECT_NE(*checked_ptr1, 42ul);
// The allocator should not be able to reuse the slot at this point.
void* raw_ptr2 = allocator.root()->Alloc(sizeof(uint64_t), "");
EXPECT_NE(raw_ptr1, raw_ptr2);
allocator.root()->Free(raw_ptr2);
// When the last reference is released, the slot should become reusable.
checked_ptr1 = nullptr;
void* raw_ptr3 = allocator.root()->Alloc(sizeof(uint64_t), "");
EXPECT_EQ(raw_ptr1, raw_ptr3);
allocator.root()->Free(raw_ptr3);
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC) && ENABLE_BACKUP_REF_PTR_IMPL &&
// !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
} // namespace internal
} // namespace base
......