Commit e90cf11a authored by Benoit Lize, committed by Commit Bot

base/allocator: Avoid (D)CHECK() in PartitionAllocator.

PartitionAllocator can be used as the malloc() implementation. In this
context, a (D)CHECK() that fires causes allocations, which creates
reentrancy issues when the check fires inside PartitionAlloc itself.

To avoid that, use PA_(D)CHECK() in base/allocator/partition_allocator/,
which is identical to the unprefixed version if PartitionAlloc is not
the malloc() implementation. Otherwise it uses simplified versions.

This doesn't necessarily cover all the code reached from
PartitionAlloc. This is not a correctness issue, as Chrome would still
crash when a (D)CHECK() fires, but a debuggability one. As a consequence,
limiting the changes to the directory (rather than all code called from
it) is sufficient.

Bug: 998048
Change-Id: I171be3e6bac35cf09a29400c4254adc6f995739b
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2249739
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Cr-Commit-Position: refs/heads/master@{#779762}
parent 5f1fba21
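
To make the hazard concrete, here is a minimal, hypothetical sketch (not code from this CL; my_malloc and the guard flag are illustrative only) of what goes wrong when the assertion machinery allocates while reporting a failure inside the allocator:

// Hypothetical illustration of the reentrancy hazard; not Chromium code.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static bool g_inside_allocator = false;

void* my_malloc(std::size_t size) {
  if (g_inside_allocator) {
    // A streaming (D)CHECK() that fired inside my_malloc() would format its
    // message via operator<<, which allocates and would land right here.
    std::fprintf(stderr, "reentrant allocation!\n");
    std::abort();
  }
  g_inside_allocator = true;
  void* result = std::malloc(size);  // stand-in for the real allocation work
  g_inside_allocator = false;
  return result;
}

int main() {
  void* p = my_malloc(16);
  std::fprintf(stderr, "allocated %p\n", p);
  std::free(p);
  return 0;
}

PA_CHECK() sidesteps this by crashing immediately on failure, without building a message.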
@@ -1748,6 +1748,7 @@ jumbo_component("base") {
     "allocator/partition_allocator/partition_address_space.h",
     "allocator/partition_allocator/partition_alloc.cc",
     "allocator/partition_allocator/partition_alloc.h",
+    "allocator/partition_allocator/partition_alloc_check.h",
     "allocator/partition_allocator/partition_alloc_constants.h",
     "allocator/partition_allocator/partition_alloc_features.cc",
     "allocator/partition_allocator/partition_alloc_features.h",
...
@@ -6,6 +6,7 @@
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/bits.h"
 #include "base/notreached.h"
 #include "base/stl_util.h"
@@ -26,8 +27,8 @@ AddressPoolManager* AddressPoolManager::GetInstance() {
 }
 pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
-  DCHECK(!(ptr & kSuperPageOffsetMask));
-  DCHECK(!((ptr + length) & kSuperPageOffsetMask));
+  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
+  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
   for (pool_handle i = 0; i < base::size(pools_); ++i) {
     if (!pools_[i].IsInitialized()) {
@@ -46,34 +47,34 @@ void AddressPoolManager::ResetForTesting() {
 void AddressPoolManager::Remove(pool_handle handle) {
   Pool* pool = GetPool(handle);
-  DCHECK(pool->IsInitialized());
+  PA_DCHECK(pool->IsInitialized());
   pool->Reset();
 }
 char* AddressPoolManager::Alloc(pool_handle handle, size_t length) {
   Pool* pool = GetPool(handle);
-  DCHECK(pool->IsInitialized());
+  PA_DCHECK(pool->IsInitialized());
   return reinterpret_cast<char*>(pool->FindChunk(length));
 }
 void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
   Pool* pool = GetPool(handle);
-  DCHECK(pool->IsInitialized());
+  PA_DCHECK(pool->IsInitialized());
   pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
 }
 void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
-  CHECK(ptr != 0);
-  CHECK(!(ptr & kSuperPageOffsetMask));
-  CHECK(!(length & kSuperPageOffsetMask));
+  PA_CHECK(ptr != 0);
+  PA_CHECK(!(ptr & kSuperPageOffsetMask));
+  PA_CHECK(!(length & kSuperPageOffsetMask));
   address_begin_ = ptr;
 #if DCHECK_IS_ON()
   address_end_ = ptr + length;
-  DCHECK_LT(address_begin_, address_end_);
+  PA_DCHECK(address_begin_ < address_end_);
 #endif
   total_bits_ = length / kSuperPageSize;
-  CHECK_LE(total_bits_, kMaxBits);
+  PA_CHECK(total_bits_ <= kMaxBits);
   base::AutoLock scoped_lock(lock_);
   alloc_bitset_.reset();
@@ -124,7 +125,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
   // mark as allocated) and return the allocated address.
   if (found) {
     for (size_t i = beg_bit; i < end_bit; ++i) {
-      DCHECK(!alloc_bitset_.test(i));
+      PA_DCHECK(!alloc_bitset_.test(i));
       alloc_bitset_.set(i);
     }
     if (bit_hint_ == beg_bit) {
@@ -132,7 +133,7 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
     }
     uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
 #if DCHECK_IS_ON()
-    DCHECK_LE(address + required_size, address_end_);
+    PA_DCHECK(address + required_size <= address_end_);
 #endif
     return address;
   }
@@ -145,18 +146,18 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
 void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
   base::AutoLock scoped_lock(lock_);
-  DCHECK(!(address & kSuperPageOffsetMask));
+  PA_DCHECK(!(address & kSuperPageOffsetMask));
   const size_t size = bits::Align(free_size, kSuperPageSize);
   DCHECK_LE(address_begin_, address);
 #if DCHECK_IS_ON()
-  DCHECK_LE(address + size, address_end_);
+  PA_DCHECK(address + size <= address_end_);
 #endif
   const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
   const size_t end_bit = beg_bit + size / kSuperPageSize;
   for (size_t i = beg_bit; i < end_bit; ++i) {
-    DCHECK(alloc_bitset_.test(i));
+    PA_DCHECK(alloc_bitset_.test(i));
     alloc_bitset_.reset(i);
   }
   bit_hint_ = std::min(bit_hint_, beg_bit);
@@ -170,7 +171,7 @@ AddressPoolManager::~AddressPoolManager() = default;
 ALWAYS_INLINE AddressPoolManager::Pool* AddressPoolManager::GetPool(
     pool_handle handle) {
-  DCHECK(0 < handle && handle <= kNumPools);
+  PA_DCHECK(0 < handle && handle <= kNumPools);
   return &pools_[handle - 1];
 }
...
@@ -5,6 +5,7 @@
 #include "base/allocator/partition_allocator/address_space_randomization.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/random.h"
 #include "base/allocator/partition_allocator/spin_lock.h"
 #include "base/check_op.h"
@@ -61,7 +62,7 @@ void* GetRandomPageBase() {
   random += internal::kASLROffset;
 #endif  // defined(ARCH_CPU_32_BITS)
-  DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(!(random & kPageAllocationGranularityOffsetMask));
   return reinterpret_cast<void*>(random);
 }
...
@@ -5,6 +5,7 @@
 #include "base/allocator/partition_allocator/memory_reclaimer.h"
 #include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/bind.h"
 #include "base/location.h"
 #include "base/metrics/histogram_functions.h"
@@ -17,17 +18,17 @@ namespace {
 template <bool thread_safe>
 void Insert(std::set<PartitionRoot<thread_safe>*>* partitions,
             PartitionRoot<thread_safe>* partition) {
-  DCHECK(partition);
+  PA_DCHECK(partition);
   auto it_and_whether_inserted = partitions->insert(partition);
-  DCHECK(it_and_whether_inserted.second);
+  PA_DCHECK(it_and_whether_inserted.second);
 }
 template <bool thread_safe>
 void Remove(std::set<PartitionRoot<thread_safe>*>* partitions,
             PartitionRoot<thread_safe>* partition) {
-  DCHECK(partition);
+  PA_DCHECK(partition);
   size_t erased_count = partitions->erase(partition);
-  DCHECK_EQ(1u, erased_count);
+  PA_DCHECK(erased_count == 1u);
 }
 }  // namespace
@@ -64,12 +65,12 @@ void PartitionAllocMemoryReclaimer::UnregisterPartition(
 void PartitionAllocMemoryReclaimer::Start(
     scoped_refptr<SequencedTaskRunner> task_runner) {
-  DCHECK(!timer_);
-  DCHECK(task_runner);
+  PA_DCHECK(!timer_);
+  PA_DCHECK(task_runner);
   {
     AutoLock lock(lock_);
-    DCHECK(!thread_safe_partitions_.empty());
+    PA_DCHECK(!thread_safe_partitions_.empty());
   }
   // This does not need to run on the main thread, however there are a few
...
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 #include "base/allocator/partition_allocator/oom_callback.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/check.h"
@@ -13,7 +14,7 @@ PartitionAllocOomCallback g_oom_callback;
 }  // namespace
 void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
-  DCHECK(!g_oom_callback);
+  PA_DCHECK(!g_oom_callback);
   g_oom_callback = callback;
 }
...
@@ -10,6 +10,7 @@
 #include "base/allocator/partition_allocator/address_space_randomization.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/spin_lock.h"
 #include "base/bits.h"
 #include "base/check_op.h"
@@ -78,9 +79,9 @@ void* TrimMapping(void* base,
     pre_slack = alignment - pre_slack;
   }
   size_t post_slack = base_length - pre_slack - trim_length;
-  DCHECK(base_length >= trim_length || pre_slack || post_slack);
-  DCHECK(pre_slack < base_length);
-  DCHECK(post_slack < base_length);
+  PA_DCHECK(base_length >= trim_length || pre_slack || post_slack);
+  PA_DCHECK(pre_slack < base_length);
+  PA_DCHECK(post_slack < base_length);
   return TrimMappingInternal(base, base_length, trim_length, accessibility,
                              commit, pre_slack, post_slack);
 }
@@ -92,10 +93,10 @@ void* SystemAllocPages(void* hint,
                        PageAccessibilityConfiguration accessibility,
                        PageTag page_tag,
                        bool commit) {
-  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
-  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
-           kPageAllocationGranularityOffsetMask));
-  DCHECK(commit || accessibility == PageInaccessible);
+  PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+              kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(commit || accessibility == PageInaccessible);
   return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
                                   commit);
 }
@@ -106,16 +107,16 @@ void* AllocPages(void* address,
                  PageAccessibilityConfiguration accessibility,
                  PageTag page_tag,
                  bool commit) {
-  DCHECK(length >= kPageAllocationGranularity);
-  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
-  DCHECK(align >= kPageAllocationGranularity);
+  PA_DCHECK(length >= kPageAllocationGranularity);
+  PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(align >= kPageAllocationGranularity);
   // Alignment must be power of 2 for masking math to work.
-  DCHECK(base::bits::IsPowerOfTwo(align));
-  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
-           kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(base::bits::IsPowerOfTwo(align));
+  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+              kPageAllocationGranularityOffsetMask));
   uintptr_t align_offset_mask = align - 1;
   uintptr_t align_base_mask = ~align_offset_mask;
-  DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
   // If the client passed null as the address, choose a good one.
   if (address == nullptr) {
@@ -165,7 +166,7 @@ void* AllocPages(void* address,
   // Make a larger allocation so we can force alignment.
   size_t try_length = length + (align - kPageAllocationGranularity);
-  CHECK(try_length >= length);
+  PA_CHECK(try_length >= length);
   void* ret;
   do {
@@ -183,41 +184,41 @@ }
 }
 void FreePages(void* address, size_t length) {
-  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
-           kPageAllocationGranularityOffsetMask));
-  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+              kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
   FreePagesInternal(address, length);
 }
 bool TrySetSystemPagesAccess(void* address,
                              size_t length,
                              PageAccessibilityConfiguration accessibility) {
-  DCHECK(!(length & kSystemPageOffsetMask));
+  PA_DCHECK(!(length & kSystemPageOffsetMask));
   return TrySetSystemPagesAccessInternal(address, length, accessibility);
 }
 void SetSystemPagesAccess(void* address,
                           size_t length,
                           PageAccessibilityConfiguration accessibility) {
-  DCHECK(!(length & kSystemPageOffsetMask));
+  PA_DCHECK(!(length & kSystemPageOffsetMask));
   SetSystemPagesAccessInternal(address, length, accessibility);
 }
 void DecommitSystemPages(void* address, size_t length) {
-  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+  PA_DCHECK(!(length & kSystemPageOffsetMask));
   DecommitSystemPagesInternal(address, length);
 }
 bool RecommitSystemPages(void* address,
                          size_t length,
                          PageAccessibilityConfiguration accessibility) {
-  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
-  DCHECK_NE(PageInaccessible, accessibility);
+  PA_DCHECK(!(length & kSystemPageOffsetMask));
+  PA_DCHECK(accessibility != PageInaccessible);
   return RecommitSystemPagesInternal(address, length, accessibility);
 }
 void DiscardSystemPages(void* address, size_t length) {
-  DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+  PA_DCHECK(!(length & kSystemPageOffsetMask));
   DiscardSystemPagesInternal(address, length);
 }
@@ -229,8 +230,8 @@ bool ReserveAddressSpace(size_t size) {
                                PageTag::kChromium, false);
   if (mem != nullptr) {
     // We guarantee this alignment when reserving address space.
-    DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
-             kPageAllocationGranularityOffsetMask));
+    PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+                kPageAllocationGranularityOffsetMask));
     s_reservation_address = mem;
     s_reservation_size = size;
     return true;
...
@@ -16,6 +16,7 @@
 #include <lib/zx/vmo.h>
 #include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/fuchsia/fuchsia_logging.h"
 #include "base/notreached.h"
@@ -35,7 +36,7 @@ const char* PageTagToName(PageTag tag) {
     case PageTag::kV8:
       return "cr_v8";
     default:
-      DCHECK(false);
+      PA_DCHECK(false);
       return "";
   }
 }
@@ -126,7 +127,7 @@ void* TrimMappingInternal(void* base,
                           bool commit,
                           size_t pre_slack,
                           size_t post_slack) {
-  DCHECK_EQ(base_length, trim_length + pre_slack + post_slack);
+  PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
   uint64_t base_address = reinterpret_cast<uint64_t>(base);
...
@@ -8,6 +8,7 @@
 #include <errno.h>
 #include <sys/mman.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/logging.h"
 #include "build/build_config.h"
@@ -56,7 +57,7 @@ const char* PageTagToName(PageTag tag) {
     case PageTag::kV8:
       return "v8";
     default:
-      DCHECK(false);
+      PA_DCHECK(false);
       return "";
   }
 }
@@ -112,8 +113,8 @@ void* SystemAllocPagesInternal(void* hint,
 #if defined(OS_MACOSX)
   // Use a custom tag to make it easier to distinguish Partition Alloc regions
   // in vmmap(1). Tags between 240-255 are supported.
-  DCHECK_LE(PageTag::kFirst, page_tag);
-  DCHECK_GE(PageTag::kLast, page_tag);
+  PA_DCHECK(PageTag::kFirst <= page_tag);
+  PA_DCHECK(PageTag::kLast >= page_tag);
   int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
 #else
   int fd = -1;
...
@@ -7,6 +7,7 @@
 #include "base/allocator/partition_allocator/oom.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/logging.h"
 namespace base {
@@ -84,7 +85,7 @@ void SetSystemPagesAccessInternal(
     if (!VirtualFree(address, length, MEM_DECOMMIT)) {
       // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
       // report we get the error number.
-      CHECK_EQ(static_cast<uint32_t>(ERROR_SUCCESS), GetLastError());
+      PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
     }
   } else {
     if (!VirtualAlloc(address, length, MEM_COMMIT,
@@ -94,13 +95,13 @@ void SetSystemPagesAccessInternal(
       OOM_CRASH(length);
      // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
      // report we get the error number.
-      CHECK_EQ(ERROR_SUCCESS, error);
+      PA_CHECK(ERROR_SUCCESS == error);
     }
   }
 }
 void FreePagesInternal(void* address, size_t length) {
-  CHECK(VirtualFree(address, 0, MEM_RELEASE));
+  PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
 }
 void DecommitSystemPagesInternal(void* address, size_t length) {
@@ -135,7 +136,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
   // failure.
   if (ret) {
     void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
-    CHECK(ptr);
+    PA_CHECK(ptr);
   }
 }
...
@@ -6,6 +6,7 @@
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/bits.h"
@@ -30,38 +31,38 @@ pool_handle PartitionAddressSpace::direct_map_pool_ = 0;
 pool_handle PartitionAddressSpace::normal_bucket_pool_ = 0;
 void PartitionAddressSpace::Init() {
-  DCHECK(!reserved_address_start_);
+  PA_DCHECK(!reserved_address_start_);
   reserved_address_start_ = reinterpret_cast<uintptr_t>(SystemAllocPages(
       nullptr, kReservedAddressSpaceSize, base::PageInaccessible,
       PageTag::kPartitionAlloc, false));
-  CHECK(reserved_address_start_);
+  PA_CHECK(reserved_address_start_);
   const uintptr_t reserved_address_end =
       reserved_address_start_ + kReservedAddressSpaceSize;
   reserved_base_address_ =
       bits::Align(reserved_address_start_, kReservedAddressSpaceAlignment);
-  DCHECK_GE(reserved_base_address_, reserved_address_start_);
-  DCHECK(!(reserved_base_address_ & kReservedAddressSpaceOffsetMask));
+  PA_DCHECK(reserved_base_address_ >= reserved_address_start_);
+  PA_DCHECK(!(reserved_base_address_ & kReservedAddressSpaceOffsetMask));
   uintptr_t current = reserved_base_address_;
   direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
       current, kDirectMapPoolSize);
-  DCHECK(direct_map_pool_);
+  PA_DCHECK(direct_map_pool_);
   current += kDirectMapPoolSize;
   normal_bucket_pool_base_address_ = current;
   normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
       current, kNormalBucketPoolSize);
-  DCHECK(normal_bucket_pool_);
+  PA_DCHECK(normal_bucket_pool_);
   current += kNormalBucketPoolSize;
-  DCHECK_LE(current, reserved_address_end);
-  DCHECK_EQ(current, reserved_base_address_ + kDesiredAddressSpaceSize);
+  PA_DCHECK(current <= reserved_address_end);
+  PA_DCHECK(current == reserved_base_address_ + kDesiredAddressSpaceSize);
 }
 void PartitionAddressSpace::UninitForTesting() {
-  DCHECK(reserved_address_start_);
+  PA_DCHECK(reserved_address_start_);
   FreePages(reinterpret_cast<void*>(reserved_address_start_),
             kReservedAddressSpaceSize);
   reserved_address_start_ = 0;
...
@@ -6,6 +6,7 @@
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
 #include "base/allocator/partition_allocator/address_pool_manager.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_features.h"
 #include "base/base_export.h"
@@ -133,12 +134,12 @@ };
 ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
-  DCHECK(IsPartitionAllocGigaCageEnabled());
+  PA_DCHECK(IsPartitionAllocGigaCageEnabled());
   return PartitionAddressSpace::GetDirectMapPool();
 }
 ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
-  DCHECK(IsPartitionAllocGigaCageEnabled());
+  PA_DCHECK(IsPartitionAllocGigaCageEnabled());
   return PartitionAddressSpace::GetNormalBucketPool();
 }
...
@@ -100,8 +100,8 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
   // Chained hooks are not supported. Registering a non-null hook when a
   // non-null hook is already registered indicates somebody is trying to
   // overwrite a hook.
-  CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
-        (!alloc_hook && !free_hook))
+  PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
+           (!alloc_hook && !free_hook))
       << "Overwriting already set observer hooks";
   allocation_observer_hook_ = alloc_hook;
   free_observer_hook_ = free_hook;
@@ -114,9 +114,9 @@ void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
                                            ReallocOverrideHook realloc_hook) {
   AutoLock guard(GetHooksLock());
-  CHECK((!allocation_override_hook_ && !free_override_hook_ &&
-         !realloc_override_hook_) ||
-        (!alloc_hook && !free_hook && !realloc_hook))
+  PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
+            !realloc_override_hook_) ||
+           (!alloc_hook && !free_hook && !realloc_hook))
       << "Overwriting already set override hooks";
   allocation_override_hook_ = alloc_hook;
   free_override_hook_ = free_hook;
@@ -179,7 +179,7 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
 }
 void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
-  DCHECK(on_out_of_memory);
+  PA_DCHECK(on_out_of_memory);
   internal::g_oom_handling_function = on_out_of_memory;
 #if defined(__LP64__)
@@ -263,8 +263,8 @@ void PartitionRoot<thread_safe>::InitSlowPath() {
     }
     current_increment <<= 1;
   }
-  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
-  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  PA_DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+  PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
   // Then set up the fast size -> bucket lookup table.
   bucket = &buckets[0];
@@ -286,9 +286,9 @@ void PartitionRoot<thread_safe>::InitSlowPath() {
       }
     }
   }
-  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
-  DCHECK(bucket_ptr == &bucket_lookups[0] +
-                           ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+  PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  PA_DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) *
+                                                kGenericNumBucketsPerOrder));
   // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
   // which tries to overflow to a non-existent order.
   *bucket_ptr = Bucket::get_sentinel_bucket();
@@ -300,7 +300,7 @@ template <bool thread_safe>
 bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
     internal::PartitionPage<thread_safe>* page,
     size_t raw_size) {
-  DCHECK(page->bucket->is_direct_mapped());
+  PA_DCHECK(page->bucket->is_direct_mapped());
   raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
@@ -350,7 +350,7 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
 #endif
   page->set_raw_size(raw_size);
-  DCHECK(page->get_raw_size() == raw_size);
+  PA_DCHECK(page->get_raw_size() == raw_size);
   page->bucket->slot_size = new_size;
   return true;
@@ -364,7 +364,7 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
   void* result = realloc(ptr, new_size);
-  CHECK(result || flags & PartitionAllocReturnNull);
+  PA_CHECK(result || flags & PartitionAllocReturnNull);
   return result;
 #else
   if (UNLIKELY(!ptr))
@@ -394,7 +394,7 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
   {
     internal::ScopedGuard<thread_safe> guard{lock_};
     // TODO(palmer): See if we can afford to make this a CHECK.
-    DCHECK(IsValidPage(page));
+    PA_DCHECK(IsValidPage(page));
     if (UNLIKELY(page->bucket->is_direct_mapped())) {
       // We may be able to perform the realloc in place by changing the
@@ -476,8 +476,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
   constexpr size_t kMaxSlotCount =
       (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
-  DCHECK(bucket_num_slots <= kMaxSlotCount);
-  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+  PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
+  PA_DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
   size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
   char slot_usage[kMaxSlotCount];
 #if !defined(OS_WIN)
@@ -493,7 +493,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
   for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
        /**/) {
     size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
-    DCHECK(slot_index < num_slots);
+    PA_DCHECK(slot_index < num_slots);
     slot_usage[slot_index] = 0;
     entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
 #if !defined(OS_WIN)
@@ -513,7 +513,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
   while (!slot_usage[num_slots - 1]) {
     truncated_slots++;
     num_slots--;
-    DCHECK(num_slots);
+    PA_DCHECK(num_slots);
   }
   // First, do the work of calculating the discardable bytes. Don't actually
   // discard anything unless the discard flag was passed in.
@@ -527,13 +527,13 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
     // a slot span, so we "own" all the way up the page boundary.
     end_ptr = reinterpret_cast<char*>(
         RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
-    DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
+    PA_DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
     if (begin_ptr < end_ptr) {
       unprovisioned_bytes = end_ptr - begin_ptr;
       discardable_bytes += unprovisioned_bytes;
     }
   if (unprovisioned_bytes && discard) {
-    DCHECK(truncated_slots > 0);
+    PA_DCHECK(truncated_slots > 0);
     size_t num_new_entries = 0;
     page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
@@ -563,7 +563,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
     if (back)
       back->next = internal::PartitionFreelistEntry::Encode(nullptr);
-    DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+    PA_DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
     // Discard the memory.
     DiscardSystemPages(begin_ptr, unprovisioned_bytes);
   }
@@ -607,7 +607,8 @@ static void PartitionPurgeBucket(
        internal::PartitionPage<thread_safe>::get_sentinel_page()) {
     for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
          page; page = page->next_page) {
-      DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+      PA_DCHECK(page !=
+                internal::PartitionPage<thread_safe>::get_sentinel_page());
       PartitionPurgePage(page, true);
     }
   }
@@ -657,7 +658,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
   } else if (page->is_full()) {
     ++stats_out->num_full_pages;
   } else {
-    DCHECK(page->is_active());
+    PA_DCHECK(page->is_active());
     ++stats_out->num_active_pages;
   }
 }
@@ -666,7 +667,7 @@ template <bool thread_safe>
 static void PartitionDumpBucketStats(
     PartitionBucketMemoryStats* stats_out,
     const internal::PartitionBucket<thread_safe>* bucket) {
-  DCHECK(!bucket->is_direct_mapped());
+  PA_DCHECK(!bucket->is_direct_mapped());
   stats_out->is_valid = false;
   // If the active page list is empty (==
   // internal::PartitionPage::get_sentinel_page()), the bucket might still need
@@ -691,13 +692,13 @@ static void PartitionDumpBucketStats(
   for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head;
        page; page = page->next_page) {
-    DCHECK(page->is_empty() || page->is_decommitted());
+    PA_DCHECK(page->is_empty() || page->is_decommitted());
     PartitionDumpPageStats(stats_out, page);
   }
   for (internal::PartitionPage<thread_safe>* page =
            bucket->decommitted_pages_head;
        page; page = page->next_page) {
-    DCHECK(page->is_decommitted());
+    PA_DCHECK(page->is_decommitted());
     PartitionDumpPageStats(stats_out, page);
   }
@@ -705,7 +706,8 @@ static void PartitionDumpBucketStats(
        internal::PartitionPage<thread_safe>::get_sentinel_page()) {
     for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
          page; page = page->next_page) {
-      DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+      PA_DCHECK(page !=
+                internal::PartitionPage<thread_safe>::get_sentinel_page());
       PartitionDumpPageStats(stats_out, page);
     }
   }
@@ -756,8 +758,8 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
   for (DirectMapExtent* extent = direct_map_list;
        extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
       extent = extent->next_extent, ++num_direct_mapped_allocations) {
-    DCHECK(!extent->next_extent ||
-           extent->next_extent->prev_extent == extent);
+    PA_DCHECK(!extent->next_extent ||
+              extent->next_extent->prev_extent == extent);
     size_t slot_size = extent->bucket->slot_size;
     direct_mapped_allocations_total_size += slot_size;
     if (is_light_dump)
...
@@ -57,6 +57,7 @@
 #include "base/allocator/partition_allocator/memory_reclaimer.h"
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/allocator/partition_allocator/partition_bucket.h"
@@ -86,7 +87,7 @@
     if (flags & PartitionAllocReturnNull) { \
       return nullptr;                       \
     }                                       \
-    CHECK(false);                           \
+    PA_CHECK(false);                        \
   }
 namespace base {
@@ -423,17 +424,17 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
   Page* page = bucket->active_pages_head;
   // Check that this page is neither full nor freed.
-  DCHECK(page);
-  DCHECK(page->num_allocated_slots >= 0);
+  PA_DCHECK(page);
+  PA_DCHECK(page->num_allocated_slots >= 0);
   void* ret = page->freelist_head;
   if (LIKELY(ret)) {
     // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
     // if we can afford to make these CHECKs.
-    DCHECK(IsValidPage(page));
+    PA_DCHECK(IsValidPage(page));
     // All large allocations must go through the slow path to correctly update
     // the size metadata.
-    DCHECK(page->get_raw_size() == 0);
+    PA_DCHECK(page->get_raw_size() == 0);
     internal::PartitionFreelistEntry* new_head =
         internal::EncodedPartitionFreelistEntry::Decode(
             page->freelist_head->next);
@@ -442,7 +443,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
   } else {
     ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
     // TODO(palmer): See if we can afford to make this a CHECK.
-    DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
+    PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
   }
 #if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@@ -456,7 +457,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
   size_t new_slot_size = page->bucket->slot_size;
   size_t raw_size = page->get_raw_size();
   if (raw_size) {
-    DCHECK(raw_size == size);
+    PA_DCHECK(raw_size == size);
     new_slot_size = raw_size;
   }
   size_t no_cookie_size =
@@ -488,7 +489,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::Free(void* ptr) {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   free(ptr);
 #else
-  DCHECK(initialized);
+  PA_DCHECK(initialized);
   if (UNLIKELY(!ptr))
     return;
@@ -502,7 +503,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::Free(void* ptr) {
   ptr = internal::PartitionCookieFreePointerAdjust(ptr);
   Page* page = Page::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
-  DCHECK(IsValidPage(page));
+  PA_DCHECK(IsValidPage(page));
   internal::DeferredUnmap deferred_unmap;
   {
     ScopedGuard guard{lock_};
@@ -531,16 +532,16 @@ template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
     size_t len) {
   total_size_of_committed_pages += len;
-  DCHECK(total_size_of_committed_pages <=
-         total_size_of_super_pages + total_size_of_direct_mapped_pages);
+  PA_DCHECK(total_size_of_committed_pages <=
+            total_size_of_super_pages + total_size_of_direct_mapped_pages);
 }
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
     size_t len) {
   total_size_of_committed_pages -= len;
-  DCHECK(total_size_of_committed_pages <=
-         total_size_of_super_pages + total_size_of_direct_mapped_pages);
+  PA_DCHECK(total_size_of_committed_pages <=
+            total_size_of_super_pages + total_size_of_direct_mapped_pages);
 }
 template <bool thread_safe>
@@ -555,7 +556,7 @@ template <bool thread_safe>
 ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPages(
     void* address,
     size_t length) {
-  CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
+  PA_CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
   IncreaseCommittedPages(length);
 }
@@ -597,11 +598,11 @@ ALWAYS_INLINE internal::PartitionPage<thread_safe>*
 PartitionAllocGetPageForSize(void* ptr) {
   // No need to lock here. Only |ptr| being freed by another thread could
   // cause trouble, and the caller is responsible for that not happening.
-  DCHECK(PartitionAllocSupportsGetSize());
+  PA_DCHECK(PartitionAllocSupportsGetSize());
   auto* page =
       internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
-  DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
+  PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
   return page;
 }
 }  // namespace internal
@@ -623,7 +624,7 @@ ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
 // lead to undefined behavior.
 template <bool thread_safe>
 ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
-  DCHECK(IsManagedByPartitionAllocAndNotDirectMapped(ptr));
+  PA_DCHECK(IsManagedByPartitionAllocAndNotDirectMapped(ptr));
   ptr = internal::PartitionCookieFreePointerAdjust(ptr);
   auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
   size_t slot_size = page->bucket->slot_size;
@@ -651,9 +652,9 @@ PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
   size_t sub_order_index = size & order_sub_index_masks[order];
   Bucket* bucket = bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
                                   order_index + !!sub_order_index];
-  CHECK(bucket);
-  DCHECK(!bucket->slot_size || bucket->slot_size >= size);
-  DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+  PA_CHECK(bucket);
+  PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+  PA_DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
   return bucket;
 }
@@ -662,16 +663,16 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
     int flags,
     size_t size,
     const char* type_name) {
-  DCHECK_LT(flags, PartitionAllocLastFlag << 1);
+  PA_DCHECK(flags < PartitionAllocLastFlag << 1);
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
   const bool zero_fill = flags & PartitionAllocZeroFill;
   void* result = zero_fill ? calloc(1, size) : malloc(size);
-  CHECK(result || flags & PartitionAllocReturnNull);
+  PA_CHECK(result || flags & PartitionAllocReturnNull);
   return result;
 #else
-  DCHECK(initialized);
+  PA_DCHECK(initialized);
   void* result;
   const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
   if (UNLIKELY(hooks_enabled)) {
@@ -685,7 +686,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
   size_t requested_size = size;
   size = internal::PartitionCookieSizeAdjustAdd(size);
   auto* bucket = SizeToBucket(size);
-  DCHECK(bucket);
+  PA_DCHECK(bucket);
   {
     internal::ScopedGuard<thread_safe> guard{lock_};
     result = AllocFromBucket(bucket, flags, size);
@@ -725,7 +726,7 @@ ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   return size;
 #else
-  DCHECK(PartitionRoot<thread_safe>::initialized);
+  PA_DCHECK(PartitionRoot<thread_safe>::initialized);
   size = internal::PartitionCookieSizeAdjustAdd(size);
   auto* bucket = SizeToBucket(size);
   if (LIKELY(!bucket->is_direct_mapped())) {
...
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_

#include "base/allocator/buildflags.h"
#include "base/check.h"

// When PartitionAlloc is used as the default allocator, we cannot use the
// regular (D)CHECK() macros, as they allocate internally. When an assertion is
// triggered, they format strings, leading to reentrancy in the code, which
// none of PartitionAlloc is designed to support (and especially not for error
// paths).
//
// As a consequence:
// - When PartitionAlloc is not malloc(), use the regular macros.
// - Otherwise, crash immediately. This provides worse error messages, though.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

// See base/check.h for implementation details.
#define PA_CHECK(condition) \
  UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()

#if DCHECK_IS_ON()
#define PA_DCHECK(condition) PA_CHECK(condition)
#else
#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
#endif  // DCHECK_IS_ON()

#else
#define PA_CHECK(condition) CHECK(condition)
#define PA_DCHECK(condition) DCHECK(condition)
#endif  // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
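In essence, PA_CHECK() trades the rich diagnostics of CHECK() for a guaranteed allocation-free failure path. The standalone sketch below illustrates only that idea; MY_PA_CHECK and the GCC/Clang builtins __builtin_expect()/__builtin_trap() are stand-ins assumed for this example, not Chromium's UNLIKELY()/IMMEDIATE_CRASH()/EAT_CHECK_STREAM_PARAMS() machinery.

// Minimal sketch of the fail-fast check pattern; assumes GCC or Clang for
// __builtin_expect() and __builtin_trap().
#include <cstdio>
#include <cstdlib>

// On failure, trap immediately: no log-string formatting and therefore no
// allocation, which is what makes this safe to call from inside malloc().
// (The real macro additionally swallows streamed `<< ...` operands.)
#define MY_PA_CHECK(condition)             \
  do {                                     \
    if (__builtin_expect(!(condition), 0)) \
      __builtin_trap();                    \
  } while (0)

int main() {
  void* p = std::malloc(16);
  MY_PA_CHECK(p != nullptr);  // Passes here; a failure would trap, not log.
  std::printf("check passed\n");
  std::free(p);
  return 0;
}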
@@ -6,6 +6,7 @@
 #include <vector>
 #include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/strings/stringprintf.h"
 #include "base/threading/platform_thread.h"
 #include "base/time/time.h"
@@ -78,7 +79,7 @@ class TestLoopThread : public PlatformThread::Delegate {
  public:
  explicit TestLoopThread(OnceCallback<float()> test_fn)
      : test_fn_(std::move(test_fn)) {
-    CHECK(PlatformThread::Create(0, this, &thread_handle_));
+    PA_CHECK(PlatformThread::Create(0, this, &thread_handle_));
  }

  float Run() {
...
@@ -8,6 +8,7 @@
 #include <stddef.h>
 #include <stdint.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/base_export.h"
@@ -63,7 +64,7 @@ struct PartitionBucket {
     // Caller must check that the size is not above the kGenericMaxDirectMapped
     // limit before calling. This also guards against integer overflow in the
     // calculation here.
-    DCHECK(size <= kGenericMaxDirectMapped);
+    PA_DCHECK(size <= kGenericMaxDirectMapped);
     return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
   }
...
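The returned expression in the hunk above is the standard power-of-two round-up; the preceding size limit is precisely what keeps `size + kSystemPageOffsetMask` from overflowing. A self-contained sketch, assuming a 4 KiB system page for illustration (the real constants are platform-dependent):

// Illustration of (size + kSystemPageOffsetMask) & kSystemPageBaseMask.
#include <cstddef>

constexpr size_t kSystemPageSize = 4096;  // Must be a power of two.
constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1;   // 0x0FFF
constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;  // ...F000

// Rounds `size` up to the next multiple of the system page size. Adding the
// offset mask pushes any nonzero remainder past the page boundary, and the
// base mask then clears the remainder bits.
constexpr size_t RoundUpToSystemPage(size_t size) {
  return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}

static_assert(RoundUpToSystemPage(1) == 4096, "rounds up");
static_assert(RoundUpToSystemPage(4096) == 4096, "exact multiple unchanged");
static_assert(RoundUpToSystemPage(4097) == 8192, "crosses page boundary");

int main() { return 0; }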
@@ -6,6 +6,7 @@
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
 #include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"
@@ -30,14 +31,14 @@ static constexpr unsigned char kCookieValue[kCookieSize] = {
 ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
   unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
   for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
-    DCHECK(*cookie_ptr == kCookieValue[i]);
+    PA_DCHECK(*cookie_ptr == kCookieValue[i]);
 }

 ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
   // Add space for cookies, checking for integer overflow. TODO(palmer):
   // Investigate the performance and code size implications of using
   // CheckedNumeric throughout PA.
-  DCHECK(size + (2 * kCookieSize) > size);
+  PA_DCHECK(size + (2 * kCookieSize) > size);
   size += 2 * kCookieSize;
   return size;
 }
@@ -50,7 +51,7 @@ ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
 ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
   // Remove space for cookies.
-  DCHECK(size >= 2 * kCookieSize);
+  PA_DCHECK(size >= 2 * kCookieSize);
   size -= 2 * kCookieSize;
   return size;
 }
...
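The check in PartitionCookieSizeAdjustAdd() relies on unsigned wrap-around: size_t arithmetic wraps on overflow, so a wrapped `size + 2 * kCookieSize` is smaller than `size`, and the single comparison is the entire overflow check. A minimal sketch, assuming a 16-byte cookie for illustration:

// Sketch of the unsigned-overflow check above; kCookieSize is an assumption.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kCookieSize = 16;

// If the addition wraps, the sum is smaller than the original size and the
// assert (a stand-in for PA_DCHECK) fires.
size_t CookieSizeAdjustAdd(size_t size) {
  assert(size + (2 * kCookieSize) > size);
  return size + 2 * kCookieSize;
}

int main() {
  assert(CookieSizeAdjustAdd(100) == 132);
  // CookieSizeAdjustAdd(SIZE_MAX - 8) would wrap and trip the assert.
  return 0;
}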
@@ -5,6 +5,7 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_bucket.h"
 #include "base/allocator/partition_allocator/partition_page.h"
 #include "base/logging.h"
@@ -27,7 +28,7 @@ template <bool thread_safe>
 ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
 PartitionDirectMapExtent<thread_safe>::FromPage(
     PartitionPage<thread_safe>* page) {
-  DCHECK(page->bucket->is_direct_mapped());
+  PA_DCHECK(page->bucket->is_direct_mapped());
   return reinterpret_cast<PartitionDirectMapExtent<thread_safe>*>(
       reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
 }
...
@@ -6,6 +6,7 @@
 #include "base/allocator/partition_allocator/address_pool_manager.h"
 #include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_features.h"
 #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
 #include "base/check.h"
@@ -43,13 +44,13 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
   // Maintain the doubly-linked list of all direct mappings.
   if (extent->prev_extent) {
-    DCHECK(extent->prev_extent->next_extent == extent);
+    PA_DCHECK(extent->prev_extent->next_extent == extent);
     extent->prev_extent->next_extent = extent->next_extent;
   } else {
     root->direct_map_list = extent->next_extent;
   }
   if (extent->next_extent) {
-    DCHECK(extent->next_extent->prev_extent == extent);
+    PA_DCHECK(extent->next_extent->prev_extent == extent);
     extent->next_extent->prev_extent = extent->prev_extent;
   }
@@ -59,10 +60,10 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
   size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
   root->DecreaseCommittedPages(uncommitted_page_size);
-  DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+  PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
   root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
-  DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+  PA_DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
   char* ptr =
       reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
@@ -75,15 +76,16 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionRegisterEmptyPage(
     PartitionPage<thread_safe>* page) {
-  DCHECK(page->is_empty());
+  PA_DCHECK(page->is_empty());
   PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
   root->lock_.AssertAcquired();
   // If the page is already registered as empty, give it another life.
   if (page->empty_cache_index != -1) {
-    DCHECK(page->empty_cache_index >= 0);
-    DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
-    DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+    PA_DCHECK(page->empty_cache_index >= 0);
+    PA_DCHECK(static_cast<unsigned>(page->empty_cache_index) <
+              kMaxFreeableSpans);
+    PA_DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
     root->global_empty_page_ring[page->empty_cache_index] = nullptr;
   }
@@ -121,7 +123,7 @@ PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() {
 template <bool thread_safe>
 DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
-  DCHECK(this != get_sentinel_page());
+  PA_DCHECK(this != get_sentinel_page());
   if (LIKELY(num_allocated_slots == 0)) {
     // Page became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
@@ -131,27 +133,27 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
     // the empty list as a force towards defragmentation.
     if (LIKELY(this == bucket->active_pages_head))
       bucket->SetNewActivePage();
-    DCHECK(bucket->active_pages_head != this);
+    PA_DCHECK(bucket->active_pages_head != this);
     set_raw_size(0);
-    DCHECK(!get_raw_size());
+    PA_DCHECK(!get_raw_size());
     PartitionRegisterEmptyPage(this);
   } else {
-    DCHECK(!bucket->is_direct_mapped());
+    PA_DCHECK(!bucket->is_direct_mapped());
     // Ensure that the page is full. That's the only valid case if we
     // arrive here.
-    DCHECK(num_allocated_slots < 0);
+    PA_DCHECK(num_allocated_slots < 0);
     // A transition of num_allocated_slots from 0 to -1 is not legal, and
     // likely indicates a double-free.
-    CHECK(num_allocated_slots != -1);
+    PA_CHECK(num_allocated_slots != -1);
    num_allocated_slots = -num_allocated_slots - 2;
-    DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
+    PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
     // Fully used page became partially used. It must be put back on the
     // non-full page list. Also make it the current page to increase the
     // chances of it being filled up again. The old current page will be
     // the next page.
-    DCHECK(!next_page);
+    PA_DCHECK(!next_page);
     if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
       next_page = bucket->active_pages_head;
     bucket->active_pages_head = this;
@@ -167,8 +169,8 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
 template <bool thread_safe>
 void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
   root->lock_.AssertAcquired();
-  DCHECK(is_empty());
-  DCHECK(!bucket->is_direct_mapped());
+  PA_DCHECK(is_empty());
+  PA_DCHECK(!bucket->is_direct_mapped());
   void* addr = PartitionPage::ToPointer(this);
   root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
@@ -180,23 +182,23 @@ void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
   // 32 bytes in size.
   freelist_head = nullptr;
   num_unprovisioned_slots = 0;
-  DCHECK(is_decommitted());
+  PA_DCHECK(is_decommitted());
 }

 template <bool thread_safe>
 void PartitionPage<thread_safe>::DecommitIfPossible(
     PartitionRoot<thread_safe>* root) {
   root->lock_.AssertAcquired();
-  DCHECK(empty_cache_index >= 0);
-  DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
-  DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+  PA_DCHECK(empty_cache_index >= 0);
+  PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
+  PA_DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
   empty_cache_index = -1;
   if (is_empty())
     Decommit(root);
 }

 void DeferredUnmap::Unmap() {
-  DCHECK(ptr && size > 0);
+  PA_DCHECK(ptr && size > 0);
   if (IsManagedByPartitionAlloc(ptr)) {
     DecommitPages(ptr, size);
   } else {
...
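The FreeSlowPath() arithmetic above implies an encoding worth spelling out: per the PA_DCHECK, a full page stores -(slot_count + 1) in num_allocated_slots, so a value of -1 can only arise from an illegal 0 to -1 transition, i.e. a likely double free. Below is a toy model of just that invariant; ToyPage and both helpers are hypothetical, not PartitionAlloc's actual types.

// Toy model of the negative "full page" encoding and its double-free check.
#include <cassert>

struct ToyPage {
  int num_allocated_slots;
  int slots_per_span;
};

// Marks the page full, storing the slot count in the negative encoding.
void MarkFull(ToyPage& page) {
  assert(page.num_allocated_slots == page.slots_per_span);
  page.num_allocated_slots = -(page.num_allocated_slots + 1);
}

// Mirrors the FreeSlowPath() arithmetic: decode the negative encoding and
// account for the slot just freed.
void FreeFromFullPage(ToyPage& page) {
  assert(page.num_allocated_slots < 0);    // Must be in the "full" encoding.
  assert(page.num_allocated_slots != -1);  // -1 would indicate a double-free.
  page.num_allocated_slots = -page.num_allocated_slots - 2;
  assert(page.num_allocated_slots == page.slots_per_span - 1);
}

int main() {
  ToyPage page{4, 4};
  MarkFull(page);          // Stored as -(4 + 1) = -5.
  FreeFromFullPage(page);  // Decodes back to 4 - 1 = 3.
  assert(page.num_allocated_slots == 3);
  return 0;
}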
@@ -7,6 +7,7 @@
 #include <string.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/allocator/partition_allocator/partition_bucket.h"
@@ -134,7 +135,7 @@ static_assert(sizeof(PartitionPage<ThreadSafe>) <= kPageMetadataSize,
 ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
   uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
-  DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+  PA_DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
   // The metadata area is exactly one system page (the guard page) into the
   // super page.
   return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
@@ -152,8 +153,8 @@ PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
   // Index 0 is invalid because it is the super page extent metadata and the
   // last index is invalid because the whole PartitionPage is set as guard
   // pages.
-  DCHECK(partition_page_index);
-  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  PA_DCHECK(partition_page_index);
+  PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
   auto* page = reinterpret_cast<PartitionPage*>(
       PartitionSuperPageToMetadataArea(super_page_ptr) +
       (partition_page_index << kPageMetadataShift));
@@ -177,17 +178,18 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
   // A valid |page| must be past the first guard System page and within
   // the following metadata region.
-  DCHECK(super_page_offset > kSystemPageSize);
+  PA_DCHECK(super_page_offset > kSystemPageSize);
   // Must be less than total metadata region.
-  DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
-                                                kPageMetadataSize));
+  PA_DCHECK(super_page_offset <
+            kSystemPageSize +
+                (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
   uintptr_t partition_page_index =
       (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
   // Index 0 is invalid because it is the super page extent metadata and the
   // last index is invalid because the whole PartitionPage is set as guard
   // pages.
-  DCHECK(partition_page_index);
-  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  PA_DCHECK(partition_page_index);
+  PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
   uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
   void* ret = reinterpret_cast<void*>(
       super_page_base + (partition_page_index << kPartitionPageShift));
@@ -203,9 +205,9 @@ ALWAYS_INLINE PartitionPage<thread_safe>*
 PartitionPage<thread_safe>::FromPointer(void* ptr) {
   PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
   // Checks that the pointer is a multiple of bucket size.
-  DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
-            reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
-           page->bucket->slot_size));
+  PA_DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+               reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
+              page->bucket->slot_size));
   return page;
 }
@@ -218,8 +220,8 @@ ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr()
   if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
     return nullptr;
-  DCHECK((bucket->slot_size % kSystemPageSize) == 0);
-  DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+  PA_DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+  PA_DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
   const PartitionPage* the_next_page = this + 1;
   return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
@@ -252,12 +254,12 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
   memset(ptr, kFreedByte, slot_size);
 #endif
-  DCHECK(num_allocated_slots);
+  PA_DCHECK(num_allocated_slots);
   // Catches an immediate double free.
-  CHECK(ptr != freelist_head);
+  PA_CHECK(ptr != freelist_head);
   // Look for double free one level deeper in debug.
-  DCHECK(!freelist_head ||
-         ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
+  PA_DCHECK(!freelist_head ||
+            ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
   auto* entry = static_cast<internal::PartitionFreelistEntry*>(ptr);
   entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
   freelist_head = entry;
@@ -267,46 +269,46 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
   } else {
     // All single-slot allocations must go through the slow path to
     // correctly update the size metadata.
-    DCHECK(get_raw_size() == 0);
+    PA_DCHECK(get_raw_size() == 0);
   }
   return {};
 }

 template <bool thread_safe>
 ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const {
-  DCHECK(this != get_sentinel_page());
-  DCHECK(!page_offset);
+  PA_DCHECK(this != get_sentinel_page());
+  PA_DCHECK(!page_offset);
   return (num_allocated_slots > 0 &&
           (freelist_head || num_unprovisioned_slots));
 }

 template <bool thread_safe>
 ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
-  DCHECK(this != get_sentinel_page());
-  DCHECK(!page_offset);
+  PA_DCHECK(this != get_sentinel_page());
+  PA_DCHECK(!page_offset);
   bool ret = (num_allocated_slots == bucket->get_slots_per_span());
   if (ret) {
-    DCHECK(!freelist_head);
-    DCHECK(!num_unprovisioned_slots);
+    PA_DCHECK(!freelist_head);
+    PA_DCHECK(!num_unprovisioned_slots);
   }
   return ret;
 }

 template <bool thread_safe>
 ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const {
-  DCHECK(this != get_sentinel_page());
-  DCHECK(!page_offset);
+  PA_DCHECK(this != get_sentinel_page());
+  PA_DCHECK(!page_offset);
   return (!num_allocated_slots && freelist_head);
 }

 template <bool thread_safe>
 ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
-  DCHECK(this != get_sentinel_page());
-  DCHECK(!page_offset);
+  PA_DCHECK(this != get_sentinel_page());
+  PA_DCHECK(!page_offset);
   bool ret = (!num_allocated_slots && !freelist_head);
   if (ret) {
-    DCHECK(!num_unprovisioned_slots);
-    DCHECK(empty_cache_index == -1);
+    PA_DCHECK(!num_unprovisioned_slots);
+    PA_DCHECK(empty_cache_index == -1);
   }
   return ret;
 }
@@ -320,10 +322,10 @@ ALWAYS_INLINE void PartitionPage<thread_safe>::set_raw_size(size_t size) {
 template <bool thread_safe>
 ALWAYS_INLINE void PartitionPage<thread_safe>::Reset() {
-  DCHECK(is_decommitted());
+  PA_DCHECK(is_decommitted());
   num_unprovisioned_slots = bucket->get_slots_per_span();
-  DCHECK(num_unprovisioned_slots);
+  PA_DCHECK(num_unprovisioned_slots);
   next_page = nullptr;
 }
...
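Free()'s two probes above are cheap heuristics rather than a full freelist scan: the slot being pushed must not already be the freelist head (an immediate double free, caught by PA_CHECK in all builds), nor, in debug builds, the head's successor. A toy model follows, with plain pointers standing in for PartitionAlloc's encoded freelist entries; ToySlotSpan is hypothetical.

// Toy model of the freelist push with the two double-free probes.
#include <cassert>

struct FreelistEntry {
  FreelistEntry* next;
};

struct ToySlotSpan {
  FreelistEntry* freelist_head = nullptr;

  void Free(void* ptr) {
    // Catches an immediate double free.
    assert(ptr != freelist_head);
    // Looks one level deeper, as the PA_DCHECK above does in debug builds.
    assert(!freelist_head || ptr != freelist_head->next);
    auto* entry = static_cast<FreelistEntry*>(ptr);
    entry->next = freelist_head;
    freelist_head = entry;
  }
};

int main() {
  alignas(FreelistEntry) unsigned char slot_a[16], slot_b[16];
  ToySlotSpan span;
  span.Free(slot_a);
  span.Free(slot_b);
  // Calling span.Free(slot_b) again would trip the first assert.
  assert(span.freelist_head == reinterpret_cast<FreelistEntry*>(slot_b));
  return 0;
}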