Commit 0044f2fd authored by Takashi Sakamoto's avatar Takashi Sakamoto Committed by Commit Bot

PartitionAlloc reserves address space and AddressPool manages the space.

- This feature is behind the flag: PartitionAllocGigaCage.
- This feature is only available on 64bit chrome.
- Currently 32GB address space is reserved for PartitionAllocator.
  - 16Gbyte for direct map allocation (1 pool), and
  - 16Gbyte for normal buckets allocation (1 pool)
- Need to invoke PartitionAllocGlobalInit() before allocating memory via PartitionAllocator.

Bug: 1086388
Change-Id: I370d2422e5f3ee5825334da3a4914730b0fbee93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2174031
Commit-Queue: Takashi Sakamoto <tasak@google.com>
Reviewed-by: default avatarBartek Nowierski <bartekn@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#774965}
parent 19799b8e
...@@ -1713,6 +1713,8 @@ jumbo_component("base") { ...@@ -1713,6 +1713,8 @@ jumbo_component("base") {
# Add stuff that doesn't work in NaCl. # Add stuff that doesn't work in NaCl.
sources += [ sources += [
# PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below). # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
"allocator/partition_allocator/address_pool_manager.cc",
"allocator/partition_allocator/address_pool_manager.h",
"allocator/partition_allocator/address_space_randomization.cc", "allocator/partition_allocator/address_space_randomization.cc",
"allocator/partition_allocator/address_space_randomization.h", "allocator/partition_allocator/address_space_randomization.h",
"allocator/partition_allocator/memory_reclaimer.cc", "allocator/partition_allocator/memory_reclaimer.cc",
...@@ -1723,9 +1725,13 @@ jumbo_component("base") { ...@@ -1723,9 +1725,13 @@ jumbo_component("base") {
"allocator/partition_allocator/page_allocator.cc", "allocator/partition_allocator/page_allocator.cc",
"allocator/partition_allocator/page_allocator.h", "allocator/partition_allocator/page_allocator.h",
"allocator/partition_allocator/page_allocator_internal.h", "allocator/partition_allocator/page_allocator_internal.h",
"allocator/partition_allocator/partition_address_space.cc",
"allocator/partition_allocator/partition_address_space.h",
"allocator/partition_allocator/partition_alloc.cc", "allocator/partition_allocator/partition_alloc.cc",
"allocator/partition_allocator/partition_alloc.h", "allocator/partition_allocator/partition_alloc.h",
"allocator/partition_allocator/partition_alloc_constants.h", "allocator/partition_allocator/partition_alloc_constants.h",
"allocator/partition_allocator/partition_alloc_features.cc",
"allocator/partition_allocator/partition_alloc_features.h",
"allocator/partition_allocator/partition_alloc_forward.h", "allocator/partition_allocator/partition_alloc_forward.h",
"allocator/partition_allocator/partition_bucket.cc", "allocator/partition_allocator/partition_bucket.cc",
"allocator/partition_allocator/partition_bucket.h", "allocator/partition_allocator/partition_bucket.h",
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/bits.h"
#include "base/stl_util.h"
#include <limits>
namespace base {
namespace internal {
#if defined(ARCH_CPU_64_BITS)
// Out-of-line definition for the class-scope constant (needed when the
// constant is ODR-used, e.g. by base::size()-style helpers).
constexpr size_t AddressPoolManager::kNumPools;

// static
AddressPoolManager* AddressPoolManager::GetInstance() {
  // NoDestructor: the singleton is intentionally leaked at shutdown.
  static NoDestructor<AddressPoolManager> instance;
  return instance.get();
}
// Registers the reserved region [ptr, ptr + length) as a new pool and
// returns a 1-based handle for it. Both ends of the region must be aligned
// on |align|, which must be a power of two. Crashes (NOTREACHED) when every
// pool slot is already occupied.
pool_handle AddressPoolManager::Add(const void* ptr,
                                    size_t length,
                                    size_t align) {
  DCHECK(base::bits::IsPowerOfTwo(align));
  const uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  const uintptr_t alignment_mask = align - 1;
  DCHECK(!(begin & alignment_mask));
  DCHECK(!((begin + length) & alignment_mask));

  pool_handle candidate_handle = 0;
  for (std::unique_ptr<Pool>& slot : pools_) {
    // Handles are 1-based so that 0 can serve as "no pool".
    ++candidate_handle;
    if (slot)
      continue;
    slot = std::make_unique<Pool>(begin, length, align);
    return candidate_handle;
  }
  NOTREACHED();
  return 0;
}
void AddressPoolManager::ResetForTesting() {
for (pool_handle i = 0; i < base::size(pools_); ++i)
pools_[i].reset();
}
// Unregisters the pool identified by |handle|. |handle| must be a valid
// 1-based handle previously returned by Add().
void AddressPoolManager::Remove(pool_handle handle) {
  DCHECK(0 < handle && handle <= kNumPools);
  pools_[handle - 1].reset();
}
// Carves a |length|-byte region out of the pool identified by |handle|.
// Returns nullptr when the pool has no free chunk large enough.
void* AddressPoolManager::Alloc(pool_handle handle, size_t length) {
  DCHECK(0 < handle && handle <= kNumPools);
  Pool* pool = pools_[handle - 1].get();
  DCHECK(pool);
  return pool->FindChunk(length);
}
// Returns the region [ptr, ptr + length) to the pool identified by
// |handle|. The region must have been obtained from Alloc() on that pool.
void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
  DCHECK(0 < handle && handle <= kNumPools);
  Pool* pool = pools_[handle - 1].get();
  DCHECK(pool);
  pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
}
// Seeds the pool with a single free chunk covering the whole region
// [ptr, ptr + length). |align| is recorded as the granularity for all later
// FindChunk()/FreeChunk() operations.
AddressPoolManager::Pool::Pool(uintptr_t ptr, size_t length, size_t align)
    : align_(align)
#if DCHECK_IS_ON()
      ,
      // Region bounds are kept only for DCHECK-build sanity checks.
      address_begin_(ptr),
      address_end_(ptr + length)
#endif
{
  free_chunks_.insert(std::make_pair(ptr, length));
#if DCHECK_IS_ON()
  DCHECK_LT(address_begin_, address_end_);
#endif
}
// Allocates a chunk of at least |requested_size| bytes (rounded up to a
// multiple of |align_|) using a first-fit scan over the ordered free list.
// Returns nullptr when no free chunk is large enough.
void* AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  base::AutoLock scoped_lock(lock_);

  const size_t required_size = bits::Align(requested_size, align_);

  // First fit: free_chunks_ is keyed by start address, so this picks the
  // lowest-addressed chunk that is big enough.
  auto candidate = free_chunks_.begin();
  while (candidate != free_chunks_.end() && candidate->second < required_size)
    ++candidate;
  if (candidate == free_chunks_.end())
    return nullptr;

  const uintptr_t chunk_start = candidate->first;
  const size_t chunk_size = candidate->second;
  free_chunks_.erase(candidate);

  // Give back the unused tail of the chunk, if any.
  if (chunk_size > required_size) {
    const bool tail_inserted =
        free_chunks_
            .insert(std::make_pair(chunk_start + required_size,
                                   chunk_size - required_size))
            .second;
    DCHECK(tail_inserted);
  }

  DCHECK(!(chunk_start & (align_ - 1)));
#if DCHECK_IS_ON()
  DCHECK_LE(address_begin_, chunk_start);
  DCHECK_LE(chunk_start + required_size, address_end_);
#endif
  return reinterpret_cast<void*>(chunk_start);
}
// Returns the chunk [address, address + free_size) to the free list,
// coalescing it with the immediately adjacent left and/or right free chunks
// so that free space does not stay fragmented.
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  base::AutoLock scoped_lock(lock_);

  const uintptr_t align_offset_mask = align_ - 1;
  DCHECK(!(address & align_offset_mask));
  // Round up to the pool granularity, mirroring what FindChunk() handed out.
  const size_t size = bits::Align(free_size, align_);

#if DCHECK_IS_ON()
  DCHECK_LE(address_begin_, address);
  DCHECK_LE(address + size, address_end_);
#endif
  DCHECK_LT(address, address + size);

  auto new_chunk = std::make_pair(address, size);

  // Merge with the left neighbor when it ends exactly where this chunk
  // starts. lower_bound(address) is the first chunk at or after |address|,
  // so the left neighbor (if any) is the entry just before it.
  auto lower_bound = free_chunks_.lower_bound(address);
  if (lower_bound != free_chunks_.begin()) {
    auto left = --lower_bound;
    uintptr_t left_chunk_end = left->first + left->second;
    DCHECK_LE(left_chunk_end, address);
    if (left_chunk_end == address) {
      new_chunk.first = left->first;
      new_chunk.second += left->second;
      free_chunks_.erase(left);
    }
  }
  // Merge with the right neighbor when it starts exactly where this chunk
  // ends.
  auto right = free_chunks_.upper_bound(address);
  if (right != free_chunks_.end()) {
    uintptr_t chunk_end = address + size;
    DCHECK_LE(chunk_end, right->first);
    if (right->first == chunk_end) {
      new_chunk.second += right->second;
      free_chunks_.erase(right);
    }
  }
  // Double-free or overlap would surface here as a duplicate key.
  bool newly_inserted = free_chunks_.insert(new_chunk).second;
  DCHECK(newly_inserted);
}
AddressPoolManager::Pool::~Pool() = default;

// The manager holds only the pools_ array; nothing beyond the defaulted
// member construction/destruction is needed.
AddressPoolManager::AddressPoolManager() = default;
AddressPoolManager::~AddressPoolManager() = default;
#endif // defined(ARCH_CPU_64_BITS)
} // namespace internal
} // namespace base
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#include <map>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/atomicops.h"
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
namespace base {
namespace internal {
using pool_handle = unsigned;
// The address space reservation is supported only on 64-bit architecture.
#if defined(ARCH_CPU_64_BITS)
// AddressPoolManager takes a reserved virtual address space and manages the
// address space allocation.
// AddressPoolManager supports up to 2 pools. One pool manages one contiguous
// reserved address space. Alloc() takes the pool handle and returns
// address regions from the specified pool. Free() also takes the pool handle
// and returns the address region back to the manager.
class BASE_EXPORT AddressPoolManager {
 public:
  // Returns the process-wide, never-destroyed singleton.
  static AddressPoolManager* GetInstance();

  // Registers [address, address + length) as a new pool and returns its
  // 1-based handle. |address| and |address + length| must be aligned on
  // |align| (a power of two).
  pool_handle Add(const void* address, size_t length, size_t align);
  // Unregisters the pool identified by |handle|.
  void Remove(pool_handle handle);
  // Carves a |length|-byte region out of the pool |handle|; returns nullptr
  // when no free chunk is large enough.
  void* Alloc(pool_handle handle, size_t length);
  // Returns [ptr, ptr + length) to the pool |handle|.
  void Free(pool_handle handle, void* ptr, size_t length);
  // Drops all registered pools. Test-only.
  void ResetForTesting();

 private:
  AddressPoolManager();
  ~AddressPoolManager();

  // Manages the free chunks of one contiguous reserved address region.
  class Pool {
   public:
    Pool(uintptr_t ptr, size_t length, size_t align);
    ~Pool();

    void* FindChunk(size_t size);
    void FreeChunk(uintptr_t address, size_t size);

   private:
    base::Lock lock_;
    // Maps the start address of each free chunk to its size in bytes.
    std::map<uintptr_t, size_t> free_chunks_ GUARDED_BY(lock_);
    // All returned chunks will be aligned on this align_ and all chunks' size
    // will be a multiple of |align_|.
    const uintptr_t align_ = 0;
#if DCHECK_IS_ON()
    // Region bounds, used only for DCHECK-build sanity checks.
    const uintptr_t address_begin_;
    const uintptr_t address_end_;
#endif

    DISALLOW_COPY_AND_ASSIGN(Pool);
  };

  // NOTE: the unused `pool_handle AllocHandle();` declaration was removed; it
  // had no definition, so any call would have failed to link.
  static constexpr size_t kNumPools = 2;
  std::unique_ptr<Pool> pools_[kNumPools];

  friend class NoDestructor<AddressPoolManager>;
  DISALLOW_COPY_AND_ASSIGN(AddressPoolManager);
};
#endif // defined(ARCH_CPU_64_BITS)
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
...@@ -18,6 +18,14 @@ ...@@ -18,6 +18,14 @@
namespace base { namespace base {
namespace {
void HandleOOM(size_t unused_size) {
LOG(FATAL) << "Out of memory";
}
} // namespace
class PartitionAllocMemoryReclaimerTest : public ::testing::Test { class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
public: public:
PartitionAllocMemoryReclaimerTest() PartitionAllocMemoryReclaimerTest()
...@@ -27,6 +35,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test { ...@@ -27,6 +35,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
protected: protected:
void SetUp() override { void SetUp() override {
PartitionAllocGlobalInit(HandleOOM);
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting(); PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
allocator_ = std::make_unique<PartitionAllocatorGeneric>(); allocator_ = std::make_unique<PartitionAllocatorGeneric>();
allocator_->init(); allocator_->init();
...@@ -36,6 +45,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test { ...@@ -36,6 +45,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
allocator_ = nullptr; allocator_ = nullptr;
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting(); PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
task_environment_.FastForwardUntilNoTasksRemain(); task_environment_.FastForwardUntilNoTasksRemain();
PartitionAllocGlobalUninitForTesting();
} }
void StartReclaimer() { void StartReclaimer() {
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/bits.h"
namespace base {
namespace internal {
#if defined(ARCH_CPU_64_BITS)
constexpr size_t PartitionAddressSpace::kGigaBytes;
constexpr size_t PartitionAddressSpace::kDirectMapPoolSize;
constexpr size_t PartitionAddressSpace::kNormalBucketPoolSize;
constexpr size_t PartitionAddressSpace::kReservedAddressSpaceSize;
// static
PartitionAddressSpace* PartitionAddressSpace::Instance() {
  // NoDestructor: the reservation lives for the entire process lifetime.
  static NoDestructor<PartitionAddressSpace> instance;
  return instance.get();
}
// Reserves kReservedAddressSpaceSize bytes of inaccessible address space and
// splits it into the direct-map pool followed by the normal-bucket pool,
// both registered with AddressPoolManager. The reservation includes
// kSuperPageSize of slack so a kSuperPageSize-aligned start always fits.
void PartitionAddressSpace::Init() {
  reserved_address_start_ = reinterpret_cast<char*>(SystemAllocPages(
      nullptr, kReservedAddressSpaceSize, base::PageInaccessible,
      PageTag::kPartitionAlloc, false));
  DCHECK(reserved_address_start_);
  reserved_address_end_ = reserved_address_start_ + kReservedAddressSpaceSize;

  // Align the first pool on a super-page boundary inside the reservation;
  // the bytes before |current| are left unused.
  char* current = reinterpret_cast<char*>(bits::Align(
      reinterpret_cast<uintptr_t>(reserved_address_start_), kSuperPageSize));
  DCHECK_GE(current, reserved_address_start_);
  DCHECK(!(reinterpret_cast<uintptr_t>(current) & kSuperPageOffsetMask));

  direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
      current, kDirectMapPoolSize, kSuperPageSize);
  DCHECK(direct_map_pool_);
  current += kDirectMapPoolSize;

  normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
      current, kNormalBucketPoolSize, kSuperPageSize);
  DCHECK(normal_bucket_pool_);
  current += kNormalBucketPoolSize;
  DCHECK_LE(current, reserved_address_end_);
}
void PartitionAddressSpace::UninitForTesting() {
DCHECK(reserved_address_start_);
DCHECK(reserved_address_end_);
FreePages(reserved_address_start_,
reserved_address_end_ - reserved_address_start_);
reserved_address_start_ = nullptr;
reserved_address_end_ = nullptr;
direct_map_pool_ = 0;
internal::AddressPoolManager::GetInstance()->ResetForTesting();
}
#endif // defined(ARCH_CPU_64_BITS)
} // namespace internal
} // namespace base
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/notreached.h"
#include "build/build_config.h"
namespace base {
namespace internal {
// The address space reservation is supported only on 64-bit architecture.
#if defined(ARCH_CPU_64_BITS)
// Reserves address space for PartitionAllocator.
class BASE_EXPORT PartitionAddressSpace {
 public:
  // Returns the process-wide, never-destroyed singleton.
  static PartitionAddressSpace* Instance();

  // Pool handles carved out of the reservation. Valid (non-zero) only after
  // Init() has been called.
  internal::pool_handle GetDirectMapPool() { return direct_map_pool_; }
  internal::pool_handle GetNormalBucketPool() { return normal_bucket_pool_; }

  // Reserves the address space and registers both pools with
  // AddressPoolManager. Must run before any PartitionAlloc allocation.
  void Init();
  // Undoes Init(). Test-only.
  void UninitForTesting();

  // TODO(tasak): This method should be as cheap as possible. So we can make
  // this cheaper since the range size is a power of two, but just checking
  // that the high order bits of the address are the right ones.
  bool Contains(const void* address) const {
    return reserved_address_start_ <= address &&
           address < reserved_address_end_;
  }

 private:
  // Partition Alloc Address Space
  // Reserves 32Gbytes address space for 1 direct map space(16G) and 1 normal
  // bucket space(16G).
  //
  // +----------------+ reserved address start
  // | (unused)       |
  // +----------------+ kSuperPageSize-aligned reserved address: X
  // |                |
  // | direct map     |
  // |   space        |
  // |                |
  // +----------------+ X + 16G bytes
  // | normal buckets |
  // |   space        |
  // +----------------+ X + 32G bytes
  // | (unused)       |
  // +----------------+ reserved address end
  static constexpr size_t kGigaBytes = static_cast<size_t>(1024 * 1024 * 1024);
  static constexpr size_t kDirectMapPoolSize =
      static_cast<size_t>(16 * kGigaBytes);
  static constexpr size_t kNormalBucketPoolSize =
      static_cast<size_t>(16 * kGigaBytes);
  // kSuperPageSize padding is added to be able to align to kSuperPageSize
  // boundary.
  static constexpr size_t kReservedAddressSpaceSize =
      kDirectMapPoolSize + kNormalBucketPoolSize + kSuperPageSize;

  // Fix: default-initialize all members so that reading them before Init()
  // (via the accessors above or Contains()) is deterministic instead of
  // undefined behavior on indeterminate values.
  char* reserved_address_start_ = nullptr;
  char* reserved_address_end_ = nullptr;
  internal::pool_handle direct_map_pool_ = 0;
  internal::pool_handle normal_bucket_pool_ = 0;
};
// Convenience accessors for the two pool handles. Only meaningful when the
// GigaCage feature is enabled (DCHECKed) and PartitionAddressSpace::Init()
// has already run.
ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
  DCHECK(IsPartitionAllocGigaCageEnabled());
  return PartitionAddressSpace::Instance()->GetDirectMapPool();
}

ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
  DCHECK(IsPartitionAllocGigaCageEnabled());
  return PartitionAddressSpace::Instance()->GetNormalBucketPool();
}
#else // !defined(ARCH_CPU_64_BITS)
// 32-bit builds never reserve a GigaCage, so these accessors must not be
// reached; they exist only so callers compile on all architectures.
ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
  NOTREACHED();
  return 0;
}

ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
  NOTREACHED();
  return 0;
}
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
#include <memory> #include <memory>
#include <type_traits> #include <type_traits>
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
...@@ -199,6 +202,20 @@ static void PartitionAllocBaseInit( ...@@ -199,6 +202,20 @@ static void PartitionAllocBaseInit(
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) { void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
DCHECK(on_out_of_memory); DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory; internal::g_oom_handling_function = on_out_of_memory;
#if defined(ARCH_CPU_64_BITS)
// Reserve address space for partition alloc.
if (IsPartitionAllocGigaCageEnabled())
internal::PartitionAddressSpace::Instance()->Init();
#endif
}
void PartitionAllocGlobalUninitForTesting() {
#if defined(ARCH_CPU_64_BITS)
if (IsPartitionAllocGigaCageEnabled())
internal::PartitionAddressSpace::Instance()->UninitForTesting();
#endif
internal::g_oom_handling_function = nullptr;
} }
void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) { void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
......
...@@ -75,6 +75,7 @@ ...@@ -75,6 +75,7 @@
#include "base/bits.h" #include "base/bits.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/logging.h" #include "base/logging.h"
#include "base/notreached.h"
#include "base/stl_util.h" #include "base/stl_util.h"
#include "base/sys_byteorder.h" #include "base/sys_byteorder.h"
#include "build/build_config.h" #include "build/build_config.h"
...@@ -219,6 +220,7 @@ class BASE_EXPORT PartitionStatsDumper { ...@@ -219,6 +220,7 @@ class BASE_EXPORT PartitionStatsDumper {
}; };
BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory); BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) { ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
return AllocFlags(0, size, type_name); return AllocFlags(0, size, type_name);
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/feature_list.h"
namespace base {
// If enabled, PartitionAllocator reserves one contiguous address space up
// front (the "GigaCage") and serves every allocation from within it.
const Feature kPartitionAllocGigaCage{"PartitionAllocGigaCage",
                                      FEATURE_DISABLED_BY_DEFAULT};
} // namespace base
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "build/build_config.h"
namespace base {
struct Feature;
extern const BASE_EXPORT Feature kPartitionAllocGigaCage;
// Returns whether the GigaCage address-space reservation should be used.
ALWAYS_INLINE bool IsPartitionAllocGigaCageEnabled() {
#if defined(ARCH_CPU_64_BITS)
  // On 64-bit platforms the reservation is gated on the runtime feature.
  return FeatureList::IsEnabled(kPartitionAllocGigaCage);
#else
  // A 32 GiB reservation cannot fit in a 32-bit address space.
  return false;
#endif
}
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
...@@ -110,6 +110,10 @@ void AllocateRandomly(base::PartitionRootGeneric* root, ...@@ -110,6 +110,10 @@ void AllocateRandomly(base::PartitionRootGeneric* root,
} }
} }
void HandleOOM(size_t unused_size) {
LOG(FATAL) << "Out of memory";
}
} // namespace } // namespace
namespace base { namespace base {
...@@ -142,10 +146,13 @@ class PartitionAllocTest : public testing::Test { ...@@ -142,10 +146,13 @@ class PartitionAllocTest : public testing::Test {
~PartitionAllocTest() override = default; ~PartitionAllocTest() override = default;
void SetUp() override { void SetUp() override {
PartitionAllocGlobalInit(HandleOOM);
allocator.init(); allocator.init();
generic_allocator.init(); generic_allocator.init();
} }
void TearDown() override { PartitionAllocGlobalUninitForTesting(); }
PartitionRoot::Page* GetFullPage(size_t size) { PartitionRoot::Page* GetFullPage(size_t size) {
size_t real_size = size + kExtraAllocSize; size_t real_size = size + kExtraAllocSize;
size_t bucket_index = real_size >> kBucketShift; size_t bucket_index = real_size >> kBucketShift;
......
...@@ -4,9 +4,12 @@ ...@@ -4,9 +4,12 @@
#include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
...@@ -19,6 +22,21 @@ namespace internal { ...@@ -19,6 +22,21 @@ namespace internal {
namespace { namespace {
char* CommitPages(internal::pool_handle pool, size_t map_size) {
#if defined(ARCH_CPU_64_BITS)
char* ptr = reinterpret_cast<char*>(
internal::AddressPoolManager::GetInstance()->Alloc(pool, map_size));
if (UNLIKELY(!ptr))
return nullptr;
DCHECK(!(map_size & kSystemPageOffsetMask));
SetSystemPagesAccess(ptr, map_size, PageReadWrite);
return ptr;
#else
NOTREACHED();
return nullptr;
#endif
}
template <bool thread_safe> template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap( ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
PartitionRootBase<thread_safe>* root, PartitionRootBase<thread_safe>* root,
...@@ -41,9 +59,14 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap( ...@@ -41,9 +59,14 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
map_size += kPageAllocationGranularityOffsetMask; map_size += kPageAllocationGranularityOffsetMask;
map_size &= kPageAllocationGranularityBaseMask; map_size &= kPageAllocationGranularityBaseMask;
char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size, char* ptr = nullptr;
kSuperPageSize, PageReadWrite, if (IsPartitionAllocGigaCageEnabled()) {
PageTag::kPartitionAlloc)); ptr = CommitPages(GetDirectMapPool(), map_size);
} else {
ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size, kSuperPageSize,
PageReadWrite,
PageTag::kPartitionAlloc));
}
if (UNLIKELY(!ptr)) if (UNLIKELY(!ptr))
return nullptr; return nullptr;
...@@ -229,9 +252,14 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan( ...@@ -229,9 +252,14 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
// page table bloat and not fragmenting address spaces in 32 bit // page table bloat and not fragmenting address spaces in 32 bit
// architectures. // architectures.
char* requested_address = root->next_super_page; char* requested_address = root->next_super_page;
char* super_page = reinterpret_cast<char*>( char* super_page = nullptr;
AllocPages(requested_address, kSuperPageSize, kSuperPageSize, if (IsPartitionAllocGigaCageEnabled()) {
PageReadWrite, PageTag::kPartitionAlloc)); super_page = CommitPages(GetNormalBucketPool(), kSuperPageSize);
} else {
super_page = reinterpret_cast<char*>(
AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
PageReadWrite, PageTag::kPartitionAlloc));
}
if (UNLIKELY(!super_page)) if (UNLIKELY(!super_page))
return nullptr; return nullptr;
...@@ -294,7 +322,7 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan( ...@@ -294,7 +322,7 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
PartitionSuperPageExtentEntry<thread_safe>* current_extent = PartitionSuperPageExtentEntry<thread_safe>* current_extent =
root->current_extent; root->current_extent;
bool is_new_extent = (super_page != requested_address); const bool is_new_extent = super_page != requested_address;
if (UNLIKELY(is_new_extent)) { if (UNLIKELY(is_new_extent)) {
if (UNLIKELY(!current_extent)) { if (UNLIKELY(!current_extent)) {
DCHECK(!root->first_extent); DCHECK(!root->first_extent);
......
...@@ -4,15 +4,36 @@ ...@@ -4,15 +4,36 @@
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_root_base.h" #include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/check.h" #include "base/check.h"
#include "base/feature_list.h"
#include "base/notreached.h"
#include "build/build_config.h"
namespace base { namespace base {
namespace internal { namespace internal {
namespace { namespace {
void DecommitPages(void* address, size_t size) {
#if defined(ARCH_CPU_64_BITS)
internal::AddressPoolManager::GetInstance()->Free(
internal::GetDirectMapPool(), address, size);
#if defined(OS_MACOSX)
SetSystemPagesAccess(address, size, PageReadWrite);
memset(address, 0, size);
#endif
SetSystemPagesAccess(address, size, PageInaccessible);
DecommitSystemPages(address, size);
#else
NOTREACHED();
#endif
}
template <bool thread_safe> template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(PartitionPage<thread_safe>* page) { PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
...@@ -179,7 +200,12 @@ void PartitionPage<thread_safe>::DecommitIfPossible( ...@@ -179,7 +200,12 @@ void PartitionPage<thread_safe>::DecommitIfPossible(
} }
void DeferredUnmap::Unmap() { void DeferredUnmap::Unmap() {
FreePages(ptr, size); DCHECK(ptr && size > 0);
if (IsPartitionAllocGigaCageEnabled()) {
DecommitPages(ptr, size);
} else {
FreePages(ptr, size);
}
} }
template struct PartitionPage<ThreadSafe>; template struct PartitionPage<ThreadSafe>;
......
...@@ -50,10 +50,15 @@ constexpr size_t kLoopIterations = kSamplingFrequency * 4; ...@@ -50,10 +50,15 @@ constexpr size_t kLoopIterations = kSamplingFrequency * 4;
constexpr int kSuccess = 0; constexpr int kSuccess = 0;
constexpr int kFailure = 1; constexpr int kFailure = 1;
static void HandleOOM(size_t unused_size) {
LOG(FATAL) << "Out of memory.";
}
class SamplingPartitionAllocShimsTest : public base::MultiProcessTest { class SamplingPartitionAllocShimsTest : public base::MultiProcessTest {
public: public:
static void multiprocessTestSetup() { static void multiprocessTestSetup() {
crash_reporter::InitializeCrashKeys(); crash_reporter::InitializeCrashKeys();
base::PartitionAllocGlobalInit(HandleOOM);
InstallPartitionAllocHooks( InstallPartitionAllocHooks(
AllocatorState::kMaxMetadata, AllocatorState::kMaxMetadata, AllocatorState::kMaxMetadata, AllocatorState::kMaxMetadata,
AllocatorState::kMaxSlots, kSamplingFrequency, base::DoNothing()); AllocatorState::kMaxSlots, kSamplingFrequency, base::DoNothing());
......
...@@ -331,14 +331,21 @@ bool ValidateProcessMmaps(base::Value* process_mmaps, ...@@ -331,14 +331,21 @@ bool ValidateProcessMmaps(base::Value* process_mmaps,
return true; return true;
} }
void HandleOOM(size_t unsued_size) {
LOG(FATAL) << "Out of memory.";
}
} // namespace } // namespace
TestDriver::TestDriver() TestDriver::TestDriver()
: wait_for_ui_thread_(base::WaitableEvent::ResetPolicy::AUTOMATIC, : wait_for_ui_thread_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED) { base::WaitableEvent::InitialState::NOT_SIGNALED) {
base::PartitionAllocGlobalInit(HandleOOM);
partition_allocator_.init(); partition_allocator_.init();
} }
TestDriver::~TestDriver() = default; TestDriver::~TestDriver() {
base::PartitionAllocGlobalUninitForTesting();
}
bool TestDriver::RunTest(const Options& options) { bool TestDriver::RunTest(const Options& options) {
options_ = options; options_ = options;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment