Commit 76137ebc authored by Bartek Nowierski's avatar Bartek Nowierski Committed by Commit Bot

Bitmap-based free chunk tracking

Bug: 1086388
Change-Id: Ic73b32ee9b49cbb6fd1901e5340af1ce4274b2ba
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2215538
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: default avatarTakashi Sakamoto <tasak@google.com>
Reviewed-by: default avatarBenoit L <lizeb@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#776502}
parent b44f0f76
......@@ -3129,6 +3129,7 @@ test("base_unittests") {
if (use_partition_alloc) {
sources += [
"allocator/partition_allocator/address_pool_manager_unittest.cc",
"allocator/partition_allocator/address_space_randomization_unittest.cc",
"allocator/partition_allocator/memory_reclaimer_unittest.cc",
"allocator/partition_allocator/page_allocator_unittest.cc",
......
......@@ -16,7 +16,9 @@ namespace internal {
#if defined(ARCH_CPU_64_BITS)
constexpr size_t AddressPoolManager::kNumPools;
static_assert(sizeof(size_t) >= 8, "Need 64-bit address space");
constexpr size_t AddressPoolManager::Pool::kMaxBits;
// static
AddressPoolManager* AddressPoolManager::GetInstance() {
......@@ -24,17 +26,13 @@ AddressPoolManager* AddressPoolManager::GetInstance() {
return instance.get();
}
pool_handle AddressPoolManager::Add(uintptr_t ptr,
size_t length,
size_t align) {
DCHECK(base::bits::IsPowerOfTwo(align));
const uintptr_t align_offset_mask = align - 1;
DCHECK(!(ptr & align_offset_mask));
DCHECK(!((ptr + length) & align_offset_mask));
pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
DCHECK(!(ptr & kSuperPageOffsetMask));
DCHECK(!((ptr + length) & kSuperPageOffsetMask));
for (pool_handle i = 0; i < base::size(pools_); ++i) {
if (!pools_[i]) {
pools_[i] = std::make_unique<Pool>(ptr, length, align);
pools_[i] = std::make_unique<Pool>(ptr, length);
return i + 1;
}
}
......@@ -66,93 +64,87 @@ void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
}
AddressPoolManager::Pool::Pool(uintptr_t ptr, size_t length, size_t align)
: align_(align)
AddressPoolManager::Pool::Pool(uintptr_t ptr, size_t length)
: total_bits_(length / kSuperPageSize),
address_begin_(ptr)
#if DCHECK_IS_ON()
,
address_begin_(ptr),
address_end_(ptr + length)
#endif
{
free_chunks_.insert(std::make_pair(ptr, length));
CHECK_LE(total_bits_, kMaxBits);
CHECK(!(ptr & kSuperPageOffsetMask));
CHECK(!(length & kSuperPageOffsetMask));
#if DCHECK_IS_ON()
DCHECK_LT(address_begin_, address_end_);
#endif
alloc_bitset_.reset();
}
uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
base::AutoLock scoped_lock(lock_);
const uintptr_t align_offset_mask = align_ - 1;
const size_t required_size = bits::Align(requested_size, align_);
uintptr_t chosen_chunk = 0;
size_t chosen_chunk_size = std::numeric_limits<size_t>::max();
const size_t required_size = bits::Align(requested_size, kSuperPageSize);
const size_t need_bits = required_size >> kSuperPageShift;
// Use first fit policy to find an available chunk from free chunks.
for (const auto& chunk : free_chunks_) {
size_t chunk_size = chunk.second;
if (chunk_size >= required_size) {
chosen_chunk = chunk.first;
chosen_chunk_size = chunk_size;
break;
size_t beg_bit = 0;
size_t curr_bit = 0;
while (true) {
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past
// |total_bits_|, return |nullptr| to signal no free chunk was found.
size_t end_bit = beg_bit + need_bits;
if (end_bit > total_bits_)
return 0;
bool found = true;
for (; curr_bit < end_bit; ++curr_bit) {
if (alloc_bitset_.test(curr_bit)) {
// The bit was set, so this chunk isn't entirely free. Set |found=false|
// to ensure the outer loop continues. However, continue the inner loop
// to set |beg_bit| just past the last set bit in the investigated
// chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
// next outer loop pass from checking the same bits.
beg_bit = curr_bit + 1;
found = false;
}
}
}
if (!chosen_chunk)
return 0;
free_chunks_.erase(chosen_chunk);
if (chosen_chunk_size > required_size) {
bool newly_inserted =
free_chunks_
.insert(std::make_pair(chosen_chunk + required_size,
chosen_chunk_size - required_size))
.second;
DCHECK(newly_inserted);
}
DCHECK(!(chosen_chunk & align_offset_mask));
// An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s
// (to mark as allocated) and return the allocated address.
if (found) {
for (size_t i = beg_bit; i < end_bit; ++i) {
DCHECK(!alloc_bitset_.test(i));
alloc_bitset_.set(i);
}
uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
#if DCHECK_IS_ON()
DCHECK_LE(address_begin_, chosen_chunk);
DCHECK_LE(chosen_chunk + required_size, address_end_);
DCHECK_LE(address + required_size, address_end_);
#endif
return chosen_chunk;
return address;
}
}
return 0;
}
void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
base::AutoLock scoped_lock(lock_);
const uintptr_t align_offset_mask = align_ - 1;
DCHECK(!(address & align_offset_mask));
DCHECK(!(address & kSuperPageOffsetMask));
const size_t size = bits::Align(free_size, align_);
#if DCHECK_IS_ON()
const size_t size = bits::Align(free_size, kSuperPageSize);
DCHECK_LE(address_begin_, address);
#if DCHECK_IS_ON()
DCHECK_LE(address + size, address_end_);
#endif
DCHECK_LT(address, address + size);
auto new_chunk = std::make_pair(address, size);
auto lower_bound = free_chunks_.lower_bound(address);
if (lower_bound != free_chunks_.begin()) {
auto left = --lower_bound;
uintptr_t left_chunk_end = left->first + left->second;
DCHECK_LE(left_chunk_end, address);
if (left_chunk_end == address) {
new_chunk.first = left->first;
new_chunk.second += left->second;
free_chunks_.erase(left);
}
}
auto right = free_chunks_.upper_bound(address);
if (right != free_chunks_.end()) {
uintptr_t chunk_end = address + size;
DCHECK_LE(chunk_end, right->first);
if (right->first == chunk_end) {
new_chunk.second += right->second;
free_chunks_.erase(right);
}
const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
const size_t end_bit = beg_bit + size / kSuperPageSize;
for (size_t i = beg_bit; i < end_bit; ++i) {
DCHECK(alloc_bitset_.test(i));
alloc_bitset_.reset(i);
}
bool newly_inserted = free_chunks_.insert(new_chunk).second;
DCHECK(newly_inserted);
}
AddressPoolManager::Pool::~Pool() = default;
......
......@@ -5,7 +5,7 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
#include <map>
#include <bitset>
#include <memory>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
......@@ -34,7 +34,7 @@ class BASE_EXPORT AddressPoolManager {
public:
static AddressPoolManager* GetInstance();
pool_handle Add(uintptr_t address, size_t length, size_t align);
pool_handle Add(uintptr_t address, size_t length);
void Remove(pool_handle handle);
char* Alloc(pool_handle handle, size_t length);
void Free(pool_handle handle, void* ptr, size_t length);
......@@ -48,20 +48,24 @@ class BASE_EXPORT AddressPoolManager {
class Pool {
public:
Pool(uintptr_t ptr, size_t length, size_t align);
Pool(uintptr_t ptr, size_t length);
~Pool();
uintptr_t FindChunk(size_t size);
void FreeChunk(uintptr_t address, size_t size);
private:
// The bitset stores an allocation state of the address pool. 1 bit per
// super-page: 1 = allocated, 0 = free.
static constexpr size_t kGigaBytes = 1024 * 1024 * 1024;
static constexpr size_t kMaxSupportedSize = 16 * kGigaBytes;
static constexpr size_t kMaxBits = kMaxSupportedSize / kSuperPageSize;
base::Lock lock_;
std::map<uintptr_t, size_t> free_chunks_ GUARDED_BY(lock_);
// All returned chunks will be aligned on this align_ and all chunks' size
// will be a multiple of |align_|.
const uintptr_t align_ = 0;
#if DCHECK_IS_ON()
std::bitset<kMaxBits> alloc_bitset_ GUARDED_BY(lock_);
const size_t total_bits_;
const uintptr_t address_begin_;
#if DCHECK_IS_ON()
const uintptr_t address_end_;
#endif
......
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace internal {
#if defined(ARCH_CPU_64_BITS)
TEST(AddressPoolManager, TooLargePool) {
  // A pool may cover at most 16 GiB; asking for one extra super page must
  // trip the CHECK in Pool's constructor.
  constexpr size_t kMaxPoolSize = 16ull * 1024 * 1024 * 1024;
  uintptr_t base_addr = 0x4200000;

  AddressPoolManager::GetInstance()->ResetForTesting();
  AddressPoolManager::GetInstance()->Add(base_addr, kMaxPoolSize);
  EXPECT_DEATH_IF_SUPPORTED(
      AddressPoolManager::GetInstance()->Add(base_addr,
                                             kMaxPoolSize + kSuperPageSize),
      "");
}
TEST(AddressPoolManager, OnePage) {
  // A pool holding exactly one super page: an oversized request fails, the
  // single page can be handed out once, and becomes reusable after Free().
  const uintptr_t kBaseAddress = 0x4200000;
  char* const expected_ptr = reinterpret_cast<char*>(kBaseAddress);

  AddressPoolManager* manager = AddressPoolManager::GetInstance();
  manager->ResetForTesting();
  pool_handle handle = manager->Add(kBaseAddress, kSuperPageSize);

  EXPECT_EQ(manager->Alloc(handle, kSuperPageSize + 1), nullptr);
  EXPECT_EQ(manager->Alloc(handle, kSuperPageSize), expected_ptr);
  EXPECT_EQ(manager->Alloc(handle, kSuperPageSize), nullptr);
  manager->Free(handle, expected_ptr, kSuperPageSize);
  EXPECT_EQ(manager->Alloc(handle, kSuperPageSize), expected_ptr);
}
TEST(AddressPoolManager, ManyPages) {
  // Allocating the whole pool in one chunk succeeds, exhausts the pool, and
  // freeing that chunk makes the full pool available again.
  const uintptr_t kBaseAddress = 0x4200000;
  char* const base = reinterpret_cast<char*>(kBaseAddress);
  constexpr size_t kPageCount = 8192;

  AddressPoolManager* manager = AddressPoolManager::GetInstance();
  manager->ResetForTesting();
  pool_handle handle = manager->Add(kBaseAddress, kPageCount * kSuperPageSize);

  EXPECT_EQ(manager->Alloc(handle, kPageCount * kSuperPageSize), base);
  EXPECT_EQ(manager->Alloc(handle, kSuperPageSize), nullptr);
  manager->Free(handle, base, kPageCount * kSuperPageSize);
  EXPECT_EQ(manager->Alloc(handle, kPageCount * kSuperPageSize), base);
}
TEST(AddressPoolManager, PagesFragmented) {
  // Freeing every other super page fragments the pool so that no two-page
  // chunk fits, but single pages still land back in the holes (first fit).
  const uintptr_t kBaseAddress = 0x4200000;
  char* const base = reinterpret_cast<char*>(kBaseAddress);
  constexpr size_t kPageCount = 8192;

  AddressPoolManager* manager = AddressPoolManager::GetInstance();
  manager->ResetForTesting();
  pool_handle handle = manager->Add(kBaseAddress, kPageCount * kSuperPageSize);

  // Fill the pool one super page at a time.
  void* pages[kPageCount];
  for (size_t i = 0; i < kPageCount; ++i) {
    pages[i] = manager->Alloc(handle, kSuperPageSize);
    EXPECT_EQ(pages[i], base + i * kSuperPageSize);
  }
  // Free every odd page, leaving no two adjacent free pages.
  for (size_t i = 1; i < kPageCount; i += 2) {
    manager->Free(handle, pages[i], kSuperPageSize);
  }
  EXPECT_EQ(manager->Alloc(handle, 2 * kSuperPageSize), nullptr);
  // Single-page requests refill the holes in ascending address order.
  for (size_t i = 1; i < kPageCount; i += 2) {
    pages[i] = manager->Alloc(handle, kSuperPageSize);
    EXPECT_EQ(pages[i], base + i * kSuperPageSize);
  }
}
TEST(AddressPoolManager, IrregularPattern) {
  // Mixed-size allocations and frees: chunks are placed first-fit and freed
  // regions coalesce with their neighbors.
  const uintptr_t kBaseAddress = 0x4200000;
  char* const base = reinterpret_cast<char*>(kBaseAddress);
  constexpr size_t kPageCount = 8192;

  AddressPoolManager* manager = AddressPoolManager::GetInstance();
  manager->ResetForTesting();
  pool_handle handle = manager->Add(kBaseAddress, kPageCount * kSuperPageSize);

  // Growing allocations land back-to-back: pages [0], [1,2], [3..5], [6..9],
  // [10..14].
  void* chunk1 = manager->Alloc(handle, kSuperPageSize);
  EXPECT_EQ(chunk1, base);
  void* chunk2 = manager->Alloc(handle, 2 * kSuperPageSize);
  EXPECT_EQ(chunk2, base + 1 * kSuperPageSize);
  void* chunk3 = manager->Alloc(handle, 3 * kSuperPageSize);
  EXPECT_EQ(chunk3, base + 3 * kSuperPageSize);
  void* chunk4 = manager->Alloc(handle, 4 * kSuperPageSize);
  EXPECT_EQ(chunk4, base + 6 * kSuperPageSize);
  void* chunk5 = manager->Alloc(handle, 5 * kSuperPageSize);
  EXPECT_EQ(chunk5, base + 10 * kSuperPageSize);

  // The 4-page hole left by chunk4 is too small for 6 pages, so the next
  // allocation goes past the end of chunk5.
  manager->Free(handle, chunk4, 4 * kSuperPageSize);
  void* chunk6 = manager->Alloc(handle, 6 * kSuperPageSize);
  EXPECT_EQ(chunk6, base + 15 * kSuperPageSize);

  // Freeing chunk5 merges it with the hole into [6..14]; 7 pages now fit
  // there, a further 3 pages only past chunk6, and 2 pages in the remainder
  // of the merged hole.
  manager->Free(handle, chunk5, 5 * kSuperPageSize);
  void* chunk7 = manager->Alloc(handle, 7 * kSuperPageSize);
  EXPECT_EQ(chunk7, base + 6 * kSuperPageSize);
  void* chunk8 = manager->Alloc(handle, 3 * kSuperPageSize);
  EXPECT_EQ(chunk8, base + 21 * kSuperPageSize);
  void* chunk9 = manager->Alloc(handle, 2 * kSuperPageSize);
  EXPECT_EQ(chunk9, base + 13 * kSuperPageSize);

  // Freeing chunks 7, 9 and 6 coalesces [6..20], which fits 15 pages.
  manager->Free(handle, chunk7, 7 * kSuperPageSize);
  manager->Free(handle, chunk9, 2 * kSuperPageSize);
  manager->Free(handle, chunk6, 6 * kSuperPageSize);
  void* chunk10 = manager->Alloc(handle, 15 * kSuperPageSize);
  EXPECT_EQ(chunk10, base + 6 * kSuperPageSize);
}
#endif // defined(ARCH_CPU_64_BITS)
} // namespace internal
} // namespace base
......@@ -62,12 +62,12 @@ void PartitionAddressSpace::Init() {
uintptr_t current = reserved_base_address_;
direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
current, kDirectMapPoolSize, kSuperPageSize);
current, kDirectMapPoolSize);
DCHECK(direct_map_pool_);
current += kDirectMapPoolSize;
normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
current, kNormalBucketPoolSize, kSuperPageSize);
current, kNormalBucketPoolSize);
DCHECK(normal_bucket_pool_);
current += kNormalBucketPoolSize;
DCHECK_LE(current, reserved_address_end);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment