Commit 78309e5f authored by Benoit Lize's avatar Benoit Lize Committed by Commit Bot

base/allocator: Avoid calling memset() on macOS in DecommitSystemPages()

To have uniform semantics across OSes, we call memset() in
DecommitSystemPages() on macOS. This is costly and potentially wasteful,
as this may bring back compressed/swapped-out pages from disk, just to
call memset().

To avoid that, call mmap() on top of the existing mapping.

Bug: 1086388
Change-Id: Iedbf8d1b5d9882cc5e229765b80f95e9ee12a8de
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2238118
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Takashi Sakamoto <tasak@google.com>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#781898}
parent 2030632e
......@@ -4,6 +4,13 @@
#include "base/allocator/partition_allocator/address_pool_manager.h"
#if defined(OS_MACOSX)
#include <sys/mman.h>
#endif
#include <algorithm>
#include <limits>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
......@@ -11,13 +18,40 @@
#include "base/notreached.h"
#include "base/stl_util.h"
#include <limits>
namespace base {
namespace internal {
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
namespace {
// Releases the physical memory backing [address, address + size) while
// keeping the virtual address range reserved. After this call the old
// contents are gone; once re-committed, the pages read as zero.
void DecommitPages(void* address, size_t size) {
#if defined(OS_MACOSX)
// MAP_FIXED replaces an existing mapping with a new one, when the address is
// already part of a mapping. Since newly-created mappings are guaranteed to
// be zero-filled, this has the desired effect. It is only required on macOS,
// as on other operating systems, |DecommitSystemPages()| provides the same
// behavior.
void* ptr = mmap(address, size, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
// With MAP_FIXED, mmap() either succeeds exactly at |address| or fails;
// anything else would mean the reservation was lost, which is fatal.
PA_CHECK(ptr == address);
#else
// Drop access first so any stray touch faults, then decommit. Per the
// comment above, |DecommitSystemPages()| on these platforms guarantees
// zero-filled contents on recommit.
SetSystemPagesAccess(address, size, PageInaccessible);
DecommitSystemPages(address, size);
#endif
}
// Makes [address, address + size) usable again after a prior
// DecommitPages() call. The pages read as zero afterwards.
void CommitPages(void* address, size_t size) {
#if defined(OS_MACOSX)
  // On macOS, DecommitPages() replaced the range with a fresh PROT_NONE
  // mapping, so committing only requires restoring read/write access.
  SetSystemPagesAccess(address, size, PageReadWrite);
#else
  // Elsewhere the pages were decommitted and must be recommitted with
  // read/write access. The former extra SetSystemPagesAccess() call after
  // this line was a leftover of the pre-change code path and is redundant
  // with the PageReadWrite recommit, so it has been removed.
  PA_CHECK(RecommitSystemPages(address, size, PageReadWrite));
#endif
}
} // namespace
constexpr size_t AddressPoolManager::Pool::kMaxBits;
// static
......@@ -53,13 +87,17 @@ void AddressPoolManager::Remove(pool_handle handle) {
// Hands out |length| bytes of address space from the pool identified by
// |handle|, committing the pages before returning. Returns nullptr when the
// pool cannot satisfy the request.
char* AddressPoolManager::Alloc(pool_handle handle, size_t length) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  // NOTE: a stray pre-change "return ... FindChunk(length);" line used to sit
  // here, returning immediately and making the CommitPages() step below
  // unreachable; it has been removed so freshly allocated ranges are
  // actually committed.
  char* ptr = reinterpret_cast<char*>(pool->FindChunk(length));
  // Skip the commit syscalls when the allocation failed.
  if (LIKELY(ptr))
    CommitPages(ptr, length);
  return ptr;
}
// Returns |ptr| (an allocation of |length| bytes previously obtained from
// Alloc() on the same |handle|) to its pool.
void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
PA_DCHECK(0 < handle && handle <= kNumPools);
Pool* pool = GetPool(handle);
PA_DCHECK(pool->IsInitialized());
// Release the backing pages before the range becomes reusable, so a later
// Alloc() of the same range observes zero-filled memory rather than stale
// data.
DecommitPages(ptr, length);
pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
}
......
......@@ -4,6 +4,8 @@
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
......@@ -13,126 +15,135 @@ namespace internal {
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
TEST(AddressPoolManager, TooLargePool) {
// Fixture that reserves a kPoolSize region of (initially inaccessible)
// address space and registers it with AddressPoolManager, so every test
// starts from a clean, fully-owned pool.
class AddressPoolManagerTest : public testing::Test {
protected:
AddressPoolManagerTest() = default;
~AddressPoolManagerTest() override = default;
void SetUp() override {
AddressPoolManager::GetInstance()->ResetForTesting();
// Reserve (but do not commit) the address space backing the pool; the
// pool manager is responsible for committing chunks it hands out.
base_address_ =
AllocPages(nullptr, kPoolSize, kSuperPageSize, base::PageInaccessible,
PageTag::kPartitionAlloc, false);
ASSERT_TRUE(base_address_);
pool_ = AddressPoolManager::GetInstance()->Add(
reinterpret_cast<uintptr_t>(base_address_), kPoolSize);
}
void TearDown() override { FreePages(base_address_, kPoolSize); }
// Number of super pages the pool spans.
static constexpr size_t kPageCnt = 8192;
static constexpr size_t kPoolSize = kSuperPageSize * kPageCnt;
// Base of the reserved region; valid between SetUp() and TearDown().
void* base_address_;
// Handle of the pool registered in SetUp().
pool_handle pool_;
};
TEST_F(AddressPoolManagerTest, TooLargePool) {
uintptr_t base_addr = 0x4200000;
constexpr size_t kSize = 16ull * 1024 * 1024 * 1024;
AddressPoolManager::GetInstance()->ResetForTesting();
AddressPoolManager::GetInstance()->Add(base_addr, kSize);
EXPECT_DEATH_IF_SUPPORTED(
AddressPoolManager::GetInstance()->Add(base_addr, kSize + kSuperPageSize),
"");
}
TEST(AddressPoolManager, OnePage) {
uintptr_t base_addr = 0x4200000;
char* base_ptr = reinterpret_cast<char*>(base_addr);
AddressPoolManager::GetInstance()->ResetForTesting();
pool_handle pool =
AddressPoolManager::GetInstance()->Add(base_addr, kSuperPageSize);
TEST_F(AddressPoolManagerTest, ManyPages) {
char* base_ptr = reinterpret_cast<char*>(base_address_);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize + 1),
nullptr);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
kPageCnt * kSuperPageSize),
base_ptr);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
nullptr);
AddressPoolManager::GetInstance()->Free(pool, base_ptr, kSuperPageSize);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
base_ptr);
}
TEST(AddressPoolManager, ManyPages) {
uintptr_t base_addr = 0x4200000;
char* base_ptr = reinterpret_cast<char*>(base_addr);
AddressPoolManager::GetInstance()->ResetForTesting();
constexpr size_t kPageCnt = 8192;
pool_handle pool = AddressPoolManager::GetInstance()->Add(
base_addr, kPageCnt * kSuperPageSize);
EXPECT_EQ(
AddressPoolManager::GetInstance()->Alloc(pool, kPageCnt * kSuperPageSize),
base_ptr);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
nullptr);
AddressPoolManager::GetInstance()->Free(pool, base_ptr,
AddressPoolManager::GetInstance()->Free(pool_, base_ptr,
kPageCnt * kSuperPageSize);
EXPECT_EQ(
AddressPoolManager::GetInstance()->Alloc(pool, kPageCnt * kSuperPageSize),
base_ptr);
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
kPageCnt * kSuperPageSize),
base_ptr);
}
TEST(AddressPoolManager, PagesFragmented) {
uintptr_t base_addr = 0x4200000;
char* base_ptr = reinterpret_cast<char*>(base_addr);
AddressPoolManager::GetInstance()->ResetForTesting();
constexpr size_t kPageCnt = 8192;
pool_handle pool = AddressPoolManager::GetInstance()->Add(
base_addr, kPageCnt * kSuperPageSize);
TEST_F(AddressPoolManagerTest, PagesFragmented) {
char* base_ptr = reinterpret_cast<char*>(base_address_);
void* addrs[kPageCnt];
for (size_t i = 0; i < kPageCnt; ++i) {
addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize);
addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
}
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
nullptr);
for (size_t i = 1; i < kPageCnt; i += 2) {
AddressPoolManager::GetInstance()->Free(pool, addrs[i], kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, addrs[i], kSuperPageSize);
}
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, 2 * kSuperPageSize),
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize),
nullptr);
for (size_t i = 1; i < kPageCnt; i += 2) {
addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize);
addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
}
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize),
EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
nullptr);
}
TEST(AddressPoolManager, IrregularPattern) {
uintptr_t base_addr = 0x4200000;
char* base_ptr = reinterpret_cast<char*>(base_addr);
AddressPoolManager::GetInstance()->ResetForTesting();
constexpr size_t kPageCnt = 8192;
pool_handle pool = AddressPoolManager::GetInstance()->Add(
base_addr, kPageCnt * kSuperPageSize);
TEST_F(AddressPoolManagerTest, IrregularPattern) {
char* base_ptr = reinterpret_cast<char*>(base_address_);
void* a1 = AddressPoolManager::GetInstance()->Alloc(pool, kSuperPageSize);
void* a1 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
EXPECT_EQ(a1, base_ptr);
void* a2 = AddressPoolManager::GetInstance()->Alloc(pool, 2 * kSuperPageSize);
void* a2 =
AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
EXPECT_EQ(a2, base_ptr + 1 * kSuperPageSize);
void* a3 = AddressPoolManager::GetInstance()->Alloc(pool, 3 * kSuperPageSize);
void* a3 =
AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
EXPECT_EQ(a3, base_ptr + 3 * kSuperPageSize);
void* a4 = AddressPoolManager::GetInstance()->Alloc(pool, 4 * kSuperPageSize);
void* a4 =
AddressPoolManager::GetInstance()->Alloc(pool_, 4 * kSuperPageSize);
EXPECT_EQ(a4, base_ptr + 6 * kSuperPageSize);
void* a5 = AddressPoolManager::GetInstance()->Alloc(pool, 5 * kSuperPageSize);
void* a5 =
AddressPoolManager::GetInstance()->Alloc(pool_, 5 * kSuperPageSize);
EXPECT_EQ(a5, base_ptr + 10 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool, a4, 4 * kSuperPageSize);
void* a6 = AddressPoolManager::GetInstance()->Alloc(pool, 6 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, a4, 4 * kSuperPageSize);
void* a6 =
AddressPoolManager::GetInstance()->Alloc(pool_, 6 * kSuperPageSize);
EXPECT_EQ(a6, base_ptr + 15 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool, a5, 5 * kSuperPageSize);
void* a7 = AddressPoolManager::GetInstance()->Alloc(pool, 7 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, a5, 5 * kSuperPageSize);
void* a7 =
AddressPoolManager::GetInstance()->Alloc(pool_, 7 * kSuperPageSize);
EXPECT_EQ(a7, base_ptr + 6 * kSuperPageSize);
void* a8 = AddressPoolManager::GetInstance()->Alloc(pool, 3 * kSuperPageSize);
void* a8 =
AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
EXPECT_EQ(a8, base_ptr + 21 * kSuperPageSize);
void* a9 = AddressPoolManager::GetInstance()->Alloc(pool, 2 * kSuperPageSize);
void* a9 =
AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
EXPECT_EQ(a9, base_ptr + 13 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool, a7, 7 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool, a9, 2 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool, a6, 6 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, a7, 7 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, a9, 2 * kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, a6, 6 * kSuperPageSize);
void* a10 =
AddressPoolManager::GetInstance()->Alloc(pool, 15 * kSuperPageSize);
AddressPoolManager::GetInstance()->Alloc(pool_, 15 * kSuperPageSize);
EXPECT_EQ(a10, base_ptr + 6 * kSuperPageSize);
}
// Regression test: memory handed back by Free() must read as zero when the
// same range is handed out again, even though Free() no longer memset()s it
// (on macOS it is remapped instead).
TEST_F(AddressPoolManagerTest, DecommittedDataIsErased) {
void* data = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
ASSERT_TRUE(data);
// Dirty the whole allocation with a non-zero pattern.
memset(data, 42, kSuperPageSize);
AddressPoolManager::GetInstance()->Free(pool_, data, kSuperPageSize);
void* data2 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
// The pool is expected to reuse the just-freed range.
ASSERT_EQ(data, data2);
uint32_t sum = 0;
for (size_t i = 0; i < kSuperPageSize; i++) {
sum += reinterpret_cast<uint8_t*>(data2)[i];
}
// Any surviving byte (all were 42) makes the sum non-zero.
EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";
}
#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
} // namespace internal
......
......@@ -251,6 +251,29 @@ TEST(PageAllocatorTest, PageTagging) {
}
#endif // defined(OS_ANDROID)
#if !defined(OS_MACOSX)
// Checks that a DecommitSystemPages() + RecommitSystemPages() round trip
// yields zero-filled pages. Excluded on macOS, where decommit semantics
// differ (see the CL description) and erasure is not guaranteed by these
// two calls alone.
TEST(PageAllocatorTest, DecommitErasesMemory) {
  size_t size = kPageAllocationGranularity;
  void* buffer = AllocPages(nullptr, size, kPageAllocationGranularity,
                            PageReadWrite, PageTag::kChromium, true);
  ASSERT_TRUE(buffer);

  // Dirty the page with a non-zero pattern before decommitting it.
  memset(buffer, 42, size);

  DecommitSystemPages(buffer, size);
  EXPECT_TRUE(RecommitSystemPages(buffer, size, PageReadWrite));

  uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
  uint32_t sum = 0;
  for (size_t i = 0; i < size; i++) {
    sum += recommitted_buffer[i];
  }
  EXPECT_EQ(0u, sum) << "Data was not erased";

  // Fix a leak in the original test: release the mapping.
  FreePages(buffer, size);
}
#endif  // !defined(OS_MACOSX)
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
......@@ -24,21 +24,6 @@ namespace internal {
namespace {
// Pre-change helper (removed by this CL in favor of AddressPoolManager
// committing pages itself): carves |map_size| bytes out of |pool| and makes
// them read/write. Returns nullptr if the pool is exhausted.
char* CommitPages(internal::pool_handle pool, size_t map_size) {
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
char* ptr =
internal::AddressPoolManager::GetInstance()->Alloc(pool, map_size);
if (UNLIKELY(!ptr))
return nullptr;
// Pool chunks are expected to be a whole number of system pages.
PA_DCHECK(!(map_size & kSystemPageOffsetMask));
SetSystemPagesAccess(ptr, map_size, PageReadWrite);
return ptr;
#else
// GigaCage pools only exist on 64-bit, non-NaCl builds.
NOTREACHED();
return nullptr;
#endif
}
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
PartitionRoot<thread_safe>* root,
......@@ -63,7 +48,12 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
char* ptr = nullptr;
if (IsPartitionAllocGigaCageEnabled()) {
ptr = CommitPages(GetDirectMapPool(), map_size);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
ptr = internal::AddressPoolManager::GetInstance()->Alloc(GetDirectMapPool(),
map_size);
#else
NOTREACHED();
#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
} else {
ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size, kSuperPageSize,
PageReadWrite,
......@@ -256,7 +246,12 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
char* requested_address = root->next_super_page;
char* super_page = nullptr;
if (IsPartitionAllocGigaCageEnabled()) {
super_page = CommitPages(GetNormalBucketPool(), kSuperPageSize);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
super_page = AddressPoolManager::GetInstance()->Alloc(GetNormalBucketPool(),
kSuperPageSize);
#else
NOTREACHED();
#endif
} else {
super_page = reinterpret_cast<char*>(
AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
......
......@@ -19,22 +19,6 @@ namespace internal {
namespace {
// Pre-change helper (removed by this CL; the pool now decommits in Free()):
// decommits [address, address + size) and returns it to the direct-map pool.
void DecommitPages(void* address, size_t size) {
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
#if defined(OS_MACOSX)
// macOS: make the range writable so it can be zeroed by hand, matching the
// zero-on-recommit guarantee the other platforms get from decommit alone.
// This memset() is exactly the cost the CL eliminates.
SetSystemPagesAccess(address, size, PageReadWrite);
memset(address, 0, size);
#endif
SetSystemPagesAccess(address, size, PageInaccessible);
DecommitSystemPages(address, size);
internal::AddressPoolManager::GetInstance()->Free(
internal::GetDirectMapPool(), address, size);
#else
// GigaCage pools only exist on 64-bit, non-NaCl builds.
NOTREACHED();
#endif
}
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
......@@ -201,7 +185,12 @@ void PartitionPage<thread_safe>::DecommitIfPossible(
void DeferredUnmap::Unmap() {
PA_DCHECK(ptr && size > 0);
if (IsManagedByPartitionAlloc(ptr)) {
DecommitPages(ptr, size);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
internal::AddressPoolManager::GetInstance()->Free(
internal::GetDirectMapPool(), ptr, size);
#else
NOTREACHED();
#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
} else {
FreePages(ptr, size);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment