Commit a27feee1 authored by Benoit Lize, committed by Commit Bot

android: Support PartitionAlloc as the malloc() implementation.

This CL adds support for the 'use_allocator = "partition"' GN argument
on Android and Linux. This is not meant to ship, but to ease
experimentation. Without this flag, there is no behavior change. With
it, malloc() and new calls are redirected to PartitionAlloc.
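
A sketch of how an experiment would be configured in args.gn; only the
use_allocator line comes from this CL, the rest is an assumed Android setup:

  # args.gn (illustrative)
  target_os = "android"          # drop this line for a Linux build
  use_allocator = "partition"    # route malloc()/new to PartitionAlloc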

For DCHECK_IS_ON() builds, PartitionAlloc does not provide the alignment
guarantees needed to support posix_memalign(), which is required at least
on Linux. This is due to the addition of a "cookie" on both sides of each
allocation, meant to detect improper memory writes. With this CL, the
cookies are temporarily disabled when PartitionAlloc is used as the
malloc() implementation.
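
To see why the cookies conflict with posix_memalign(), consider a minimal
sketch (illustrative constant mirroring kCookieSize in partition_cookie.h;
not code from this CL):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // PartitionAlloc's DCHECK-build cookie size (see partition_cookie.h).
  constexpr size_t kCookieSize = 16;

  int main() {
    // Suppose the allocator produces a 64-byte-aligned slot.
    alignas(64) static unsigned char slot[128];
    assert(reinterpret_cast<uintptr_t>(slot) % 64 == 0);

    // With cookies, the caller gets the address just past the leading
    // cookie, so a posix_memalign(&p, 64, ...) caller would observe a
    // pointer that is only 16-byte aligned in general:
    unsigned char* user_ptr = slot + kCookieSize;
    assert(reinterpret_cast<uintptr_t>(user_ptr) % 64 != 0);
    return 0;
  }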

Change-Id: I1b1b6fc855fb48bbad04a82a2b60e320194943fd
Bug: 998048
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2167281
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Will Harris <wfh@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#776916}
parent dde3499e
@@ -1313,12 +1313,22 @@ jumbo_component("base") {
     deps += [ "//base/allocator:tcmalloc" ]
   } else if (is_linux && use_allocator == "none") {
     sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
-  } else if (is_android && use_allocator == "none") {
-    sources += [
-      "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
-      "allocator/allocator_shim_override_linker_wrapped_symbols.h",
-    ]
+  } else if ((is_linux || is_android) && use_allocator == "partition") {
+    # Cannot use the same dispatching for host-side binaries.
+    if (is_a_target_toolchain) {
+      sources += [
+        "allocator/allocator_shim_default_dispatch_to_partition_alloc.cc",
+      ]
+    } else {
+      sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
+    }
+  } else if (is_android) {
+    sources +=
+        [ "allocator/allocator_shim_override_linker_wrapped_symbols.h" ]
     all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ]
+    if (use_allocator == "none") {
+      sources += [ "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc" ]
+    }
   } else if (is_mac || is_ios) {
     sources += [
       "allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc",
...
@@ -276,12 +276,14 @@ if (use_allocator == "tcmalloc") {
 buildflag_header("buildflags") {
   header = "buildflags.h"
-  flags = [ "USE_ALLOCATOR_SHIM=$use_allocator_shim" ]
-  if (use_allocator == "tcmalloc") {
-    flags += [ "USE_TCMALLOC=1" ]
-  } else {
-    flags += [ "USE_TCMALLOC=0" ]
-  }
+  _use_partition_alloc = use_allocator == "partition"
+  _use_tcmalloc = use_allocator == "tcmalloc"
+
+  flags = [
+    "USE_ALLOCATOR_SHIM=$use_allocator_shim",
+    "USE_TCMALLOC=$_use_tcmalloc",
+    "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc",
+  ]
 }

 # Used to shim malloc symbols on Android. see //base/allocator/README.md.
...
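
For reference, the generated header is consumed through the BUILDFLAG()
macro; the C++ changes below all follow this pattern (a sketch, not a file
from this CL):

  #include "base/allocator/buildflags.h"

  #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  // Compiled only when gn has use_allocator = "partition".
  #endif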
@@ -33,7 +33,11 @@ declare_args() {
   use_allocator_shim = _default_use_allocator_shim
 }

-assert(use_allocator == "none" || use_allocator == "tcmalloc")
+assert(use_allocator == "none" || use_allocator == "tcmalloc" ||
+       use_allocator == "partition")
+
+# Don't ship this configuration, not ready yet.
+assert(!(use_allocator == "partition" && is_official_build))

 assert(!is_win || use_allocator == "none", "Tcmalloc doesn't work on Windows.")
 assert(!is_mac || use_allocator == "none", "Tcmalloc doesn't work on macOS.")
...
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/bits.h"
#include "base/no_destructor.h"

namespace {

base::PartitionRootGeneric& Allocator() {
  static base::NoDestructor<base::PartitionRootGeneric> allocator;
  allocator->Init();
  return *allocator;
}

using base::allocator::AllocatorDispatch;

void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
  return Allocator().Alloc(size, "");
}

void* PartitionCalloc(const AllocatorDispatch*,
                      size_t n,
                      size_t size,
                      void* context) {
  return Allocator().AllocFlags(base::PartitionAllocZeroFill, n * size, "");
}

void* PartitionMemalign(const AllocatorDispatch*,
                        size_t alignment,
                        size_t size,
                        void* context) {
  // This is mandated by |posix_memalign()|, so should never fire.
  //
  // Note: CHECK() is fine here since we are not called from malloc(), but from
  // posix_memalign(), so there is no recursion. It is also fine to make aligned
  // allocations slower, as they are rare.
  CHECK(base::bits::IsPowerOfTwo(alignment));

  // PartitionAlloc only guarantees alignment for power-of-two sized
  // allocations. To make sure this applies here, round up the allocation size.
  size_t size_rounded_up =
      static_cast<size_t>(1)
      << (sizeof(size_t) * 8 - base::bits::CountLeadingZeroBits(size - 1));

  void* ptr = Allocator().Alloc(size_rounded_up, "");
  CHECK_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, 0ull);
  return ptr;
}

void* PartitionRealloc(const AllocatorDispatch*,
                       void* address,
                       size_t size,
                       void* context) {
  return Allocator().Realloc(address, size, "");
}

void PartitionFree(const AllocatorDispatch*, void* address, void* context) {
  Allocator().Free(address);
}

size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
                                void* address,
                                void* context) {
  // TODO(lizeb): Returns incorrect values for aligned allocations.
  return base::PartitionAllocGetSize(address);
}

}  // namespace

constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
    &PartitionMalloc,          /* alloc_function */
    &PartitionCalloc,          /* alloc_zero_initialized_function */
    &PartitionMemalign,        /* alloc_aligned_function */
    &PartitionRealloc,         /* realloc_function */
    &PartitionFree,            /* free_function */
    &PartitionGetSizeEstimate, /* get_size_estimate_function */
    nullptr,                   /* batch_malloc_function */
    nullptr,                   /* batch_free_function */
    nullptr,                   /* free_definite_size_function */
    nullptr,                   /* aligned_malloc_function */
    nullptr,                   /* aligned_realloc_function */
    nullptr,                   /* aligned_free_function */
    nullptr,                   /* next */
};

// Intercept diagnostics symbols as well, even though they are not part of the
// unified shim layer.
//
// TODO(lizeb): Implement the ones that are doable.
extern "C" {

SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}

SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
  return 0;
}

#ifdef HAVE_STRUCT_MALLINFO
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
  return {};
}
#endif

}  // extern "C"
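
The rounding in PartitionMemalign() above computes the smallest power of two
that is >= |size|, which is what makes PartitionAlloc's power-of-two alignment
guarantee apply. A standalone sketch of the same computation, written with a
plain loop instead of the CountLeadingZeroBits() bit trick (helper name
hypothetical):

  #include <cassert>
  #include <cstddef>

  // Equivalent to the bit trick above for size >= 1.
  size_t NextPowerOfTwo(size_t size) {
    size_t rounded = 1;
    while (rounded < size)
      rounded <<= 1;
    return rounded;
  }

  int main() {
    assert(NextPowerOfTwo(48) == 64);   // e.g. posix_memalign(&p, 64, 48)
    assert(NextPowerOfTwo(64) == 64);   // powers of two are unchanged
    assert(NextPowerOfTwo(65) == 128);
    return 0;
  }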
@@ -72,7 +72,6 @@ static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
 PartitionRoot::PartitionRoot() = default;
 PartitionRoot::~PartitionRoot() = default;
-PartitionRootGeneric::PartitionRootGeneric() = default;
 PartitionRootGeneric::~PartitionRootGeneric() = default;

 PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
@@ -228,9 +227,12 @@ void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
   }
 }

-void PartitionRootGeneric::Init() {
+void PartitionRootGeneric::InitSlowPath() {
   ScopedGuard guard{lock_};

+  if (this->initialized.load(std::memory_order_relaxed))
+    return;
+
   PartitionAllocBaseInit(this);

   // Precalculate some shift and mask constants used in the hot path.
...
@@ -141,7 +141,7 @@ struct BASE_EXPORT PartitionRoot
 // PartitionAllocatorGeneric.
 struct BASE_EXPORT PartitionRootGeneric
     : public internal::PartitionRootBase<internal::ThreadSafe> {
-  PartitionRootGeneric();
+  PartitionRootGeneric() = default;
   ~PartitionRootGeneric() override;

   // Some pre-computed constants.
   size_t order_index_shifts[kBitsPerSizeT + 1] = {};
@@ -156,7 +156,14 @@ struct BASE_EXPORT PartitionRootGeneric
   Bucket buckets[kGenericNumBuckets] = {};

   // Public API.
-  void Init();
+  ALWAYS_INLINE void Init() {
+    if (LIKELY(this->initialized.load(std::memory_order_relaxed)))
+      return;
+    InitSlowPath();
+  }
+
+  NOINLINE void InitSlowPath();
+
   ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
   ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
...
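
The Init()/InitSlowPath() split above is double-checked initialization: the
hot path is a single relaxed atomic load, and the lock is only taken (with the
flag re-checked under it) on first use. A minimal standalone sketch of the
pattern, with hypothetical names and a std::mutex standing in for
MaybeSpinLock:

  #include <atomic>
  #include <mutex>

  class LazyRoot {
   public:
    void Init() {
      // Hot path: no lock once initialization has completed.
      if (initialized_.load(std::memory_order_relaxed))
        return;
      InitSlowPath();
    }

   private:
    void InitSlowPath() {
      std::lock_guard<std::mutex> guard(lock_);
      // Re-check under the lock: another thread may have initialized first.
      if (initialized_.load(std::memory_order_relaxed))
        return;
      // ... one-time setup would go here ...
      initialized_.store(true, std::memory_order_relaxed);
    }

    std::mutex lock_;
    std::atomic<bool> initialized_{false};
  };

(A general-purpose version would publish the setup with release/acquire
ordering; the relaxed load here matches what the CL itself uses.)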
@@ -5,6 +5,7 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_

+#include "base/allocator/buildflags.h"
 #include "base/base_export.h"
 #include "base/feature_list.h"
 #include "base/metrics/field_trial_params.h"
@@ -17,7 +18,10 @@ struct Feature;
 extern const BASE_EXPORT Feature kPartitionAllocGigaCage;

 ALWAYS_INLINE bool IsPartitionAllocGigaCageEnabled() {
-#if !defined(ARCH_CPU_64_BITS)
+  // The feature is not applicable to 32 bit architectures (not enough address
+  // space). It is also incompatible with PartitionAlloc as malloc(), as the
+  // base::Feature code allocates, leading to reentrancy in PartitionAlloc.
+#if !defined(ARCH_CPU_64_BITS) || BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
   return false;
 #else
   return FeatureList::IsEnabled(kPartitionAllocGigaCage);
...
@@ -5,65 +5,81 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_

+#include "base/allocator/buildflags.h"
 #include "base/compiler_specific.h"
 #include "base/logging.h"

 namespace base {
 namespace internal {

-#if DCHECK_IS_ON()
 // Handles alignment up to XMM instructions on Intel.
 static constexpr size_t kCookieSize = 16;

+// Cookies are enabled for debug builds, unless PartitionAlloc is used as the
+// malloc() implementation. This is a temporary workaround for the alignment
+// issues caused by cookies. With them, PartitionAlloc cannot support
+// posix_memalign(), which is required.
+//
+// TODO(lizeb): Support cookies when used as the malloc() implementation.
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
 static constexpr unsigned char kCookieValue[kCookieSize] = {
     0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
     0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
-#endif

 ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
-#if DCHECK_IS_ON()
   unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
   for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
     DCHECK(*cookie_ptr == kCookieValue[i]);
-#endif
 }

 ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
-#if DCHECK_IS_ON()
   // Add space for cookies, checking for integer overflow. TODO(palmer):
   // Investigate the performance and code size implications of using
   // CheckedNumeric throughout PA.
   DCHECK(size + (2 * kCookieSize) > size);
   size += 2 * kCookieSize;
-#endif
   return size;
 }

 ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
-#if DCHECK_IS_ON()
   // The value given to the application is actually just after the cookie.
   ptr = static_cast<char*>(ptr) - kCookieSize;
-#endif
   return ptr;
 }

 ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
-#if DCHECK_IS_ON()
   // Remove space for cookies.
   DCHECK(size >= 2 * kCookieSize);
   size -= 2 * kCookieSize;
-#endif
   return size;
 }

 ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
-#if DCHECK_IS_ON()
   unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
   for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
     *cookie_ptr = kCookieValue[i];
-#endif
 }

+#else
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+  return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+  return ptr;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+  return size;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {}
+
+#endif  // DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
 }  // namespace internal
 }  // namespace base
...
@@ -38,11 +38,6 @@ void PartitionRootBase<thread_safe>::DecommitEmptyPages() {
   }
 }

-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::PartitionRootBase() = default;
-
-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::~PartitionRootBase() = default;
-
 template struct PartitionRootBase<ThreadSafe>;
 template struct PartitionRootBase<NotThreadSafe>;
...
@@ -5,6 +5,8 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_

+#include <atomic>
+
 #include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@@ -169,13 +171,13 @@ static_assert(
 static OomFunction g_oom_handling_function = nullptr;

 template <bool thread_safety>
-struct BASE_EXPORT PartitionRootBase {
+struct PartitionRootBase {
   using Page = PartitionPage<thread_safety>;
   using Bucket = PartitionBucket<thread_safety>;
   using ScopedGuard = internal::ScopedGuard<thread_safety>;

-  PartitionRootBase();
-  virtual ~PartitionRootBase();
+  PartitionRootBase() = default;
+  virtual ~PartitionRootBase() = default;
   MaybeSpinLock<thread_safety> lock_;
   size_t total_size_of_committed_pages = 0;
   size_t total_size_of_super_pages = 0;
@@ -185,7 +187,8 @@ struct BASE_EXPORT PartitionRootBase {
   // total_size_of_direct_mapped_pages.
   unsigned num_buckets = 0;
   unsigned max_allocation = 0;
-  bool initialized = false;
+  // Atomic as initialization can be concurrent.
+  std::atomic<bool> initialized = {};
   char* next_super_page = nullptr;
   char* next_partition_page = nullptr;
   char* next_partition_page_end = nullptr;
@@ -263,7 +266,7 @@ ALWAYS_INLINE void* PartitionRootBase<thread_safety>::AllocFromBucket(
     DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
   }

-#if DCHECK_IS_ON()
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
   if (!ret) {
     return nullptr;
   }
...
@@ -132,11 +132,11 @@ bool UncheckedMalloc(size_t size, void** result) {
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
   *result = allocator::UncheckedAlloc(size);
 #elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
-    (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
+    (!defined(LIBC_GLIBC) && !BUILDFLAG(USE_TCMALLOC))
   *result = malloc(size);
-#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+#elif defined(LIBC_GLIBC) && !BUILDFLAG(USE_TCMALLOC)
   *result = __libc_malloc(size);
-#elif defined(USE_TCMALLOC)
+#elif BUILDFLAG(USE_TCMALLOC)
   *result = tc_malloc_skip_new_handler(size);
 #endif
   return *result != nullptr;
...