Commit 750797b1 authored by Sergei Glazunov, committed by Chromium LUCI CQ

Add option to disable BackupRefPtr in renderers

Currently, we can't use BackupRefPtr in renderers due to severe
performance regressions, so add an option to disable the allocator
support for the per-allocation reference count slot and save the
memory it would occupy.

To do so, as soon as we can detect whether the current process is a
renderer, create a new partition with the desired set of flags and swap
it in for the "temporary" one in the allocator shim. Allocations made
before the swap keep using the old partition.

Bug: 1073933
Change-Id: Icb0d9359953c3b9b0cdc0c4ced113b4eca7a5c83
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2449981
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Ken Rockot <rockot@google.com>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Sergei Glazunov <glazunov@google.com>
Cr-Commit-Position: refs/heads/master@{#842138}
parent cb31e164
@@ -162,6 +162,8 @@ BASE_EXPORT void InitializeAllocatorShim();
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
BASE_EXPORT void ConfigurePartitionRefCountSupport(bool enable_ref_count);
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && ALLOW_PCSCAN
......
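
For context, the new entry point is meant to be called exactly as the
content/ call sites later in this commit do (quoted here as a usage
sketch):

  #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && DISABLE_REF_COUNT_IN_RENDERER
    // Renderers get RefCount::kDisabled; all other process types keep
    // the ref-count slot so BackupRefPtr can work there.
    base::allocator::ConfigurePartitionRefCountSupport(
        process_type != switches::kRendererProcess);
  #endif
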
@@ -6,6 +6,7 @@
#include "base/allocator/allocator_shim_internals.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
@@ -92,14 +93,19 @@ base::ThreadSafePartitionRoot* Allocator() {
auto* new_root = new (g_allocator_buffer) base::ThreadSafePartitionRoot({
base::PartitionOptions::Alignment::kRegular,
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !DISABLE_REF_COUNT_IN_RENDERER
base::PartitionOptions::ThreadCache::kEnabled,
#else
// Other tests, such as the ThreadCache tests, create a thread cache, and
// only one is supported at a time.
//
// Also, with DISABLE_REF_COUNT_IN_RENDERER, this partition is only
// temporary until we determine what type of process we're running in.
// Leave the ability to have a thread cache to the main partition.
base::PartitionOptions::ThreadCache::kDisabled,
#endif
base::PartitionOptions::PCScan::kDisabledByDefault
base::PartitionOptions::PCScan::kDisabledByDefault,
base::PartitionOptions::RefCount::kEnabled,
});
g_root_.store(new_root, std::memory_order_release);
@@ -305,6 +311,29 @@ void EnablePartitionAllocMemoryReclaimer() {
AlignedAllocator());
}
alignas(base::ThreadSafePartitionRoot) uint8_t
g_allocator_buffer_for_ref_count_config[sizeof(
base::ThreadSafePartitionRoot)];
void ConfigurePartitionRefCountSupport(bool enable_ref_count) {
auto* current_root = g_root_.load(std::memory_order_acquire);
// We expect a number of heap allocations to be made before this function is
// called, which should force the `g_root` initialization.
PA_CHECK(current_root);
current_root->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
PartitionPurgeDiscardUnusedSystemPages);
auto* new_root = new (g_allocator_buffer_for_ref_count_config)
base::ThreadSafePartitionRoot({
base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kEnabled,
base::PartitionOptions::PCScan::kDisabledByDefault,
enable_ref_count ? base::PartitionOptions::RefCount::kEnabled
: base::PartitionOptions::RefCount::kDisabled,
});
g_root_.store(new_root, std::memory_order_release);
}
#if ALLOW_PCSCAN
void EnablePCScan() {
Allocator()->EnablePCScan();
@@ -321,7 +350,6 @@ void ConfigurePartitionAlloc() {
#endif // defined(ARCH_CPU_X86)
}
#endif // defined(OS_WIN)
} // namespace allocator
} // namespace base
......
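
A note on the memory ordering used with g_root_ above: the release store
in ConfigurePartitionRefCountSupport pairs with acquire loads on the
allocation paths, so any thread that observes the new root pointer also
observes a fully constructed root. A generic sketch of the same pairing
(hypothetical types):

  #include <atomic>

  struct PartitionLikeRoot { int configured = 0; };
  std::atomic<PartitionLikeRoot*> g_current{nullptr};

  void Publish(PartitionLikeRoot* fresh) {
    fresh->configured = 42;                             // happens-before...
    g_current.store(fresh, std::memory_order_release);  // ...this store.
  }

  int Consume() {
    PartitionLikeRoot* root = g_current.load(std::memory_order_acquire);
    // A non-null root is guaranteed to show configured == 42 here.
    return root ? root->configured : -1;
  }
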
@@ -6,5 +6,12 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHECKED_PTR_SUPPORT_H_
#define ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR 0
#define DISABLE_REF_COUNT_IN_RENDERER 0
#if DISABLE_REF_COUNT_IN_RENDERER
static_assert(ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR,
"DISABLE_REF_COUNT_IN_RENDERER can only by used if "
"PartitionRefCount is enabled");
#endif
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_CHECKED_PTR_SUPPORT_H_
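
The static_assert above is a small guard pattern worth naming: a feature
macro whose non-zero value is only legal when its prerequisite is also
compiled in. The same idea in isolation (placeholder macro names):

  #define PREREQUISITE_FEATURE 1
  #define DEPENDENT_FEATURE 0

  #if DEPENDENT_FEATURE
  static_assert(PREREQUISITE_FEATURE,
                "DEPENDENT_FEATURE requires PREREQUISITE_FEATURE");
  #endif
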
@@ -391,19 +391,27 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
// If alignment needs to be enforced, disallow adding a cookie and/or
// ref-count at the beginning of the slot.
allow_extras = (opts.alignment != PartitionOptions::Alignment::kAlignedAlloc);
if (opts.alignment == PartitionOptions::Alignment::kAlignedAlloc) {
allow_cookies = false;
allow_ref_count = false;
} else {
allow_cookies = true;
allow_ref_count = opts.ref_count == PartitionOptions::RefCount::kEnabled;
}
#if PARTITION_EXTRAS_REQUIRED
size_t size = 0, offset = 0;
if (allow_extras) {
size += internal::kPartitionCookieSizeAdjustment;
size += internal::kPartitionRefCountSizeAdjustment;
extras_size = 0;
extras_offset = 0;
if (allow_cookies) {
extras_size += internal::kPartitionCookieSizeAdjustment;
extras_offset += internal::kPartitionCookieOffsetAdjustment;
}
offset += internal::kPartitionCookieOffsetAdjustment;
offset += internal::kPartitionRefCountOffsetAdjustment;
if (allow_ref_count) {
extras_size += internal::kPartitionRefCountSizeAdjustment;
extras_offset += internal::kPartitionRefCountOffsetAdjustment;
}
extras_size = static_cast<uint32_t>(size);
extras_offset = static_cast<uint32_t>(offset);
#endif
pcscan_mode = PartitionOptionsToPCScanMode<thread_safe>(opts.pcscan);
@@ -519,7 +527,7 @@ bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
#if DCHECK_IS_ON()
// Write a new trailing cookie.
if (allow_extras) {
if (allow_cookies) {
internal::PartitionCookieWriteValue(char_ptr + raw_size -
internal::kCookieSize);
}
@@ -605,7 +613,7 @@ void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
#if DCHECK_IS_ON()
// Write a new trailing cookie only when it is possible to keep track of
// the raw size (otherwise we wouldn't know where to look for it later).
if (allow_extras) {
if (allow_cookies) {
internal::PartitionCookieWriteValue(static_cast<char*>(ptr) +
new_size);
}
......
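
What the Init() change above computes, reduced to a free function: each
opted-in extra contributes independently to the slot overhead
(extras_size) and to how far user data starts into the slot
(extras_offset). A sketch with illustrative constants, not the real
kPartition*Adjustment values:

  #include <cstdint>

  struct Extras { uint32_t size = 0; uint32_t offset = 0; };

  // Stand-ins: cookies sit on both sides of the data (size counts both,
  // offset only the leading one); the ref-count sits before the data.
  constexpr uint32_t kCookieSizeAdjustment = 32;
  constexpr uint32_t kCookieOffsetAdjustment = 16;
  constexpr uint32_t kRefCountSizeAdjustment = 8;
  constexpr uint32_t kRefCountOffsetAdjustment = 8;

  Extras ComputeExtras(bool allow_cookies, bool allow_ref_count) {
    Extras e;
    if (allow_cookies) {
      e.size += kCookieSizeAdjustment;
      e.offset += kCookieOffsetAdjustment;
    }
    if (allow_ref_count) {
      e.size += kRefCountSizeAdjustment;
      e.offset += kRefCountOffsetAdjustment;
    }
    return e;
  }
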
@@ -130,9 +130,15 @@ struct PartitionOptions {
kForcedEnabledForTesting,
};
enum class RefCount {
kEnabled,
kDisabled,
};
Alignment alignment;
ThreadCache thread_cache;
PCScan pcscan;
RefCount ref_count;
};
// Never instantiate a PartitionRoot directly, instead use
@@ -161,7 +167,8 @@ struct BASE_EXPORT PartitionRoot {
bool with_thread_cache = false;
const bool is_thread_safe = thread_safe;
bool allow_extras;
bool allow_ref_count;
bool allow_cookies;
#if !PARTITION_EXTRAS_REQUIRED
// Teach the compiler that `AdjustSizeForExtrasAdd` etc. can be eliminated
@@ -318,7 +325,7 @@ struct BASE_EXPORT PartitionRoot {
}
bool UsesGigaCage() const {
return features::IsPartitionAllocGigaCageEnabled() && allow_extras;
return features::IsPartitionAllocGigaCageEnabled() && allow_ref_count;
}
ALWAYS_INLINE bool IsScannable() const {
@@ -648,8 +655,8 @@ ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
ptr);
auto* root = PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span);
// The only allocations that don't use ref-count are allocated outside of
// GigaCage, hence we'd never get here in the `allow_extras = false` case.
PA_DCHECK(root->allow_extras);
// GigaCage, hence we'd never get here in the `allow_ref_count = false` case.
PA_DCHECK(root->allow_ref_count);
// Get the offset from the beginning of the slot span.
uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
@@ -667,8 +674,8 @@ ALWAYS_INLINE void* PartitionAllocGetSlotStart(void* ptr) {
ptr);
auto* root = PartitionRoot<internal::ThreadSafe>::FromSlotSpan(slot_span);
// The only allocations that don't use ref-count are allocated outside of
// GigaCage, hence we'd never get here in the `allow_extras = false` case.
PA_DCHECK(root->allow_extras);
// GigaCage, hence we'd never get here in the `allow_ref_count = false` case.
PA_DCHECK(root->allow_ref_count);
// Get the offset from the beginning of the slot span.
uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
@@ -692,15 +699,8 @@ ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* slot_start) {
SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(slot_start);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
// PartitionRefCount is required to be allocated inside a `PartitionRoot` that
// supports extras.
PA_DCHECK(root->allow_extras);
#ifdef ADDRESS_SANITIZER
void* ptr = root->AdjustPointerForExtrasAdd(slot_start);
size_t usable_size =
root->AdjustSizeForExtrasSubtract(slot_span->GetUtilizedSlotSize());
ASAN_UNPOISON_MEMORY_REGION(ptr, usable_size);
#endif
// supports reference counts.
PA_DCHECK(root->allow_ref_count);
#if DCHECK_IS_ON()
memset(slot_start, kFreedByte, slot_span->GetUtilizedSlotSize());
@@ -850,42 +850,41 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
ZERO_RANDOMLY_ON_FREE
const size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
#endif
if (allow_extras) {
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR || DCHECK_IS_ON()
size_t usable_size = AdjustSizeForExtrasSubtract(utilized_slot_size);
const size_t usable_size = AdjustSizeForExtrasSubtract(utilized_slot_size);
#endif
void* slot_start = AdjustPointerForExtrasSubtract(ptr);
#if DCHECK_IS_ON()
if (allow_cookies) {
// Verify 2 cookies surrounding the allocated region.
// If these asserts fire, you probably corrupted memory.
char* char_ptr = static_cast<char*>(ptr);
internal::PartitionCookieCheckValue(char_ptr - internal::kCookieSize);
internal::PartitionCookieCheckValue(char_ptr + usable_size);
}
#endif
void* slot_start = AdjustPointerForExtrasSubtract(ptr);
if (LIKELY(!slot_span->bucket->is_direct_mapped())) {
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
if (allow_ref_count) {
if (LIKELY(!slot_span->bucket->is_direct_mapped())) {
auto* ref_count = internal::PartitionRefCountPointer(slot_start);
// If we are holding the last reference to the allocation, it can be freed
// immediately. Otherwise, defer the operation and zap the memory to turn
// potential use-after-free issues into unexploitable crashes.
if (UNLIKELY(!ref_count->HasOneRef())) {
#ifdef ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(ptr, usable_size);
#else
if (UNLIKELY(!ref_count->HasOneRef()))
memset(ptr, kQuarantinedByte, usable_size);
#endif
}
if (UNLIKELY(!(ref_count->ReleaseFromAllocator())))
return;
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
}
}
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
// Shift ptr to the beginning of the slot.
// Shift `ptr` to the beginning of the slot.
ptr = slot_start;
} // if (allow_extras)
#if DCHECK_IS_ON()
memset(ptr, kFreedByte, utilized_slot_size);
@@ -1225,7 +1224,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
#if DCHECK_IS_ON()
// Surround the region with 2 cookies.
if (allow_extras) {
if (allow_cookies) {
char* char_ret = static_cast<char*>(ret);
internal::PartitionCookieWriteValue(char_ret - internal::kCookieSize);
internal::PartitionCookieWriteValue(char_ret + usable_size);
@@ -1244,14 +1243,15 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
memset(ret, 0, usable_size);
}
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
bool is_direct_mapped = raw_size > kMaxBucketed;
// LIKELY: Direct mapped allocations are large and rare.
if (allow_extras && LIKELY(!is_direct_mapped)) {
#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
new (internal::PartitionRefCountPointerNoDCheck(slot_start))
if (allow_ref_count && LIKELY(!is_direct_mapped)) {
new (internal::PartitionRefCountPointer(slot_start))
internal::PartitionRefCount();
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
}
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
return ret;
}
@@ -1275,7 +1275,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AlignedAllocFlags(
// Aligned allocation support relies on the natural alignment guarantees of
// PartitionAlloc. Since cookies and ref-count are layered on top of
// PartitionAlloc, they change the guarantees. As a consequence, forbid both.
PA_DCHECK(!allow_extras);
PA_DCHECK(!allow_cookies && !allow_ref_count);
// This is mandated by |posix_memalign()|, so should never fire.
PA_CHECK(base::bits::IsPowerOfTwo(alignment));
......
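
The free-path protocol visible in FreeNoHooksImmediate above, condensed
into a self-contained model (the RefCount type here is hypothetical; the
real PartitionRefCount is more compact and lives inside the slot):

  #include <atomic>
  #include <cstddef>
  #include <cstring>

  constexpr unsigned char kZapByte = 0xEF;  // illustrative value

  struct RefCount {
    std::atomic<int> count{1};  // 1 == the allocator's own reference
    bool HasOneRef() { return count.load(std::memory_order_acquire) == 1; }
    // True when the allocator held the last reference, i.e. the slot may
    // really be released now.
    bool ReleaseFromAllocator() {
      return count.fetch_sub(1, std::memory_order_acq_rel) == 1;
    }
  };

  void FreeWithBrp(RefCount* rc, void* ptr, std::size_t usable_size) {
    if (!rc->HasOneRef()) {
      // Some raw_ptr still points here: zap the bytes so a use-after-free
      // reads poison instead of attacker-controlled data.
      std::memset(ptr, kZapByte, usable_size);
    }
    if (!rc->ReleaseFromAllocator())
      return;  // deferred: freed when the last dangling reference drops
    // ... return the slot to the free list here ...
  }
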
@@ -10,6 +10,7 @@
#include <utility>
#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
......
@@ -17,6 +17,7 @@
#include "base/allocator/allocator_extension.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/checked_ptr_support.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/at_exit.h"
#include "base/base_switches.h"
@@ -258,6 +259,19 @@ void EnablePCScanForMallocPartitionsInBrowserProcessIfNeeded() {
#endif
}
// This function should be executed as early as possible once we can get the
// command line arguments and determine whether the process needs BRP support.
// Until that moment, all heap allocations end up in a slower temporary
// partition with no thread cache and cause heap fragmentation.
//
// Furthermore, since the function has to allocate a new partition, it must
// only run once.
void ConfigurePartitionRefCountSupportIfNeeded(bool enable_ref_count) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && DISABLE_REF_COUNT_IN_RENDERER
base::allocator::ConfigurePartitionRefCountSupport(enable_ref_count);
#endif
}
#if BUILDFLAG(USE_ZYGOTE_HANDLE)
pid_t LaunchZygoteHelper(base::CommandLine* cmd_line,
base::ScopedFD* control_fd) {
@@ -497,6 +511,9 @@ int RunZygote(ContentMainDelegate* delegate) {
*base::CommandLine::ForCurrentProcess();
std::string process_type =
command_line.GetSwitchValueASCII(switches::kProcessType);
// Must run as early as possible. See the definition comment.
ConfigurePartitionRefCountSupportIfNeeded(process_type !=
switches::kRendererProcess);
ContentClientInitializer::Set(process_type, delegate);
MainFunctionParams main_params(command_line);
@@ -552,6 +569,12 @@ int RunOtherNamedProcessTypeMain(const std::string& process_type,
{switches::kGpuProcess, GpuMain},
};
if (process_type != switches::kZygoteProcess) {
// Must run as early as possible. See the definition comment.
ConfigurePartitionRefCountSupportIfNeeded(process_type !=
switches::kRendererProcess);
}
for (size_t i = 0; i < base::size(kMainFunctions); ++i) {
if (process_type == kMainFunctions[i].name) {
int exit_code = delegate->RunProcess(process_type, main_function_params);
@@ -912,6 +935,9 @@ int ContentMainRunnerImpl::RunBrowser(MainFunctionParams& main_params,
if (is_browser_main_loop_started_)
return -1;
// Must run as early as possible. See the definition comment.
ConfigurePartitionRefCountSupportIfNeeded(/* enable_ref_count = */ true);
bool should_start_minimal_browser = start_minimal_browser;
if (!mojo_ipc_support_) {
if (delegate_->ShouldCreateFeatureList()) {
......
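
The definition comment requires ConfigurePartitionRefCountSupportIfNeeded
to run once and early; the commit enforces this purely by call-site
discipline (one call per process-type code path). A sketch of enforcing
it mechanically instead, which is not what the commit does:

  #include <mutex>

  void ConfigureOnce(bool enable_ref_count) {
    static std::once_flag flag;
    std::call_once(flag, [enable_ref_count] {
      // base::allocator::ConfigurePartitionRefCountSupport(enable_ref_count);
      (void)enable_ref_count;  // stub for the sketch
    });
  }
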
@@ -78,7 +78,8 @@ bool Partitions::InitializeOnce() {
fast_malloc_allocator->init(
{base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kEnabled,
base::PartitionOptions::PCScan::kDisabledByDefault});
base::PartitionOptions::PCScan::kDisabledByDefault,
base::PartitionOptions::RefCount::kDisabled});
fast_malloc_root_ = fast_malloc_allocator->root();
#endif // !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
@@ -90,16 +91,18 @@ bool Partitions::InitializeOnce() {
base::PartitionAllocGlobalInit(&Partitions::HandleOutOfMemory);
array_buffer_allocator->init(
{base::PartitionOptions::Alignment::kRegular,
array_buffer_allocator->init({base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::PCScan::kAlwaysDisabled});
base::PartitionOptions::PCScan::kAlwaysDisabled,
base::PartitionOptions::RefCount::kDisabled});
buffer_allocator->init({base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::PCScan::kDisabledByDefault});
base::PartitionOptions::PCScan::kDisabledByDefault,
base::PartitionOptions::RefCount::kDisabled});
layout_allocator->init({base::PartitionOptions::Alignment::kRegular,
base::PartitionOptions::ThreadCache::kDisabled,
base::PartitionOptions::PCScan::kAlwaysDisabled});
base::PartitionOptions::PCScan::kAlwaysDisabled,
base::PartitionOptions::RefCount::kDisabled});
array_buffer_root_ = array_buffer_allocator->root();
buffer_root_ = buffer_allocator->root();
......
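
Why every init() call in this file had to change: PartitionOptions is a
plain aggregate, so adding the RefCount member extends the positional
brace-init list at each caller, which makes the BRP opt-out explicit at
every partition. A simplified stand-in:

  struct Options {
    enum class ThreadCache { kDisabled, kEnabled } thread_cache;
    enum class RefCount { kEnabled, kDisabled } ref_count;  // new member
  };

  // Omitting the trailing member would value-initialize it to enumerator 0,
  // i.e. RefCount::kEnabled, so opting out must be spelled out explicitly:
  Options opts{Options::ThreadCache::kEnabled, Options::RefCount::kDisabled};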