Commit a4c19797 authored by Albert J. Wong

Shatter partition_alloc.h into smaller files.

partition_alloc.h has been a dumping ground for all objects related to
PartitionAlloc. The original code used many inlined top-level functions,
making the implementation tightly coupled. This splits most of the
critical clusters of functionality into separate files, with roughly
one struct/class per file.

The split makes the module layering and interaction boundaries much clearer.
Because of the heavy use of inlining and the highly coupled nature of the
original code, a clean split between declaration and implementation was not
yet possible. The split therefore required introducing "-inl.h" files,
because too many of these objects reach directly into each other's fields.
These "-inl.h" files can likely be reduced in future refactors.

Bug: 766882
Change-Id: Iceeccc64a1d810a68cedfb28599dfbcf66642b98
Reviewed-on: https://chromium-review.googlesource.com/1006154
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555118}
parent 27c99670
@@ -1500,6 +1500,22 @@ jumbo_component("base") {
"allocator/partition_allocator/page_allocator_internal.h",
"allocator/partition_allocator/partition_alloc.cc",
"allocator/partition_allocator/partition_alloc.h",
"allocator/partition_allocator/partition_alloc_constants.h",
"allocator/partition_allocator/partition_bucket-inl.h",
"allocator/partition_allocator/partition_bucket.cc",
"allocator/partition_allocator/partition_bucket.h",
"allocator/partition_allocator/partition_cookie.h",
"allocator/partition_allocator/partition_direct_map_extent-inl.h",
"allocator/partition_allocator/partition_direct_map_extent.h",
"allocator/partition_allocator/partition_freelist_entry.h",
"allocator/partition_allocator/partition_oom.cc",
"allocator/partition_allocator/partition_oom.h",
"allocator/partition_allocator/partition_page-inl.h",
"allocator/partition_allocator/partition_page.cc",
"allocator/partition_allocator/partition_page.h",
"allocator/partition_allocator/partition_root_base-inl.h",
"allocator/partition_allocator/partition_root_base.cc",
"allocator/partition_allocator/partition_root_base.h",
"allocator/partition_allocator/spin_lock.cc",
"allocator/partition_allocator/spin_lock.h",
]
......
@@ -9,36 +9,13 @@
#include <cstddef>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
#if defined(OS_WIN)
static const size_t kPageAllocationGranularityShift = 16; // 64KB
#elif defined(_MIPS_ARCH_LOONGSON)
static const size_t kPageAllocationGranularityShift = 14; // 16KB
#else
static const size_t kPageAllocationGranularityShift = 12; // 4KB
#endif
static const size_t kPageAllocationGranularity =
1 << kPageAllocationGranularityShift;
static const size_t kPageAllocationGranularityOffsetMask =
kPageAllocationGranularity - 1;
static const size_t kPageAllocationGranularityBaseMask =
~kPageAllocationGranularityOffsetMask;
#if defined(_MIPS_ARCH_LOONGSON)
static const size_t kSystemPageSize = 16384;
#else
static const size_t kSystemPageSize = 4096;
#endif
static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
"kSystemPageSize must be power of 2");
static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
enum PageAccessibilityConfiguration {
PageInaccessible,
PageRead,
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
#include <stddef.h>
#include "build/build_config.h"
namespace base {
#if defined(OS_WIN)
static const size_t kPageAllocationGranularityShift = 16; // 64KB
#elif defined(_MIPS_ARCH_LOONGSON)
static const size_t kPageAllocationGranularityShift = 14; // 16KB
#else
static const size_t kPageAllocationGranularityShift = 12; // 4KB
#endif
static const size_t kPageAllocationGranularity =
1 << kPageAllocationGranularityShift;
static const size_t kPageAllocationGranularityOffsetMask =
kPageAllocationGranularity - 1;
static const size_t kPageAllocationGranularityBaseMask =
~kPageAllocationGranularityOffsetMask;
#if defined(_MIPS_ARCH_LOONGSON)
static const size_t kSystemPageSize = 16384;
#else
static const size_t kSystemPageSize = 4096;
#endif
static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
"kSystemPageSize must be power of 2");
static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_
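Editor's note: the offset/base mask pair above is the usual idiom for aligning addresses to the allocation granularity. A minimal sketch of how callers use these masks; the helper names here are illustrative, not part of the header:

#include <cstdint>
#include "base/allocator/partition_allocator/page_allocator_constants.h"

// Rounds |address| down to the start of its granularity-sized block.
uintptr_t RoundDownToGranularity(uintptr_t address) {
  return address & base::kPageAllocationGranularityBaseMask;
}

// Rounds |address| up to the next granularity boundary.
uintptr_t RoundUpToGranularity(uintptr_t address) {
  return (address + base::kPageAllocationGranularityOffsetMask) &
         base::kPageAllocationGranularityBaseMask;
}

// With the default 4KB granularity, RoundDownToGranularity(0x12345) ==
// 0x12000 and RoundUpToGranularity(0x12345) == 0x13000.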
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/bits.h"
#include "base/logging.h"
namespace base {
// Allocation granularity of sizeof(void*) bytes.
static const size_t kAllocationGranularity = sizeof(void*);
static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// Underlying partition storage pages are a power-of-two size. It is typical
// for a partition page to be based on multiple system pages. Most references to
// "page" refer to partition pages.
// We also have the concept of "super pages" -- these are the underlying system
// allocations we make. Super pages contain multiple partition pages inside them
// and include space for a small amount of metadata per partition page.
// Inside super pages, we store "slot spans". A slot span is a contiguous range
// of one or more partition pages that stores allocations of the same size.
// Slot span sizes are adjusted depending on the allocation size, to make sure
// the packing does not lead to unused (wasted) space at the end of the last
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
// up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
static const size_t kPartitionPageShift = 16; // 64KB
#else
static const size_t kPartitionPageShift = 14; // 16KB
#endif
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
static const size_t kMaxPartitionPagesPerSlotSpan = 4;
// To avoid fragmentation via never-used freelist entries, we hand out partition
// freelist sections gradually, in units of the dominant system page size.
// What we're actually doing is avoiding filling the full partition page (16 KB)
// with freelist pointers right away. Writing freelist pointers will fault and
// dirty a private page, which is very wasteful if we never actually store
// objects there.
static const size_t kNumSystemPagesPerPartitionPage =
kPartitionPageSize / kSystemPageSize;
static const size_t kMaxSystemPagesPerSlotSpan =
kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
// These chunks are called "super pages". We do this so that we can store
// metadata in the first few pages of each 2MB aligned section. This leads to
// a very fast free(). We specifically choose 2MB because this virtual address
// block represents a full but single PTE allocation on ARM, ia32 and x64.
//
// The layout of the super page is as follows. The sizes below are the same
// for 32 bit and 64 bit.
//
// | Guard page (4KB) |
// | Metadata page (4KB) |
// | Guard pages (8KB) |
// | Slot span |
// | Slot span |
// | ... |
// | Slot span |
// | Guard page (4KB) |
//
// - Each slot span is a contiguous range of one or more PartitionPages.
// - The metadata page has the following format. Note that the PartitionPage
// that is not at the head of a slot span is "unused". In other words,
// the metadata for the slot span is stored only in the first PartitionPage
// of the slot span. Metadata accesses to other PartitionPages are
// redirected to the first PartitionPage.
//
// | SuperPageExtentEntry (32B) |
// | PartitionPage of slot span 1 (32B, used) |
// | PartitionPage of slot span 1 (32B, unused) |
// | PartitionPage of slot span 1 (32B, unused) |
// | PartitionPage of slot span 2 (32B, used) |
// | PartitionPage of slot span 3 (32B, used) |
// | ... |
// | PartitionPage of slot span N (32B, unused) |
//
// A direct-mapped page has a similar layout, to make it look like a super
// page:
//
// | Guard page (4KB) |
// | Metadata page (4KB) |
// | Guard pages (8KB) |
// | Direct mapped object |
// | Guard page (4KB) |
//
// - The metadata page has the following layout:
//
// | SuperPageExtentEntry (32B) |
// | PartitionPage (32B) |
// | PartitionBucket (32B) |
// | PartitionDirectMapExtent (8B) |
static const size_t kSuperPageShift = 21; // 2MB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
static const size_t kNumPartitionPagesPerSuperPage =
kSuperPageSize / kPartitionPageSize;
// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-two size of
// the allocation. More precisely, the order is the bit index of the
// most-significant bit in the allocation size, where bit numbering starts
// at index 1 for the least-significant bit.
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
static const size_t kGenericMaxBucketedOrder =
20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
static const size_t kGenericNumBucketedOrders =
(kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
// 160, ..., 240:
static const size_t kGenericNumBucketsPerOrderBits = 3;
static const size_t kGenericNumBucketsPerOrder =
1 << kGenericNumBucketsPerOrderBits;
static const size_t kGenericNumBuckets =
kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
static const size_t kGenericSmallestBucket = 1
<< (kGenericMinBucketedOrder - 1);
static const size_t kGenericMaxBucketSpacing =
1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
static const size_t kGenericMaxBucketed =
(1 << (kGenericMaxBucketedOrder - 1)) +
((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
static const size_t kGenericMinDirectMappedDownsize =
kGenericMaxBucketed +
1; // Limit when downsizing a direct mapping using realloc().
static const size_t kGenericMaxDirectMapped =
(1UL << 31) + kPageAllocationGranularity; // 2 GB plus one more page.
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// Constant for the memory reclaim logic.
static const size_t kMaxFreeableSpans = 16;
// If the total size in bytes of allocated but not committed pages exceeds this
// value (probably an "out of virtual address space" crash),
// a special crash stack trace is generated at |PartitionOutOfMemory|.
// This is to distinguish "out of virtual address space" from
// "out of physical memory" in crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GB
// Flags for PartitionAllocGenericFlags.
enum PartitionAllocFlags {
PartitionAllocReturnNull = 1 << 0,
};
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
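Editor's note: to make the order arithmetic above concrete, here is a hedged sketch of the size-to-order mapping described in the comments. The real allocator computes this with table lookups; ComputeOrder and BucketSpacingForOrder are illustrative names only:

#include <cstddef>

// Order = index of the most-significant set bit, counting from 1 at the
// least-significant bit. ComputeOrder(1) == 1, ComputeOrder(128) == 8,
// ComputeOrder(255) == 8.
size_t ComputeOrder(size_t size) {
  size_t order = 0;
  while (size) {
    ++order;
    size >>= 1;
  }
  return order;
}

// For the higher orders, buckets within an order are spaced by the order's
// base size divided by kGenericNumBucketsPerOrder (8). Order 8 covers sizes
// 128..255 with buckets 128, 144, 160, ..., 240, i.e. spacing 16.
size_t BucketSpacingForOrder(size_t order) {
  return (static_cast<size_t>(1) << (order - 1)) >> 3;  // 3 bits per order.
}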
@@ -27,6 +27,12 @@
#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Because there is so much deep inspection of the internal objects,
// explicitly annotating the namespaces for commonly expected objects makes the
// code unreadable. Prefer using-declarations instead.
using base::internal::PartitionBucket;
using base::internal::PartitionPage;
namespace {
constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
@@ -84,6 +90,14 @@ bool ClearAddressSpaceLimit() {
namespace base {
// NOTE: Though this test actually exercises interfaces inside the ::base
// namespace, the unittest is inside the ::base::internal namespace because a
// portion of the test expectations require inspecting objects and behavior
// in the ::base::internal namespace. An alternate formulation would be to
// explicitly add using-declarations for each inspected type, but this felt
// more readable.
namespace internal {
const size_t kTestAllocSize = 16;
#if !DCHECK_IS_ON()
const size_t kPointerOffset = 0;
@@ -2089,6 +2103,7 @@ TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
generic_allocator.root()->Free(ptr);
}
} // namespace internal
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
namespace base {
namespace internal {
// TODO(ajwong): Move this to PartitionRootBase. Likely can remove this -inl.h
// file.
ALWAYS_INLINE void* PartitionBucket::Alloc(PartitionRootBase* root,
int flags,
size_t size) {
PartitionPage* page = this->active_pages_head;
// Check that this page is neither full nor freed.
DCHECK(page->num_allocated_slots >= 0);
void* ret = page->freelist_head;
if (LIKELY(ret != 0)) {
// If these DCHECKs fire, you probably corrupted memory.
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(PartitionPage::IsPointerValid(page));
// All large allocations must go through the slow path to correctly
// update the size metadata.
DCHECK(page->get_raw_size() == 0);
internal::PartitionFreelistEntry* new_head =
internal::PartitionFreelistEntry::Transform(
static_cast<internal::PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
ret = this->SlowPathAlloc(root, flags, size);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(!ret ||
PartitionPage::IsPointerValid(PartitionPage::FromPointer(ret)));
}
#if DCHECK_IS_ON()
if (!ret)
return 0;
// Fill the uninitialized pattern, and write the cookies.
page = PartitionPage::FromPointer(ret);
// TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
// be this->slot_size?
size_t new_slot_size = page->bucket->slot_size;
size_t raw_size = page->get_raw_size();
if (raw_size) {
DCHECK(raw_size == size);
new_slot_size = raw_size;
}
size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
char* char_ret = static_cast<char*>(ret);
// The value given to the application is actually just after the cookie.
ret = char_ret + kCookieSize;
// Debug fill region kUninitializedByte and surround it with 2 cookies.
PartitionCookieWriteValue(char_ret);
memset(ret, kUninitializedByte, no_cookie_size);
PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
#endif
return ret;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
#include <stddef.h>
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
namespace base {
namespace internal {
struct PartitionPage;
struct PartitionRootBase;
struct PartitionBucket {
// Accessed most in hot path => goes first.
PartitionPage* active_pages_head;
PartitionPage* empty_pages_head;
PartitionPage* decommitted_pages_head;
uint32_t slot_size;
uint32_t num_system_pages_per_slot_span : 8;
uint32_t num_full_pages : 24;
// Public API.
void Init(uint32_t new_slot_size);
// Note the matching Free() functions are in PartitionPage.
BASE_EXPORT void* Alloc(PartitionRootBase* root, int flags, size_t size);
BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root,
int flags,
size_t size);
ALWAYS_INLINE bool is_direct_mapped() const {
return !num_system_pages_per_slot_span;
}
ALWAYS_INLINE size_t get_bytes_per_span() const {
// TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
// https://crbug.com/680657
return num_system_pages_per_slot_span * kSystemPageSize;
}
ALWAYS_INLINE uint16_t get_slots_per_span() const {
// TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
// https://crbug.com/680657
return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
}
static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
// Caller must check that the size is not above the kGenericMaxDirectMapped
// limit before calling. This also guards against integer overflow in the
// calculation here.
DCHECK(size <= kGenericMaxDirectMapped);
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
// TODO(ajwong): Can this be made private? https://crbug.com/787153
static PartitionBucket* get_sentinel_bucket();
// This helper function scans a bucket's active page list for a suitable new
// active page. When it finds a suitable new active page (one that has
// free slots and is not empty), it is set as the new active page. If there
// is no suitable new active page, the current active page is set to
// PartitionPage::get_sentinel_page(). As potential pages are scanned, they
// are tidied up according to their state. Empty pages are swept on to the
// empty page list, decommitted pages on to the decommitted page list and full
// pages are unlinked from any list.
//
// This is where the guts of the bucket maintenance is done!
bool SetNewActivePage();
private:
static void OutOfMemory(const PartitionRootBase* root);
static void OutOfMemoryWithLotsOfUncommitedPages();
static NOINLINE void OnFull();
// Returns a natural number of PartitionPages (calculated by
// get_system_pages_per_slot_span()) to allocate from the current
// SuperPage when the bucket runs out of slots.
ALWAYS_INLINE uint16_t get_pages_per_slot_span();
// Returns the number of system pages in a slot span.
//
// The calculation attempts to find the best number of System Pages to
// allocate for the given slot_size to minimize wasted space. It uses a
// heuristic that looks at number of bytes wasted after the last slot and
// attempts to account for the PTE usage of each System Page.
uint8_t get_system_pages_per_slot_span();
// Allocates a new slot span with size |num_partition_pages| from the
// current extent. Metadata within this slot span will be uninitialized.
// Returns nullptr on error.
ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root,
int flags,
uint16_t num_partition_pages);
// Each bucket allocates a slot span when it runs out of slots.
// A slot span's size is equal to get_pages_per_slot_span() number of
// PartitionPages. This function initializes all PartitionPage within the
// span to point to the first PartitionPage which holds all the metadata
// for the span and registers this bucket as the owner of the span. It does
// NOT put the slots into the bucket's freelist.
ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page);
// Allocates one slot from the given |page| and then adds the remainder to
// the current bucket. If the |page| was freshly allocated, it must have been
// passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page);
static PartitionBucket sentinel_bucket_;
};
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_H_
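Editor's note: a quick check of the rounding in get_direct_map_size(). Assuming the default 4KB kSystemPageSize, the expression rounds a request up to the next system-page boundary; the standalone helper below only mirrors that math:

#include <cstddef>

// Mirrors get_direct_map_size() with kSystemPageSize == 4096.
size_t RoundUpToSystemPage(size_t size) {
  const size_t kOffsetMask = 4096 - 1;
  return (size + kOffsetMask) & ~kOffsetMask;
}

// RoundUpToSystemPage(10000) == 12288 (three 4KB pages);
// RoundUpToSystemPage(8192) == 8192 (already aligned).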
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#include "base/compiler_specific.h"
#include "base/logging.h"
namespace base {
namespace internal {
#if DCHECK_IS_ON()
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
static const size_t kCookieSize =
16; // Handles alignment up to XMM instructions on Intel.
static const unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
#endif
ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
#if DCHECK_IS_ON()
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
DCHECK(*cookie_ptr == kCookieValue[i]);
#endif
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
#if DCHECK_IS_ON()
// Add space for cookies, checking for integer overflow. TODO(palmer):
// Investigate the performance and code size implications of using
// CheckedNumeric throughout PA.
DCHECK(size + (2 * kCookieSize) > size);
size += 2 * kCookieSize;
#endif
return size;
}
ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
#if DCHECK_IS_ON()
// The value given to the application is actually just after the cookie.
ptr = static_cast<char*>(ptr) - kCookieSize;
#endif
return ptr;
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
#if DCHECK_IS_ON()
// Remove space for cookies.
DCHECK(size >= 2 * kCookieSize);
size -= 2 * kCookieSize;
#endif
return size;
}
ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
#if DCHECK_IS_ON()
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
*cookie_ptr = kCookieValue[i];
#endif
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
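Editor's note: a hedged sketch of how these helpers compose in a DCHECK build. Each slot carries two 16-byte cookies around the user region, which is filled with kUninitializedByte on allocation. DressSlot below is illustrative only and stands in for the logic in PartitionBucket::Alloc():

#include <cstddef>
#include <cstring>

// Resulting slot layout: | cookie (16B) | user data | cookie (16B) |
constexpr size_t kCookieSize = 16;        // Matches partition_cookie.h.
constexpr unsigned char kPattern = 0xAB;  // kUninitializedByte.

void* DressSlot(char* slot, size_t user_size) {
  // The real code writes kCookieValue via PartitionCookieWriteValue(); a
  // single byte pattern keeps this sketch short.
  memset(slot, 0xDE, kCookieSize);
  memset(slot + kCookieSize, kPattern, user_size);
  memset(slot + kCookieSize + user_size, 0xDE, kCookieSize);
  // The caller's pointer is just past the leading cookie, which is exactly
  // what PartitionCookieFreePointerAdjust() undoes on free().
  return slot + kCookieSize;
}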
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_page.h"
namespace base {
namespace internal {
ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
PartitionPage* page) {
DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
namespace base {
namespace internal {
struct PartitionBucket;
struct PartitionPage;
struct PartitionDirectMapExtent {
PartitionDirectMapExtent* next_extent;
PartitionDirectMapExtent* prev_extent;
PartitionBucket* bucket;
size_t map_size; // Mapped size, not including guard pages and meta-data.
ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
};
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/compiler_specific.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
namespace base {
namespace internal {
// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
// Transform() with Encode()/Decode() such that the API provides some static
// type safety.
//
// https://crbug.com/787153
struct PartitionFreelistEntry {
PartitionFreelistEntry* next;
static ALWAYS_INLINE PartitionFreelistEntry* Transform(
PartitionFreelistEntry* ptr) {
// We use bswap on little endian as a fast mask for two reasons:
// 1) If an object is freed and its vtable used where the attacker doesn't
// get the chance to run allocations between the free and use, the vtable
// dereference is likely to fault.
// 2) If the attacker has a linear buffer overflow and elects to try and
// corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted.
// For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
return reinterpret_cast<PartitionFreelistEntry*>(masked);
}
};
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_FREELIST_ENTRY_H_
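Editor's note: Transform() is its own inverse: byte-swapping (or negating, on big endian) twice returns the original pointer, which is why a single function serves as both encode and decode. A small check of the little-endian path, assuming a 64-bit target and the GCC/Clang byte-swap builtin in place of base::ByteSwapUintPtrT:

#include <cstdint>

static_assert(sizeof(uintptr_t) == 8, "sketch assumes a 64-bit target");

// Stand-in for base::ByteSwapUintPtrT.
uintptr_t Transform(uintptr_t ptr) {
  return __builtin_bswap64(ptr);
}

// Round trip: Transform(Transform(p)) == p for any p.
// E.g. 0x00007f0012345678 encodes to 0x78563412007f0000, a value that
// faults if dereferenced as a freelist link or vtable pointer.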
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/oom.h"
#include "build/build_config.h"
namespace base {
namespace internal {
void NOINLINE PartitionExcessiveAllocationSize() {
OOM_CRASH();
}
#if !defined(ARCH_CPU_64_BITS)
NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
OOM_CRASH();
}
#endif
} // namespace internal
} // namespace base
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Holds functions for generating OOM errors from PartitionAlloc. This is
// distinct from oom.h in that it is meant only for use in PartitionAlloc.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
namespace internal {
NOINLINE void PartitionExcessiveAllocationSize();
#if !defined(ARCH_CPU_64_BITS)
NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages();
#endif
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
namespace base {
namespace internal {
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
void* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
char* super_page_ptr =
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
(pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
// Index 0 is invalid because it is the metadata and guard area and
// the last index is invalid because it is a guard page.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
PartitionPage* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
// Partition pages in the same slot span can share the same page object.
// Adjust for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
// Returns the start of the slot span for the PartitionPage.
ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
// A valid |page| must be past the first guard System page and within
// the following metadata region.
DCHECK(super_page_offset > kSystemPageSize);
// Must be less than total metadata region.
DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
// Index 0 is invalid because it is the superpage extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
// pages for the metadata region.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
super_page_base + (partition_page_index << kPartitionPageShift));
return ret;
}
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
page->bucket->slot_size));
return page;
}
ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
DCHECK((bucket->slot_size % kSystemPageSize) == 0);
DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
}
ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
const size_t* ptr = get_raw_size_ptr();
if (UNLIKELY(ptr != nullptr))
return *ptr;
return 0;
}
ALWAYS_INLINE bool PartitionPage::IsPointerValid(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// If these asserts fire, you probably corrupted memory.
#if DCHECK_IS_ON()
size_t slot_size = this->bucket->slot_size;
size_t raw_size = get_raw_size();
if (raw_size)
slot_size = raw_size;
PartitionCookieCheckValue(ptr);
PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
kCookieSize);
memset(ptr, kFreedByte, slot_size);
#endif
DCHECK(this->num_allocated_slots);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(!freelist_head || PartitionPage::IsPointerValid(
PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
freelist_head->next));
internal::PartitionFreelistEntry* entry =
static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
DCHECK(get_raw_size() == 0);
}
}
ALWAYS_INLINE bool PartitionPage::is_active() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}
ALWAYS_INLINE bool PartitionPage::is_full() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
DCHECK(!freelist_head);
DCHECK(!num_unprovisioned_slots);
}
return ret;
}
ALWAYS_INLINE bool PartitionPage::is_empty() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
return (!num_allocated_slots && freelist_head);
}
ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
DCHECK(!num_unprovisioned_slots);
DCHECK(empty_cache_index == -1);
}
return ret;
}
ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
size_t* raw_size_ptr = get_raw_size_ptr();
if (UNLIKELY(raw_size_ptr != nullptr))
*raw_size_ptr = size;
}
ALWAYS_INLINE void PartitionPage::Reset() {
DCHECK(this->is_decommitted());
num_unprovisioned_slots = bucket->get_slots_per_span();
DCHECK(num_unprovisioned_slots);
next_page = nullptr;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
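Editor's note: the FromPointer()/ToPointer() arithmetic is easier to follow with concrete numbers. A worked example, assuming the default constants (2MB super pages, 16KB partition pages, 4KB system pages, 32-byte metadata slots) and an illustrative address:

// Suppose ptr == 0x4000C010 inside the super page based at 0x40000000:
//   super_page_ptr       = ptr & kSuperPageBaseMask           = 0x40000000
//   partition_page_index = (ptr & kSuperPageOffsetMask) >> 14 = 0xC010 >> 14 = 3
//   metadata area        = super_page_ptr + kSystemPageSize   = 0x40001000
//   page metadata        = 0x40001000 + (3 << kPageMetadataShift) = 0x40001060
// If that PartitionPage has page_offset == 1 (it is the second page of its
// slot span), FromPointerNoAlignmentCheck() steps back 32 bytes to the
// span head at 0x40001040.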
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_direct_map_extent-inl.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
namespace base {
namespace internal {
namespace {
ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
const PartitionDirectMapExtent* extent =
PartitionDirectMapExtent::FromPage(page);
size_t unmap_size = extent->map_size;
// Maintain the doubly-linked list of all direct mappings.
if (extent->prev_extent) {
DCHECK(extent->prev_extent->next_extent == extent);
extent->prev_extent->next_extent = extent->next_extent;
} else {
root->direct_map_list = extent->next_extent;
}
if (extent->next_extent) {
DCHECK(extent->next_extent->prev_extent == extent);
extent->next_extent->prev_extent = extent->prev_extent;
}
// Add on the size of the trailing guard page and preceding partition
// page.
unmap_size += kPartitionPageSize + kSystemPageSize;
size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
root->DecreaseCommittedPages(uncommitted_page_size);
DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= kPartitionPageSize;
FreePages(ptr, unmap_size);
}
ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
DCHECK(page->is_empty());
PartitionRootBase* root = PartitionRootBase::FromPage(page);
// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
DCHECK(page->empty_cache_index >= 0);
DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
root->global_empty_page_ring[page->empty_cache_index] = nullptr;
}
int16_t current_index = root->global_empty_page_ring_index;
PartitionPage* page_to_decommit = root->global_empty_page_ring[current_index];
// The page might well have been re-activated, filled up, etc. before we get
// around to looking at it here.
if (page_to_decommit)
page_to_decommit->DecommitIfPossible(root);
// We put the empty slot span on our global list of "pages that were once
// empty", thus providing it a bit of breathing room to get re-used before
// we really free it. This improves performance, particularly on Mac OS X,
// which has subpar memory management performance.
root->global_empty_page_ring[current_index] = page;
page->empty_cache_index = current_index;
++current_index;
if (current_index == kMaxFreeableSpans)
current_index = 0;
root->global_empty_page_ring_index = current_index;
}
} // namespace
// static
PartitionPage PartitionPage::sentinel_page_;
PartitionPage* PartitionPage::get_sentinel_page() {
return &sentinel_page_;
}
void PartitionPage::FreeSlowPath() {
DCHECK(this != get_sentinel_page());
if (LIKELY(this->num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
PartitionDirectUnmap(this);
return;
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
bucket->SetNewActivePage();
DCHECK(bucket->active_pages_head != this);
set_raw_size(0);
DCHECK(!get_raw_size());
PartitionRegisterEmptyPage(this);
} else {
DCHECK(!bucket->is_direct_mapped());
// Ensure that the page is full. That's the only valid case if we
// arrive here.
DCHECK(this->num_allocated_slots < 0);
// A transition of num_allocated_slots from 0 to -1 is not legal, and
// likely indicates a double-free.
CHECK(this->num_allocated_slots != -1);
this->num_allocated_slots = -this->num_allocated_slots - 2;
DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
DCHECK(!this->next_page);
if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
this->next_page = bucket->active_pages_head;
bucket->active_pages_head = this;
--bucket->num_full_pages;
// Special case: for a partition page with just a single slot, it may
// now be empty and we want to run it through the empty logic.
if (UNLIKELY(this->num_allocated_slots == 0))
FreeSlowPath();
}
}
void PartitionPage::Decommit(PartitionRootBase* root) {
DCHECK(is_empty());
DCHECK(!bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(this);
root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
// We actually leave the decommitted page in the active list. We'll sweep
// it on to the decommitted page list when we next walk the active page
// list.
// Pulling this trick enables us to use a singly-linked page list for all
// cases, which is critical in keeping the page metadata structure down to
// 32 bytes in size.
freelist_head = nullptr;
num_unprovisioned_slots = 0;
DCHECK(is_decommitted());
}
void PartitionPage::DecommitIfPossible(PartitionRootBase* root) {
DCHECK(empty_cache_index >= 0);
DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
empty_cache_index = -1;
if (is_empty())
Decommit(root);
}
} // namespace internal
} // namespace base
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace base {
namespace internal {
struct PartitionBucket;
struct PartitionFreelistEntry;
struct PartitionRootBase;
// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
// 3) Empty.
// 4) Decommitted.
// An active page has available free slots. A full page has no free slots. An
// empty page has no free slots, and a decommitted page is an empty page that
// had its backing memory released back to the system.
// There are two linked lists tracking the pages. The "active page" list is an
// approximation of a list of active pages. It is an approximation because
// full, empty and decommitted pages may briefly be present in the list until
// we next do a scan over it.
// The "empty page" list is an accurate list of pages which are either empty
// or decommitted.
//
// The significant page transitions are:
// - free() will detect when a full page has a slot free()'d and immediately
// return the page to the head of the active list.
// - free() will detect when a page is fully emptied. It _may_ add it to the
// empty list or it _may_ leave it on the active list until a future list scan.
// - malloc() _may_ scan the active page list in order to fulfil the request.
// If it does this, full, empty and decommitted pages encountered will be
// booted out of the active list. If there are no suitable active pages found,
// an empty or decommitted page (if one exists) will be pulled from the empty
// list on to the active list.
//
// TODO(ajwong): Evaluate if this should be named PartitionSlotSpanMetadata or
// similar. If so, all uses of the term "page" in comments, member variables,
// local variables, and documentation that refer to this concept should be
// updated.
struct PartitionPage {
PartitionFreelistEntry* freelist_head;
PartitionPage* next_page;
PartitionBucket* bucket;
// Deliberately signed, 0 for empty or decommitted page, -n for full pages:
int16_t num_allocated_slots;
uint16_t num_unprovisioned_slots;
uint16_t page_offset;
int16_t empty_cache_index; // -1 if not in the empty cache.
// Public API
// Note the matching Alloc() functions are in PartitionBucket.
BASE_EXPORT NOINLINE void FreeSlowPath();
ALWAYS_INLINE void Free(void* ptr);
void Decommit(PartitionRootBase* root);
void DecommitIfPossible(PartitionRootBase* root);
// Pointer manipulation functions. These must be static as the input |page|
// pointer may be the result of an offset calculation and therefore cannot
// be trusted. The objective of these functions is to sanitize this input.
ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
ALWAYS_INLINE static bool IsPointerValid(PartitionPage* page);
ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
ALWAYS_INLINE size_t* get_raw_size_ptr() {
return const_cast<size_t*>(
const_cast<const PartitionPage*>(this)->get_raw_size_ptr());
}
ALWAYS_INLINE size_t get_raw_size() const;
ALWAYS_INLINE void set_raw_size(size_t size);
ALWAYS_INLINE void Reset();
// TODO(ajwong): Can this be made private? https://crbug.com/787153
BASE_EXPORT static PartitionPage* get_sentinel_page();
// Page State accessors.
// Note that it's only valid to call these functions on pages found on one of
// the page lists. Specifically, you can't call these functions on full pages
// that were detached from the active list.
//
// This restriction provides the flexibility for some of the status fields to
// be repurposed when a page is taken off a list. See the negation of
// |num_allocated_slots| when a full page is removed from the active list
// for an example of such repurposing.
ALWAYS_INLINE bool is_active() const;
ALWAYS_INLINE bool is_full() const;
ALWAYS_INLINE bool is_empty() const;
ALWAYS_INLINE bool is_decommitted() const;
private:
// g_sentinel_page is used as a sentinel to indicate that there is no page
// in the active page list. We can use nullptr, but in that case we need
// to add a null-check branch to the hot allocation path. We want to avoid
// that.
//
// Note, this declaration is kept in the header as opposed to an anonymous
// namespace so the getter can be fully inlined.
static PartitionPage sentinel_page_;
};
static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
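Editor's note: the static_assert above holds with no slack on 64-bit targets. A quick accounting of the field layout, assuming LP64 pointer sizes:

// PartitionPage field sizes on a 64-bit target (kPageMetadataSize == 32):
//   freelist_head            8 bytes
//   next_page                8 bytes
//   bucket                   8 bytes
//   num_allocated_slots      2 bytes
//   num_unprovisioned_slots  2 bytes
//   page_offset              2 bytes
//   empty_cache_index        2 bytes
//   total                   32 bytes, exactly one metadata slot, no padding.
// On 32-bit targets the three pointers shrink to 4 bytes each, leaving slack.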
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
namespace base {
namespace internal {
ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
return extent_entry->root;
}
ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) {
total_size_of_committed_pages += len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) {
total_size_of_committed_pages -= len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address,
size_t length) {
::base::DecommitSystemPages(address, length);
DecreaseCommittedPages(length);
}
ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address,
size_t length) {
CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
IncreaseCommittedPages(length);
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
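Editor's note: FromPage() works because every PartitionPage metadata object lives inside the single metadata system page of its super page, and the PartitionSuperPageExtentEntry holding |root| occupies the first 32-byte slot of that page. Masking any metadata pointer down to its system-page base therefore lands on the extent entry. Continuing the worked example from partition_page-inl.h:

// A PartitionPage at 0x40001060 lives in the metadata system page that
// starts at 0x40001000:
//   extent_entry = 0x40001060 & kSystemPageBaseMask = 0x40001000
// The entry at 0x40001000 begins with |root|, so one AND and one load
// recover the owning PartitionRootBase from any page pointer.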
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "build/build_config.h"
namespace base {
namespace internal {
NOINLINE void PartitionRootBase::OutOfMemory() {
#if !defined(ARCH_CPU_64_BITS)
// Check whether this OOM is due to a lot of super pages that are allocated
// but not committed, probably due to http://crbug.com/421387.
if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
total_size_of_committed_pages >
kReasonableSizeOfUnusedPages) {
PartitionOutOfMemoryWithLotsOfUncommitedPages();
}
#endif
if (PartitionRootBase::gOomHandlingFunction)
(*PartitionRootBase::gOomHandlingFunction)();
OOM_CRASH();
}
void PartitionRootBase::DecommitEmptyPages() {
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
internal::PartitionPage* page = global_empty_page_ring[i];
if (page)
page->DecommitIfPossible(this);
global_empty_page_ring[i] = nullptr;
}
}
} // namespace internal
} // namespace base
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
namespace base {
namespace internal {
struct PartitionPage;
struct PartitionRootBase;
// An "extent" is a span of consecutive superpages. We link to the partition's
// next extent (if there is one) to the very start of a superpage's metadata
// area.
struct PartitionSuperPageExtentEntry {
PartitionRootBase* root;
char* super_page_base;
char* super_pages_end;
PartitionSuperPageExtentEntry* next;
};
static_assert(
sizeof(PartitionSuperPageExtentEntry) <= kPageMetadataSize,
"PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
struct BASE_EXPORT PartitionRootBase {
PartitionRootBase();
virtual ~PartitionRootBase();
size_t total_size_of_committed_pages = 0;
size_t total_size_of_super_pages = 0;
size_t total_size_of_direct_mapped_pages = 0;
// Invariant: total_size_of_committed_pages <=
// total_size_of_super_pages +
// total_size_of_direct_mapped_pages.
unsigned num_buckets = 0;
unsigned max_allocation = 0;
bool initialized = false;
char* next_super_page = nullptr;
char* next_partition_page = nullptr;
char* next_partition_page_end = nullptr;
PartitionSuperPageExtentEntry* current_extent = nullptr;
PartitionSuperPageExtentEntry* first_extent = nullptr;
PartitionDirectMapExtent* direct_map_list = nullptr;
PartitionPage* global_empty_page_ring[kMaxFreeableSpans] = {};
int16_t global_empty_page_ring_index = 0;
uintptr_t inverted_self = 0;
// Public API
// gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
static void (*gOomHandlingFunction)();
NOINLINE void OutOfMemory();
ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length);
ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length);
void DecommitEmptyPages();
};
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
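Editor's note: gOomHandlingFunction gives the embedder one last hook before OOM_CRASH(). A minimal sketch of installing a handler; LogPartitionOom is a hypothetical embedder function, not part of this change:

#include "base/allocator/partition_allocator/partition_root_base.h"

// Hypothetical hook: runs once when PartitionAlloc is about to crash on OOM.
// It must not allocate from PartitionAlloc itself.
void LogPartitionOom() {
  // Record allocator statistics, flush crash keys, etc.
}

void InstallPartitionOomHandler() {
  base::internal::PartitionRootBase::gOomHandlingFunction = &LogPartitionOom;
}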