Commit d02466c6 authored by Albert J. Wong, committed by Commit Bot

Remove -inl.h files from Partition Alloc.

Now that the module boundaries are clear, refactor some functions to
make a clear layer separation for types and remove the -inl.h files.

Yay!  \o/

Bug: 766882
Change-Id: I17934666c640c8f5daed40c2c8435b2e220b7b79
Reviewed-on: https://chromium-review.googlesource.com/1008964
Commit-Queue: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555277}
parent 1b809b07
......@@ -1511,19 +1511,15 @@ jumbo_component("base") {
"allocator/partition_allocator/partition_alloc.cc",
"allocator/partition_allocator/partition_alloc.h",
"allocator/partition_allocator/partition_alloc_constants.h",
"allocator/partition_allocator/partition_bucket-inl.h",
"allocator/partition_allocator/partition_bucket.cc",
"allocator/partition_allocator/partition_bucket.h",
"allocator/partition_allocator/partition_cookie.h",
"allocator/partition_allocator/partition_direct_map_extent-inl.h",
"allocator/partition_allocator/partition_direct_map_extent.h",
"allocator/partition_allocator/partition_freelist_entry.h",
"allocator/partition_allocator/partition_oom.cc",
"allocator/partition_allocator/partition_oom.h",
"allocator/partition_allocator/partition_page-inl.h",
"allocator/partition_allocator/partition_page.cc",
"allocator/partition_allocator/partition_page.h",
"allocator/partition_allocator/partition_root_base-inl.h",
"allocator/partition_allocator/partition_root_base.cc",
"allocator/partition_allocator/partition_root_base.h",
"allocator/partition_allocator/spin_lock.cc",
......
......@@ -7,9 +7,9 @@
#include <string.h>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_direct_map_extent-inl.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
......@@ -276,7 +276,7 @@ void* PartitionRootGeneric::Realloc(void* ptr,
internal::PartitionPage* page = internal::PartitionPage::FromPointer(
internal::PartitionCookieFreePointerAdjust(ptr));
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionPage::IsPointerValid(page));
DCHECK(IsValidPage(page));
if (UNLIKELY(page->bucket->is_direct_mapped())) {
// We may be able to perform the realloc in place by changing the
......
......@@ -65,10 +65,10 @@
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_bucket-inl.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/base_export.h"
#include "base/bits.h"
......@@ -271,7 +271,7 @@ ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
DCHECK(index < this->num_buckets);
DCHECK(size == index << kBucketShift);
internal::PartitionBucket* bucket = &this->buckets()[index];
void* result = bucket->Alloc(this, 0, size);
void* result = AllocFromBucket(bucket, 0, size);
PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
type_name);
return result;
......@@ -288,7 +288,7 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
ptr = internal::PartitionCookieFreePointerAdjust(ptr);
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionPage::IsPointerValid(page));
DCHECK(internal::PartitionRootBase::IsValidPage(page));
page->Free(ptr);
#endif
}
......@@ -326,7 +326,7 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
void* ret = nullptr;
{
subtle::SpinLock::Guard guard(root->lock);
ret = bucket->Alloc(root, flags, size);
ret = root->AllocFromBucket(bucket, flags, size);
}
PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
return ret;
......@@ -351,7 +351,7 @@ ALWAYS_INLINE void PartitionRootGeneric::Free(void* ptr) {
ptr = internal::PartitionCookieFreePointerAdjust(ptr);
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionPage::IsPointerValid(page));
DCHECK(IsValidPage(page));
{
subtle::SpinLock::Guard guard(this->lock);
page->Free(ptr);
......@@ -392,7 +392,7 @@ ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
ptr = internal::PartitionCookieFreePointerAdjust(ptr);
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionPage::IsPointerValid(page));
DCHECK(internal::PartitionRootBase::IsValidPage(page));
size_t size = page->bucket->slot_size;
return internal::PartitionCookieSizeAdjustSubtract(size);
}
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
namespace base {
namespace internal {
// TODO(ajwong): Move this to PartitionRootBase. Likely can remove this -inl.h
// file.
// Fast-path allocation out of this bucket: pops the freelist head of the
// active page when one is available, otherwise defers to SlowPathAlloc().
// When DCHECK_IS_ON(), additionally paints the slot with kUninitializedByte
// and writes guard cookies at both ends, returning the pointer just past
// the leading cookie.
ALWAYS_INLINE void* PartitionBucket::Alloc(PartitionRootBase* root,
int flags,
size_t size) {
PartitionPage* page = this->active_pages_head;
// Check that this page is neither full nor freed.
DCHECK(page->num_allocated_slots >= 0);
void* ret = page->freelist_head;
if (LIKELY(ret != 0)) {
// If these DCHECKs fire, you probably corrupted memory.
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(PartitionPage::IsPointerValid(page));
// All large allocations must go through the slow path to correctly
// update the size metadata.
DCHECK(page->get_raw_size() == 0);
// Pop the freelist: Transform() converts the stored |next| field into a
// usable PartitionFreelistEntry pointer.
internal::PartitionFreelistEntry* new_head =
internal::PartitionFreelistEntry::Transform(
static_cast<internal::PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
// Freelist exhausted (or page needs provisioning/replacement).
ret = this->SlowPathAlloc(root, flags, size);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(!ret ||
PartitionPage::IsPointerValid(PartitionPage::FromPointer(ret)));
}
#if DCHECK_IS_ON()
if (!ret)
return 0;
// Fill the uninitialized pattern, and write the cookies.
page = PartitionPage::FromPointer(ret);
// TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
// be this->slot_size?
size_t new_slot_size = page->bucket->slot_size;
size_t raw_size = page->get_raw_size();
if (raw_size) {
DCHECK(raw_size == size);
new_slot_size = raw_size;
}
size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
char* char_ret = static_cast<char*>(ret);
// The value given to the application is actually just after the cookie.
ret = char_ret + kCookieSize;
// Debug fill region kUninitializedByte and surround it with 2 cookies.
PartitionCookieWriteValue(char_ret);
memset(ret, kUninitializedByte, no_cookie_size);
PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
#endif
return ret;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_BUCKET_INL_H_
......@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_bucket-inl.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent-inl.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
#include "build/build_config.h"
namespace base {
......
......@@ -32,7 +32,6 @@ struct PartitionBucket {
void Init(uint32_t new_slot_size);
// Note the matching Free() functions are in PartitionPage.
BASE_EXPORT void* Alloc(PartitionRootBase* root, int flags, size_t size);
BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase* root,
int flags,
size_t size);
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_page.h"
namespace base {
namespace internal {
// Returns the PartitionDirectMapExtent metadata for a direct-mapped
// allocation, given its PartitionPage. The extent record is stored at a
// fixed offset of three metadata slots past the page metadata.
ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
PartitionPage* page) {
// Only direct-mapped allocations carry an extent record.
DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_INL_H_
......@@ -5,12 +5,12 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_page.h"
namespace base {
namespace internal {
struct PartitionBucket;
struct PartitionPage;
struct PartitionDirectMapExtent {
PartitionDirectMapExtent* next_extent;
PartitionDirectMapExtent* prev_extent;
......@@ -20,6 +20,13 @@ struct PartitionDirectMapExtent {
ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
};
// Returns the PartitionDirectMapExtent metadata for a direct-mapped
// allocation, given its PartitionPage. The extent record is stored at a
// fixed offset of three metadata slots past the page metadata.
ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
PartitionPage* page) {
// Only direct-mapped allocations carry an extent record.
DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
} // namespace internal
} // namespace base
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
namespace base {
namespace internal {
// Maps a super page base address to the start of its metadata area.
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
// |ptr| must be super-page aligned.
DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
// Maps an allocated pointer to the PartitionPage metadata object that
// manages it, without verifying slot alignment.
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
void* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
char* super_page_ptr =
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
(pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
// Index 0 is invalid because it is the metadata and guard area and
// the last index is invalid because it is a guard page.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
PartitionPage* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
// Partition pages in the same slot span can share the same page object.
// Adjust for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
// Returns the start of the slot span for the PartitionPage.
ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
// A valid |page| must be past the first guard System page and within
// the following metadata region.
DCHECK(super_page_offset > kSystemPageSize);
// Must be less than total metadata region.
DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
// Index 0 is invalid because it is the superpage extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
// pages for the metadata region.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
super_page_base + (partition_page_index << kPartitionPageShift));
return ret;
}
// As FromPointerNoAlignmentCheck(), but additionally DCHECKs that |ptr| is
// slot-aligned within its span.
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
page->bucket->slot_size));
return page;
}
// Returns the location of the stashed raw allocation size for this span, or
// nullptr if the span does not record one.
ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
DCHECK((bucket->slot_size % kSystemPageSize) == 0);
DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
// The raw size is stored in the (otherwise unused) metadata slot of the
// next partition page.
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
}
// Returns the stashed raw allocation size for this span, or 0 when the
// span does not record one (see get_raw_size_ptr()).
ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
const size_t* raw_size_location = get_raw_size_ptr();
if (LIKELY(raw_size_location == nullptr))
return 0;
return *raw_size_location;
}
// Cheap sanity check that |page| belongs to a live partition root: a valid
// root stores the bitwise inverse of its own address in |inverted_self|.
ALWAYS_INLINE bool PartitionPage::IsPointerValid(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
// Frees |ptr| back onto this page's freelist. In debug builds, verifies the
// guard cookies and poisons the slot with kFreedByte first. Detects an
// immediate double free with a CHECK, and (debug-only) a one-level-deep
// double free. When the span drains completely, hands off to FreeSlowPath().
ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// If these asserts fire, you probably corrupted memory.
#if DCHECK_IS_ON()
size_t slot_size = this->bucket->slot_size;
size_t raw_size = get_raw_size();
if (raw_size)
slot_size = raw_size;
PartitionCookieCheckValue(ptr);
PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
kCookieSize);
memset(ptr, kFreedByte, slot_size);
#endif
DCHECK(this->num_allocated_slots);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(!freelist_head || PartitionPage::IsPointerValid(
PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
freelist_head->next));
// Push the slot onto the freelist; Transform() encodes the stored |next|.
internal::PartitionFreelistEntry* entry =
static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
DCHECK(get_raw_size() == 0);
}
}
// A page is active when it has live allocations and still has capacity
// (free slots or unprovisioned slots).
ALWAYS_INLINE bool PartitionPage::is_active() const {
DCHECK(this != get_sentinel_page());
// State predicates are only meaningful on a span's head page.
DCHECK(!page_offset);
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}
// A page is full when every slot in the span is allocated.
ALWAYS_INLINE bool PartitionPage::is_full() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
DCHECK(!freelist_head);
DCHECK(!num_unprovisioned_slots);
}
return ret;
}
// A page is empty when nothing is allocated but its memory is still
// provisioned (non-null freelist).
ALWAYS_INLINE bool PartitionPage::is_empty() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
return (!num_allocated_slots && freelist_head);
}
// A page is decommitted when nothing is allocated and its memory has been
// released back to the system (null freelist).
ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
DCHECK(!num_unprovisioned_slots);
DCHECK(empty_cache_index == -1);
}
return ret;
}
// Records |size| as this span's raw allocation size when the span has a
// raw-size slot (see get_raw_size_ptr()); otherwise does nothing.
ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
size_t* raw_size_location = get_raw_size_ptr();
if (LIKELY(raw_size_location == nullptr))
return;
*raw_size_location = size;
}
// Reinitializes a decommitted page so it can be provisioned and used again.
ALWAYS_INLINE void PartitionPage::Reset() {
DCHECK(this->is_decommitted());
num_unprovisioned_slots = bucket->get_slots_per_span();
DCHECK(num_unprovisioned_slots);
next_page = nullptr;
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_INL_H_
......@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_direct_map_extent-inl.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
namespace base {
namespace internal {
......
......@@ -6,12 +6,13 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_PAGE_H_
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
namespace base {
namespace internal {
struct PartitionBucket;
struct PartitionFreelistEntry;
struct PartitionRootBase;
// Some notes on page states. A page can be in one of four major states:
......@@ -69,7 +70,6 @@ struct PartitionPage {
ALWAYS_INLINE static void* ToPointer(const PartitionPage* page);
ALWAYS_INLINE static PartitionPage* FromPointerNoAlignmentCheck(void* ptr);
ALWAYS_INLINE static PartitionPage* FromPointer(void* ptr);
ALWAYS_INLINE static bool IsPointerValid(PartitionPage* page);
ALWAYS_INLINE const size_t* get_raw_size_ptr() const;
ALWAYS_INLINE size_t* get_raw_size_ptr() {
......@@ -112,6 +112,176 @@ struct PartitionPage {
static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");
// Maps a super page base address to the start of its metadata area.
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
// |ptr| must be super-page aligned.
DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
// Maps an allocated pointer to the PartitionPage metadata object that
// manages it, without verifying slot alignment.
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
void* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
char* super_page_ptr =
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
(pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
// Index 0 is invalid because it is the metadata and guard area and
// the last index is invalid because it is a guard page.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
PartitionPage* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
// Partition pages in the same slot span can share the same page object.
// Adjust for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
// Returns the start of the slot span for the PartitionPage.
ALWAYS_INLINE void* PartitionPage::ToPointer(const PartitionPage* page) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
// A valid |page| must be past the first guard System page and within
// the following metadata region.
DCHECK(super_page_offset > kSystemPageSize);
// Must be less than total metadata region.
DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
// Index 0 is invalid because it is the superpage extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
// pages for the metadata region.
DCHECK(partition_page_index);
DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
super_page_base + (partition_page_index << kPartitionPageShift));
return ret;
}
// As FromPointerNoAlignmentCheck(), but additionally DCHECKs that |ptr| is
// slot-aligned within its span.
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
page->bucket->slot_size));
return page;
}
// Returns the location of the stashed raw allocation size for this span, or
// nullptr if the span does not record one.
ALWAYS_INLINE const size_t* PartitionPage::get_raw_size_ptr() const {
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
DCHECK((bucket->slot_size % kSystemPageSize) == 0);
DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
// The raw size is stored in the (otherwise unused) metadata slot of the
// next partition page.
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
}
// Returns the stashed raw allocation size, or 0 when none is recorded.
ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
const size_t* ptr = get_raw_size_ptr();
if (UNLIKELY(ptr != nullptr))
return *ptr;
return 0;
}
// Frees |ptr| back onto this page's freelist. In debug builds, verifies the
// guard cookies and poisons the slot with kFreedByte first. Detects an
// immediate double free with a CHECK, and (debug-only) a one-level-deep
// double free. When the span drains completely, hands off to FreeSlowPath().
ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// If these asserts fire, you probably corrupted memory.
#if DCHECK_IS_ON()
size_t slot_size = this->bucket->slot_size;
size_t raw_size = get_raw_size();
if (raw_size)
slot_size = raw_size;
PartitionCookieCheckValue(ptr);
PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
kCookieSize);
memset(ptr, kFreedByte, slot_size);
#endif
DCHECK(this->num_allocated_slots);
// TODO(palmer): See if we can afford to make this a CHECK.
// TODO(ajwong): This freelist-head sanity check is disabled here because
// PartitionRootBase is only forward-declared in this header, so
// PartitionRootBase::IsValidPage() cannot be called without inverting the
// include layering. Re-enable once the layering allows it:
// DCHECK(!freelist_head || PartitionRootBase::IsValidPage(
// PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
freelist_head->next));
// Push the slot onto the freelist; Transform() encodes the stored |next|.
internal::PartitionFreelistEntry* entry =
static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
DCHECK(get_raw_size() == 0);
}
}
// A page is active when it has live allocations and still has capacity
// (free slots or unprovisioned slots).
ALWAYS_INLINE bool PartitionPage::is_active() const {
DCHECK(this != get_sentinel_page());
// State predicates are only meaningful on a span's head page.
DCHECK(!page_offset);
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}
// A page is full when every slot in the span is allocated.
ALWAYS_INLINE bool PartitionPage::is_full() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
DCHECK(!freelist_head);
DCHECK(!num_unprovisioned_slots);
}
return ret;
}
// A page is empty when nothing is allocated but its memory is still
// provisioned (non-null freelist).
ALWAYS_INLINE bool PartitionPage::is_empty() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
return (!num_allocated_slots && freelist_head);
}
// A page is decommitted when nothing is allocated and its memory has been
// released back to the system (null freelist).
ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
DCHECK(this != get_sentinel_page());
DCHECK(!page_offset);
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
DCHECK(!num_unprovisioned_slots);
DCHECK(empty_cache_index == -1);
}
return ret;
}
// Records |size| as the raw allocation size, if this span has a raw-size
// slot (see get_raw_size_ptr()).
ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
size_t* raw_size_ptr = get_raw_size_ptr();
if (UNLIKELY(raw_size_ptr != nullptr))
*raw_size_ptr = size;
}
// Reinitializes a decommitted page so it can be provisioned and used again.
ALWAYS_INLINE void PartitionPage::Reset() {
DCHECK(this->is_decommitted());
num_unprovisioned_slots = bucket->get_slots_per_span();
DCHECK(num_unprovisioned_slots);
next_page = nullptr;
}
} // namespace internal
} // namespace base
......
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
namespace base {
namespace internal {
// Recovers the owning root from any PartitionPage: the super page extent
// entry lives at the start of the metadata system page containing |page|
// and stores a pointer back to the root.
ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
return extent_entry->root;
}
// Accounts for |len| newly committed bytes; committed total may never
// exceed the reserved (super page + direct-mapped) total.
ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) {
total_size_of_committed_pages += len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
// Accounts for |len| decommitted bytes; same invariant as above.
ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) {
total_size_of_committed_pages -= len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
// Decommits system pages and updates the root's commit accounting.
ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address,
size_t length) {
::base::DecommitSystemPages(address, length);
DecreaseCommittedPages(length);
}
// Recommits system pages read-write and updates the commit accounting.
// CHECKs because failure to recommit means the allocator cannot proceed.
ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address,
size_t length) {
CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
IncreaseCommittedPages(length);
}
} // namespace internal
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_INL_H_
......@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_root_base-inl.h"
#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page-inl.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "build/build_config.h"
namespace base {
......
......@@ -5,8 +5,11 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_page.h"
namespace base {
namespace internal {
......@@ -51,12 +54,27 @@ struct BASE_EXPORT PartitionRootBase {
// Public API
// Allocates out of the given bucket. Properly, this function should probably
// be in PartitionBucket, but because the implementation needs to be inlined
// for performance, and because it needs to inspect PartitionPage,
// it becomes impossible to have it in PartitionBucket as this causes a
// cyclical dependency on PartitionPage function implementations.
//
// Moving it a layer lower couples PartitionRootBase and PartitionBucket, but
// preserves the layering of the includes.
//
// Note the matching Free() functions are in PartitionPage.
ALWAYS_INLINE void* AllocFromBucket(PartitionBucket* bucket,
int flags,
size_t size);
ALWAYS_INLINE static bool IsValidPage(PartitionPage* page);
ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
// gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
static void (*gOomHandlingFunction)();
NOINLINE void OutOfMemory();
ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length);
......@@ -65,6 +83,94 @@ struct BASE_EXPORT PartitionRootBase {
void DecommitEmptyPages();
};
// Fast-path allocation out of |bucket|: pops the freelist head of the
// bucket's active page when one is available, otherwise defers to
// PartitionBucket::SlowPathAlloc(). When DCHECK_IS_ON(), additionally
// paints the slot with kUninitializedByte and writes guard cookies at both
// ends, returning the pointer just past the leading cookie.
ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket,
int flags,
size_t size) {
PartitionPage* page = bucket->active_pages_head;
// Check that this page is neither full nor freed.
DCHECK(page->num_allocated_slots >= 0);
void* ret = page->freelist_head;
if (LIKELY(ret != 0)) {
// If these DCHECKs fire, you probably corrupted memory.
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(PartitionRootBase::IsValidPage(page));
// All large allocations must go through the slow path to correctly
// update the size metadata.
DCHECK(page->get_raw_size() == 0);
// Pop the freelist: Transform() converts the stored |next| field into a
// usable PartitionFreelistEntry pointer.
internal::PartitionFreelistEntry* new_head =
internal::PartitionFreelistEntry::Transform(
static_cast<internal::PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
// Freelist exhausted (or page needs provisioning/replacement).
ret = bucket->SlowPathAlloc(this, flags, size);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(!ret ||
PartitionRootBase::IsValidPage(PartitionPage::FromPointer(ret)));
}
#if DCHECK_IS_ON()
if (!ret)
return 0;
// Fill the uninitialized pattern, and write the cookies.
page = PartitionPage::FromPointer(ret);
// TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
// be bucket->slot_size?
size_t new_slot_size = page->bucket->slot_size;
size_t raw_size = page->get_raw_size();
if (raw_size) {
DCHECK(raw_size == size);
new_slot_size = raw_size;
}
size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
char* char_ret = static_cast<char*>(ret);
// The value given to the application is actually just after the cookie.
ret = char_ret + kCookieSize;
// Debug fill region kUninitializedByte and surround it with 2 cookies.
PartitionCookieWriteValue(char_ret);
memset(ret, kUninitializedByte, no_cookie_size);
PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
#endif
return ret;
}
// Cheap sanity check that |page| belongs to a live partition root: a valid
// root stores the bitwise inverse of its own address in |inverted_self|.
ALWAYS_INLINE bool PartitionRootBase::IsValidPage(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
// Recovers the owning root from any PartitionPage: the super page extent
// entry lives at the start of the metadata system page containing |page|
// and stores a pointer back to the root.
ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
return extent_entry->root;
}
// Accounts for |len| newly committed bytes; committed total may never
// exceed the reserved (super page + direct-mapped) total.
ALWAYS_INLINE void PartitionRootBase::IncreaseCommittedPages(size_t len) {
total_size_of_committed_pages += len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
// Accounts for |len| decommitted bytes; same invariant as above.
ALWAYS_INLINE void PartitionRootBase::DecreaseCommittedPages(size_t len) {
total_size_of_committed_pages -= len;
DCHECK(total_size_of_committed_pages <=
total_size_of_super_pages + total_size_of_direct_mapped_pages);
}
// Decommits system pages and updates the root's commit accounting.
ALWAYS_INLINE void PartitionRootBase::DecommitSystemPages(void* address,
size_t length) {
::base::DecommitSystemPages(address, length);
DecreaseCommittedPages(length);
}
// Recommits system pages read-write and updates the commit accounting.
// CHECKs because failure to recommit means the allocator cannot proceed.
ALWAYS_INLINE void PartitionRootBase::RecommitSystemPages(void* address,
size_t length) {
CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
IncreaseCommittedPages(length);
}
} // namespace internal
} // namespace base
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment