Commit 22b2cdc0 authored by Bartek Nowierski, committed by Commit Bot

[PartitionAlloc] Transition to SlotSpanMetadata

Currently all metadata is stored in PartitionPage, which is confusing:
the most commonly used metadata relates to slot spans, yet it is stored
only in the PartitionPage object that corresponds to the first partition
page of the slot span. This CL introduces SlotSpanMetadata to clear up
that confusion.
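
For readers less familiar with PartitionAlloc's layout, here is a rough sketch of the relationship the CL is naming. All types and fields below are simplified stand-ins chosen for illustration, not the actual Chromium definitions:

```cpp
#include <cstdint>

struct BucketStub;         // Stand-in for internal::PartitionBucket.
struct FreelistEntryStub;  // Stand-in for the in-slot freelist entry.

// Per-slot-span state. Conceptually, it lives only in the metadata entry of
// the *first* partition page of each slot span.
struct SlotSpanMetadataSketch {
  FreelistEntryStub* freelist_head;
  BucketStub* bucket;
  SlotSpanMetadataSketch* next_slot_span;
  int16_t num_allocated_slots;
};

// One metadata entry exists per partition page. Entries for the second and
// later pages of a slot span only record how to get back to the first one.
struct PartitionPageSketch {
  union {
    SlotSpanMetadataSketch slot_span_metadata;  // First page of a slot span.
    uint8_t slot_span_metadata_offset;          // Subsequent pages.
  };
};
```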

Change-Id: Id8873dba1c9e3018a8643f4f9c93e694f2edb9c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2466007
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#817117}
parent 16d2ce32
@@ -105,8 +105,8 @@ void PartitionAllocMemoryReclaimer::Reclaim() {
   AutoLock lock(lock_);  // Has to protect from concurrent (Un)Register calls.
   TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
-  constexpr int kFlags =
-      PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
+  constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
+                         PartitionPurgeDiscardUnusedSystemPages;
   for (auto* partition : thread_safe_partitions_)
     partition->PurgeMemory(kFlags);
...
@@ -11,7 +11,7 @@ namespace base {
 namespace internal {
 template <bool thread_safe>
-struct PartitionPage;
+struct SlotSpanMetadata;
 BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr);
...
@@ -22,13 +22,13 @@ namespace internal {
 template <bool thread_safe>
 struct PartitionBucket {
   // Accessed most in hot path => goes first.
-  PartitionPage<thread_safe>* active_pages_head;
-  PartitionPage<thread_safe>* empty_pages_head;
-  PartitionPage<thread_safe>* decommitted_pages_head;
+  SlotSpanMetadata<thread_safe>* active_slot_spans_head;
+  SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
+  SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
   uint32_t slot_size;
   uint32_t num_system_pages_per_slot_span : 8;
-  uint32_t num_full_pages : 24;
+  uint32_t num_full_slot_spans : 24;
   // `slot_size_reciprocal` is used to improve the performance of
   // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
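
The `slot_size_reciprocal` comment above describes the standard trick of replacing a hot-path division by `slot_size` with a multiply and a shift. A small self-contained demonstration of the idea; the shift amount, bucket size, and verified range are arbitrary demo values, not constants taken from PartitionAlloc:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kM = 32;
  constexpr uint64_t kSlotSize = 48;  // Any non-power-of-two bucket size.
  // reciprocal = ceil(2^M / slot_size), i.e. (1 / size) * (2 ** M) rounded up.
  constexpr uint64_t kReciprocal = ((1ull << kM) + kSlotSize - 1) / kSlotSize;

  for (uint64_t offset = 0; offset < (1u << 20); ++offset) {
    // Index of the slot containing |offset|, without an integer division.
    uint64_t slot_index = (offset * kReciprocal) >> kM;
    assert(slot_index == offset / kSlotSize);
    // The offset within that slot then also falls out division-free.
    uint64_t offset_in_slot = offset - slot_index * kSlotSize;
    assert(offset_in_slot == offset % kSlotSize);
  }
  return 0;
}
```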
@@ -57,7 +57,7 @@ struct PartitionBucket {
   // there is no need to call memset on fresh pages; the OS has already zeroed
   // them. (See |PartitionRoot::AllocFromBucket|.)
   //
-  // Note the matching Free() functions are in PartitionPage.
+  // Note the matching Free() functions are in SlotSpanMetadata.
   BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
                                            int flags,
                                            size_t raw_size,
@@ -86,17 +86,17 @@ struct PartitionBucket {
     return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
   }
-  // This helper function scans a bucket's active page list for a suitable new
-  // active page. When it finds a suitable new active page (one that has
-  // free slots and is not empty), it is set as the new active page. If there
-  // is no suitable new active page, the current active page is set to
-  // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
-  // are tidied up according to their state. Empty pages are swept on to the
-  // empty page list, decommitted pages on to the decommitted page list and full
-  // pages are unlinked from any list.
+  // This helper function scans a bucket's active slot span list for a suitable
+  // new active slot span. When it finds a suitable new active slot span (one
+  // that has free slots and is not empty), it is set as the new active slot
+  // span. If there is no suitable new active slot span, the current active slot
+  // span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
+  // slot spans are scanned, they are tidied up according to their state. Empty
+  // slot spans are swept on to the empty list, decommitted slot spans on to the
+  // decommitted list and full slot spans are unlinked from any list.
   //
   // This is where the guts of the bucket maintenance is done!
-  bool SetNewActivePage();
+  bool SetNewActiveSlotSpan();
   // Returns an offset within an allocation slot.
   ALWAYS_INLINE size_t GetSlotOffset(size_t offset_in_slot_span) {
@@ -122,17 +122,17 @@ struct PartitionBucket {
  private:
   static NOINLINE void OnFull();
-  // Returns a natural number of PartitionPages (calculated by
+  // Returns a natural number of partition pages (calculated by
   // get_system_pages_per_slot_span()) to allocate from the current
-  // SuperPage when the bucket runs out of slots.
+  // super page when the bucket runs out of slots.
   ALWAYS_INLINE uint16_t get_pages_per_slot_span();
   // Returns the number of system pages in a slot span.
   //
-  // The calculation attemps to find the best number of System Pages to
+  // The calculation attempts to find the best number of system pages to
   // allocate for the given slot_size to minimize wasted space. It uses a
   // heuristic that looks at number of bytes wasted after the last slot and
-  // attempts to account for the PTE usage of each System Page.
+  // attempts to account for the PTE usage of each system page.
   uint8_t get_system_pages_per_slot_span();
   // Allocates a new slot span with size |num_partition_pages| from the
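
The `get_system_pages_per_slot_span()` comment describes a waste-minimizing search. As a rough illustration of the core idea only; the constants and the scoring below are assumptions for this sketch, and the real heuristic additionally weighs committed-page and PTE costs, which this omits:

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kSystemPageSize = 4096;
constexpr size_t kMaxSystemPagesPerSlotSpan = 4;

// Toy version of the "best number of system pages per slot span" search:
// try each candidate span size and keep the one that leaves the fewest
// bytes unused after the last slot.
uint8_t ToySystemPagesPerSlotSpan(size_t slot_size) {
  size_t best_pages = kMaxSystemPagesPerSlotSpan;
  size_t best_waste = SIZE_MAX;
  for (size_t pages = 1; pages <= kMaxSystemPagesPerSlotSpan; ++pages) {
    size_t span_size = pages * kSystemPageSize;
    if (span_size < slot_size)
      continue;  // This span could not hold even a single slot.
    size_t waste = span_size % slot_size;  // Bytes left after the last slot.
    if (waste < best_waste) {
      best_waste = waste;
      best_pages = pages;
    }
  }
  return static_cast<uint8_t>(best_pages);
}
```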
@@ -146,16 +146,19 @@ struct PartitionBucket {
   // Each bucket allocates a slot span when it runs out of slots.
   // A slot span's size is equal to get_pages_per_slot_span() number of
-  // PartitionPages. This function initializes all PartitionPage within the
+  // partition pages. This function initializes all PartitionPage within the
   // span to point to the first PartitionPage which holds all the metadata
-  // for the span and registers this bucket as the owner of the span. It does
-  // NOT put the slots into the bucket's freelist.
-  ALWAYS_INLINE void InitializeSlotSpan(PartitionPage<thread_safe>* page);
+  // for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
+  // as the owner of the span. It does NOT put the slots into the bucket's
+  // freelist.
+  ALWAYS_INLINE void InitializeSlotSpan(
+      SlotSpanMetadata<thread_safe>* slot_span);

-  // Allocates one slot from the given |page| and then adds the remainder to
-  // the current bucket. If the |page| was freshly allocated, it must have been
-  // passed through InitializeSlotSpan() first.
-  ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page);
+  // Allocates one slot from the given |slot_span| and then adds the remainder
+  // to the current bucket. If the |slot_span| was freshly allocated, it must
+  // have been passed through InitializeSlotSpan() first.
+  ALWAYS_INLINE char* AllocAndFillFreelist(
+      SlotSpanMetadata<thread_safe>* slot_span);
 };
 }  // namespace internal
...
@@ -20,8 +20,8 @@ struct PartitionDirectMapExtent {
   PartitionBucket<thread_safe>* bucket;
   size_t map_size;  // Mapped size, not including guard pages and meta-data.
-  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromPage(
-      PartitionPage<thread_safe>* page);
+  ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
+      SlotSpanMetadata<thread_safe>* slot_span);
 };
 // Metadata page for direct-mapped allocations.
@@ -39,9 +39,10 @@ struct PartitionDirectMapMetadata {
 template <bool thread_safe>
 ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
-PartitionDirectMapExtent<thread_safe>::FromPage(
-    PartitionPage<thread_safe>* page) {
-  PA_DCHECK(page->bucket->is_direct_mapped());
+PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
+    SlotSpanMetadata<thread_safe>* slot_span) {
+  PA_DCHECK(slot_span->bucket->is_direct_mapped());
+  auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
   // The page passed here is always |page| in |PartitionDirectMapMetadata|
   // above. To get the metadata structure, need to get the invalid page address.
   auto* first_invalid_page = page - 1;
...
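
The `page - 1` step relies on the direct-map metadata layout: the entry handed to `FromSlotSpan()` is known to be preceded by exactly one other entry inside the enclosing `PartitionDirectMapMetadata`, so stepping back one entry and casting recovers the whole block. A generic, hypothetical illustration of that pointer pattern (types invented here, not the actual Chromium layout):

```cpp
struct PageEntrySketch {
  int payload;
};

struct DirectMapMetadataSketch {
  PageEntrySketch first_invalid_page;  // Never used as a real page entry.
  PageEntrySketch page;                // What callers hold a pointer to.
  int direct_map_extent;               // Stand-in for the extent data.
};

DirectMapMetadataSketch* FromPageEntrySketch(PageEntrySketch* page) {
  // |page| is known to be the second entry of the block, so the block begins
  // exactly one entry earlier (mirroring |page - 1| in the real code).
  PageEntrySketch* first_invalid_page = page - 1;
  return reinterpret_cast<DirectMapMetadataSketch*>(first_invalid_page);
}
```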
@@ -17,11 +17,12 @@ namespace internal {
 // TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
 // is no longer used.
 void PartitionRefCount::Free() {
-  auto* page = PartitionPage<ThreadSafe>::FromPointerNoAlignmentCheck(this);
-  auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
+  auto* slot_span =
+      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(this);
+  auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
 #ifdef ADDRESS_SANITIZER
-  size_t utilized_slot_size = page->GetUtilizedSlotSize();
+  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
   // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
   // supports extras.
   PA_DCHECK(root->allow_extras);
@@ -31,15 +32,15 @@ void PartitionRefCount::Free() {
 #endif
   if (root->is_thread_safe) {
-    root->RawFree(this, page);
+    root->RawFree(this, slot_span);
     return;
   }
-  auto* non_thread_safe_page =
-      reinterpret_cast<PartitionPage<NotThreadSafe>*>(page);
+  auto* non_thread_safe_slot_span =
+      reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
   auto* non_thread_safe_root =
       reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
-  non_thread_safe_root->RawFree(this, non_thread_safe_page);
+  non_thread_safe_root->RawFree(this, non_thread_safe_slot_span);
 }
 #endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
...
@@ -52,7 +52,7 @@ struct PartitionOptions {
 // PartitionAllocator.
 template <bool thread_safe>
 struct BASE_EXPORT PartitionRoot {
-  using Page = internal::PartitionPage<thread_safe>;
+  using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
   using Bucket = internal::PartitionBucket<thread_safe>;
   using SuperPageExtentEntry =
       internal::PartitionSuperPageExtentEntry<thread_safe>;
@@ -92,8 +92,8 @@ struct BASE_EXPORT PartitionRoot {
   SuperPageExtentEntry* current_extent = nullptr;
   SuperPageExtentEntry* first_extent = nullptr;
   DirectMapExtent* direct_map_list = nullptr;
-  Page* global_empty_page_ring[kMaxFreeableSpans] = {};
-  int16_t global_empty_page_ring_index = 0;
+  SlotSpan* global_empty_slot_span_ring[kMaxFreeableSpans] = {};
+  int16_t global_empty_slot_span_ring_index = 0;
   // Integrity check = ~reinterpret_cast<uintptr_t>(this).
   uintptr_t inverted_self = 0;
@@ -119,16 +119,16 @@ struct BASE_EXPORT PartitionRoot {
   //
   // Allocates out of the given bucket. Properly, this function should probably
   // be in PartitionBucket, but because the implementation needs to be inlined
-  // for performance, and because it needs to inspect PartitionPage,
+  // for performance, and because it needs to inspect SlotSpanMetadata,
   // it becomes impossible to have it in PartitionBucket as this causes a
-  // cyclical dependency on PartitionPage function implementations.
+  // cyclical dependency on SlotSpanMetadata function implementations.
   //
   // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
   // preserves the layering of the includes.
   void Init(PartitionOptions);
-  ALWAYS_INLINE static bool IsValidPage(Page* page);
-  ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
+  ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
+  ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
   ALWAYS_INLINE void IncreaseCommittedPages(size_t len)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -181,14 +181,14 @@ struct BASE_EXPORT PartitionRoot {
   // Same as |Free()|, bypasses the allocator hooks.
   ALWAYS_INLINE static void FreeNoHooks(void* ptr);
   // Immediately frees the pointer bypassing the quarantine.
-  ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, Page* page);
+  ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, SlotSpan* slot_span);
   ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
   ALWAYS_INLINE size_t GetSize(void* ptr) const;
   ALWAYS_INLINE size_t ActualSize(size_t size);
-  // Frees memory from this partition, if possible, by decommitting pages.
-  // |flags| is an OR of base::PartitionPurgeFlags.
+  // Frees memory from this partition, if possible, by decommitting pages or
+  // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
   void PurgeMemory(int flags);
   void DumpStats(const char* partition_name,
@@ -198,7 +198,7 @@ struct BASE_EXPORT PartitionRoot {
   static uint16_t SizeToBucketIndex(size_t size);
   // Frees memory, with |ptr| as returned by |RawAlloc()|.
-  ALWAYS_INLINE void RawFree(void* ptr, Page* page);
+  ALWAYS_INLINE void RawFree(void* ptr, SlotSpan* slot_span);
   static void RawFreeStatic(void* ptr);
   internal::ThreadCache* thread_cache_for_testing() const {
@@ -241,10 +241,10 @@ struct BASE_EXPORT PartitionRoot {
                          bool* is_already_zeroed)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
-                                  size_t requested_size)
-      EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  bool ReallocDirectMappedInPlace(
+      internal::SlotSpanMetadata<thread_safe>* slot_span,
+      size_t requested_size) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void DecommitEmptySlotSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
   friend class internal::ThreadCache;
 };
...
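
The `inverted_self` field above ("Integrity check = ~reinterpret_cast<uintptr_t>(this)") is what lets `IsValidSlotSpan()` cheaply sanity-check the root recovered by `FromSlotSpan()`. A minimal sketch of that self-check pattern, with a simplified stand-in for PartitionRoot (the real code first derives the root from the slot span's super-page metadata):

```cpp
#include <cstdint>

struct RootSketch {
  uintptr_t inverted_self = 0;

  void Init() { inverted_self = ~reinterpret_cast<uintptr_t>(this); }

  // A wild or forged root pointer is very unlikely to satisfy this identity.
  bool LooksValid() const {
    return inverted_self == ~reinterpret_cast<uintptr_t>(this);
  }
};
```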
@@ -91,7 +91,7 @@ class PCScan<thread_safe>::PCScanTask final {
   void RunOnce() &&;
  private:
-  using Page = PartitionPage<thread_safe>;
+  using SlotSpan = SlotSpanMetadata<thread_safe>;
   struct ScanArea {
     uintptr_t* begin = nullptr;
@@ -134,7 +134,7 @@ template <bool thread_safe>
 QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
     uintptr_t maybe_ptr) const {
   // TODO(bikineev): Consider using the bitset in AddressPoolManager::Pool to
-  // quickly find a super-page.
+  // quickly find a super page.
   const auto super_page_base = maybe_ptr & kSuperPageBaseMask;
   auto it = super_pages_.lower_bound(super_page_base);
@@ -145,7 +145,7 @@ QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
                                       reinterpret_cast<void*>(maybe_ptr)))
     return nullptr;
-  // We are certain here that |maybe_ptr| points to the superpage payload.
+  // We are certain here that |maybe_ptr| points to the super page payload.
   return QuarantineBitmapFromPointer(QuarantineBitmapType::kScanner,
                                      pcscan_.quarantine_data_.epoch(),
                                      reinterpret_cast<char*>(maybe_ptr));
@@ -176,12 +176,13 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
   PA_DCHECK((maybe_ptr & kSuperPageBaseMask) == (base & kSuperPageBaseMask));
-  auto target_page =
-      Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
-  PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));
+  auto target_slot_span =
+      SlotSpan::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
+  PA_DCHECK(&root_ ==
+            PartitionRoot<thread_safe>::FromSlotSpan(target_slot_span));
   const size_t usable_size = PartitionSizeAdjustSubtract(
-      root_.allow_extras, target_page->GetUtilizedSlotSize());
+      root_.allow_extras, target_slot_span->GetUtilizedSlotSize());
   // Range check for inner pointers.
   if (maybe_ptr >= base + usable_size)
     return 0;
@@ -193,7 +194,7 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
                               pcscan_.quarantine_data_.epoch(),
                               reinterpret_cast<char*>(base))
       ->SetBit(base);
-  return target_page->bucket->slot_size;
+  return target_slot_span->bucket->slot_size;
 }
 template <bool thread_safe>
@@ -205,12 +206,12 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
                                         reinterpret_cast<char*>(super_page));
     bitmap->Iterate([allow_extras](uintptr_t ptr) {
       auto* object = reinterpret_cast<void*>(ptr);
-      auto* page = Page::FromPointerNoAlignmentCheck(object);
+      auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
       // Use zero as a zapping value to speed up the fast bailout check in
       // ScanPartition.
       memset(object, 0,
              PartitionSizeAdjustSubtract(allow_extras,
-                                         page->GetUtilizedSlotSize()));
+                                         slot_span->GetUtilizedSlotSize()));
     });
   }
 }
@@ -234,7 +235,7 @@ size_t PCScan<thread_safe>::PCScanTask::ScanPartition() NO_SANITIZE("thread") {
   // implemented.
 #if defined(PA_HAS_64_BITS_POINTERS)
   // On partitions without extras (partitions with aligned allocations),
-  // pages are not allocated from the GigaCage.
+  // memory is not allocated from the GigaCage.
   if (features::IsPartitionAllocGigaCageEnabled() && root_.allow_extras) {
     // With GigaCage, we first do a fast bitmask check to see if the pointer
     // points to the normal bucket pool.
@@ -267,9 +268,9 @@ size_t PCScan<thread_safe>::PCScanTask::SweepQuarantine() {
                                         reinterpret_cast<char*>(super_page));
     bitmap->Iterate([this, &swept_bytes](uintptr_t ptr) {
       auto* object = reinterpret_cast<void*>(ptr);
-      auto* page = Page::FromPointerNoAlignmentCheck(object);
-      swept_bytes += page->bucket->slot_size;
-      root_.FreeNoHooksImmediate(object, page);
+      auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
+      swept_bytes += slot_span->bucket->slot_size;
+      root_.FreeNoHooksImmediate(object, slot_span);
     });
     bitmap->Clear();
   }
@@ -293,25 +294,27 @@ PCScan<thread_safe>::PCScanTask::PCScanTask(PCScan& pcscan, Root& root)
     }
   }
-  // Take a snapshot of all active pages.
+  // Take a snapshot of all active slot spans.
   static constexpr size_t kScanAreasReservationSlack = 10;
   const size_t kScanAreasReservationSize = root_.total_size_of_committed_pages /
                                            PartitionPageSize() /
                                            kScanAreasReservationSlack;
   scan_areas_.reserve(kScanAreasReservationSize);
   {
-    // TODO(bikineev): Scan full pages.
+    // TODO(bikineev): Scan full slot spans.
     for (const auto& bucket : root_.buckets) {
-      for (auto* page = bucket.active_pages_head;
-           page && page != page->get_sentinel_page(); page = page->next_page) {
+      for (auto* slot_span = bucket.active_slot_spans_head;
+           slot_span && slot_span != slot_span->get_sentinel_slot_span();
+           slot_span = slot_span->next_slot_span) {
         // The active list may contain false positives, skip them.
-        if (page->is_empty() || page->is_decommitted())
+        if (slot_span->is_empty() || slot_span->is_decommitted())
           continue;
-        auto* payload_begin = static_cast<uintptr_t*>(Page::ToPointer(page));
+        auto* payload_begin =
+            static_cast<uintptr_t*>(SlotSpan::ToPointer(slot_span));
         auto* payload_end =
             payload_begin +
-            (page->bucket->get_bytes_per_span() / sizeof(uintptr_t));
+            (slot_span->bucket->get_bytes_per_span() / sizeof(uintptr_t));
         scan_areas_.push_back({payload_begin, payload_end});
       }
     }
...
@@ -36,7 +36,7 @@ template <bool thread_safe>
 class BASE_EXPORT PCScan final {
  public:
   using Root = PartitionRoot<thread_safe>;
-  using Page = PartitionPage<thread_safe>;
+  using SlotSpan = SlotSpanMetadata<thread_safe>;
   explicit PCScan(Root* root) : root_(root) {}
@@ -45,7 +45,7 @@ class BASE_EXPORT PCScan final {
   ~PCScan();
-  ALWAYS_INLINE void MoveToQuarantine(void* ptr, Page* page);
+  ALWAYS_INLINE void MoveToQuarantine(void* ptr, SlotSpan* slot_span);
  private:
   class PCScanTask;
@@ -113,15 +113,15 @@ void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded() {
 template <bool thread_safe>
 ALWAYS_INLINE void PCScan<thread_safe>::MoveToQuarantine(void* ptr,
-                                                         Page* page) {
-  PA_DCHECK(!page->bucket->is_direct_mapped());
+                                                         SlotSpan* slot_span) {
+  PA_DCHECK(!slot_span->bucket->is_direct_mapped());
   QuarantineBitmapFromPointer(QuarantineBitmapType::kMutator,
                               quarantine_data_.epoch(), ptr)
       ->SetBit(reinterpret_cast<uintptr_t>(ptr));
   const bool is_limit_reached =
-      quarantine_data_.Account(page->bucket->slot_size);
+      quarantine_data_.Account(slot_span->bucket->slot_size);
   if (is_limit_reached) {
     // Post a background task to not block the current thread.
     ScheduleTask(TaskType::kNonBlocking);
...
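
`MoveToQuarantine()` above amounts to: mark the slot in the mutator bitmap, add its slot size to a running total, and schedule a scan once a limit is crossed. A minimal sketch of the size-accounting part only; the threshold policy and memory ordering here are assumptions for this illustration, not QuarantineData's actual behavior:

```cpp
#include <atomic>
#include <cstddef>

class QuarantineAccountingSketch {
 public:
  explicit QuarantineAccountingSketch(size_t limit) : limit_(limit) {}

  // Returns true when the accumulated quarantined bytes cross the limit,
  // i.e. when the caller should schedule a scan task.
  bool Account(size_t bytes) {
    size_t before = size_.fetch_add(bytes, std::memory_order_relaxed);
    return before + bytes > limit_;
  }

  void ResetAfterScan() { size_.store(0, std::memory_order_relaxed); }

 private:
  std::atomic<size_t> size_{0};
  const size_t limit_;
};
```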
@@ -21,7 +21,7 @@ class PCScanTest : public testing::Test {
                                        PartitionOptions::PCScan::kEnabled});
   }
   ~PCScanTest() override {
-    allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
+    allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
                                    PartitionPurgeDiscardUnusedSystemPages);
     PartitionAllocGlobalUninitForTesting();
   }
@@ -47,17 +47,17 @@ class PCScanTest : public testing::Test {
 namespace {
-using Page = ThreadSafePartitionRoot::Page;
+using SlotSpan = ThreadSafePartitionRoot::SlotSpan;
-struct FullPageAllocation {
-  Page* page;
+struct FullSlotSpanAllocation {
+  SlotSpan* slot_span;
   void* first;
   void* last;
 };
 // Assumes heap is purged.
-FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
-                               size_t object_size) {
+FullSlotSpanAllocation GetFullSlotSpan(ThreadSafePartitionRoot& root,
+                                       size_t object_size) {
   CHECK_EQ(0u, root.total_size_of_committed_pages_for_testing());
   const size_t size_with_extra = PartitionSizeAdjustAdd(true, object_size);
@@ -76,24 +76,24 @@ FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
     last = PartitionPointerAdjustSubtract(true, ptr);
   }
-  EXPECT_EQ(ThreadSafePartitionRoot::Page::FromPointer(first),
-            ThreadSafePartitionRoot::Page::FromPointer(last));
+  EXPECT_EQ(SlotSpan::FromPointer(first), SlotSpan::FromPointer(last));
   if (bucket.num_system_pages_per_slot_span == NumSystemPagesPerPartitionPage())
     EXPECT_EQ(reinterpret_cast<size_t>(first) & PartitionPageBaseMask(),
               reinterpret_cast<size_t>(last) & PartitionPageBaseMask());
-  EXPECT_EQ(num_slots,
-            static_cast<size_t>(bucket.active_pages_head->num_allocated_slots));
-  EXPECT_EQ(nullptr, bucket.active_pages_head->freelist_head);
-  EXPECT_TRUE(bucket.active_pages_head);
-  EXPECT_TRUE(bucket.active_pages_head != Page::get_sentinel_page());
+  EXPECT_EQ(num_slots, static_cast<size_t>(
+                           bucket.active_slot_spans_head->num_allocated_slots));
+  EXPECT_EQ(nullptr, bucket.active_slot_spans_head->freelist_head);
+  EXPECT_TRUE(bucket.active_slot_spans_head);
+  EXPECT_TRUE(bucket.active_slot_spans_head !=
+              SlotSpan::get_sentinel_slot_span());
-  return {bucket.active_pages_head, PartitionPointerAdjustAdd(true, first),
+  return {bucket.active_slot_spans_head, PartitionPointerAdjustAdd(true, first),
           PartitionPointerAdjustAdd(true, last)};
 }
 bool IsInFreeList(void* object) {
-  auto* page = Page::FromPointerNoAlignmentCheck(object);
-  for (auto* entry = page->freelist_head; entry;
+  auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
+  for (auto* entry = slot_span->freelist_head; entry;
        entry = EncodedPartitionFreelistEntry::Decode(entry->next)) {
     if (entry == object)
       return true;
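
`IsInFreeList()` above simply walks the slot span's intrusive freelist, decoding each stored next pointer via `EncodedPartitionFreelistEntry::Decode()`. A generic sketch of such a traversal with a made-up encoding; PartitionAlloc's real pointer transformation is different:

```cpp
#include <cstdint>

// Next pointers are stored in a transformed ("encoded") form and decoded on
// each hop. The bitwise complement used here is purely illustrative.
struct EncodedFreelistEntrySketch {
  uintptr_t encoded_next = Encode(nullptr);  // Default: end of list.

  static uintptr_t Encode(EncodedFreelistEntrySketch* next) {
    return ~reinterpret_cast<uintptr_t>(next);
  }
  static EncodedFreelistEntrySketch* Decode(uintptr_t encoded) {
    return reinterpret_cast<EncodedFreelistEntrySketch*>(~encoded);
  }
};

// Same shape as IsInFreeList() in the test above: follow head -> next -> ...
// until |object| is found or the list ends.
bool IsInFreeListSketch(EncodedFreelistEntrySketch* head, void* object) {
  for (EncodedFreelistEntrySketch* entry = head; entry;
       entry = EncodedFreelistEntrySketch::Decode(entry->encoded_next)) {
    if (entry == object)
      return true;
  }
  return false;
}
```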
@@ -138,21 +138,23 @@ TEST_F(PCScanTest, ArbitraryObjectInQuarantine) {
 TEST_F(PCScanTest, FirstObjectInQuarantine) {
   static constexpr size_t kAllocationSize = 16;
-  FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_page.first));
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.first));
-  root().FreeNoHooks(full_page.first);
-  EXPECT_TRUE(IsInQuarantine(full_page.first));
+  root().FreeNoHooks(full_slot_span.first);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
 }
 TEST_F(PCScanTest, LastObjectInQuarantine) {
   static constexpr size_t kAllocationSize = 16;
-  FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
-  EXPECT_FALSE(IsInQuarantine(full_page.last));
+  FullSlotSpanAllocation full_slot_span =
+      GetFullSlotSpan(root(), kAllocationSize);
+  EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
-  root().FreeNoHooks(full_page.last);
-  EXPECT_TRUE(IsInQuarantine(full_page.last));
+  root().FreeNoHooks(full_slot_span.last);
+  EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
 }
 namespace {
@@ -217,23 +219,22 @@ TEST_F(PCScanTest, DanglingReferenceSameSlotSpanButDifferentPages) {
   static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
       static_cast<size_t>(PartitionPageSize() * 0.75);
-  FullPageAllocation full_page = GetFullPage(
+  FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
       root(),
       PartitionSizeAdjustSubtract(
           true, kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
   // Assert that the first and the last objects are in the same slot span but on
   // different partition pages.
-  ASSERT_EQ(ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
-                full_page.first),
-            ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
-                full_page.last));
-  ASSERT_NE(reinterpret_cast<size_t>(full_page.first) & PartitionPageBaseMask(),
-            reinterpret_cast<size_t>(full_page.last) & PartitionPageBaseMask());
+  ASSERT_EQ(SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.first),
+            SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.last));
+  ASSERT_NE(
+      reinterpret_cast<size_t>(full_slot_span.first) & PartitionPageBaseMask(),
+      reinterpret_cast<size_t>(full_slot_span.last) & PartitionPageBaseMask());
   // Create two objects, on different partition pages.
-  auto* value = new (full_page.first) ValueList;
-  auto* source = new (full_page.last) SourceList;
+  auto* value = new (full_slot_span.first) ValueList;
+  auto* source = new (full_slot_span.last) SourceList;
   source->next = value;
   TestDanglingReference(*this, source, value);
...
@@ -134,16 +134,17 @@ void PartitionStatsDumperImpl::PartitionsDumpBucketStats(
                             memory_stats->decommittable_bytes);
   allocator_dump->AddScalar("discardable_size", "bytes",
                             memory_stats->discardable_bytes);
+  // TODO(bartekn): Rename the scalar names.
   allocator_dump->AddScalar("total_pages_size", "bytes",
-                            memory_stats->allocated_page_size);
+                            memory_stats->allocated_slot_span_size);
   allocator_dump->AddScalar("active_pages", "objects",
-                            memory_stats->num_active_pages);
+                            memory_stats->num_active_slot_spans);
   allocator_dump->AddScalar("full_pages", "objects",
-                            memory_stats->num_full_pages);
+                            memory_stats->num_full_slot_spans);
   allocator_dump->AddScalar("empty_pages", "objects",
-                            memory_stats->num_empty_pages);
+                            memory_stats->num_empty_slot_spans);
   allocator_dump->AddScalar("decommitted_pages", "objects",
-                            memory_stats->num_decommitted_pages);
+                            memory_stats->num_decommitted_slot_spans);
 }
 }  // namespace
...