Commit 22b2cdc0 authored by Bartek Nowierski, committed by Commit Bot

[PartitionAlloc] Transition to SlotSpanMetadata

Currently all metadata is stored in PartitionPage, which is confusing:
the most commonly used metadata relates to slot spans, yet it is stored
only in the PartitionPage object that corresponds to the first partition
page of the slot span. This CL introduces SlotSpanMetadata to make that
distinction explicit.
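
For orientation, a minimal sketch of the resulting relationship, using only
fields this diff touches (freelist_head, next_slot_span, bucket,
num_allocated_slots); it is an illustration, not the full Chromium definition:

```cpp
// Illustrative only: the real PartitionPage/SlotSpanMetadata carry more state.
struct PartitionFreelistEntry;

template <bool thread_safe>
struct PartitionBucket;

template <bool thread_safe>
struct SlotSpanMetadata {
  PartitionFreelistEntry* freelist_head;
  SlotSpanMetadata<thread_safe>* next_slot_span;
  PartitionBucket<thread_safe>* bucket;
  int num_allocated_slots;
};

template <bool thread_safe>
struct PartitionPage {
  // One PartitionPage entry exists per partition page, but only the entry
  // for the first partition page of a slot span holds meaningful slot-span
  // state.
  SlotSpanMetadata<thread_safe> slot_span_metadata;
};
```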

Change-Id: Id8873dba1c9e3018a8643f4f9c93e694f2edb9c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2466007
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#817117}
parent 16d2ce32
@@ -105,8 +105,8 @@ void PartitionAllocMemoryReclaimer::Reclaim() {
AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
constexpr int kFlags =
PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
constexpr int kFlags = PartitionPurgeDecommitEmptySlotSpans |
PartitionPurgeDiscardUnusedSystemPages;
for (auto* partition : thread_safe_partitions_)
partition->PurgeMemory(kFlags);
......
@@ -11,7 +11,7 @@ namespace base {
namespace internal {
template <bool thread_safe>
struct PartitionPage;
struct SlotSpanMetadata;
BASE_EXPORT size_t PartitionAllocGetSlotOffset(void* ptr);
......
@@ -22,13 +22,13 @@ namespace internal {
template <bool thread_safe>
struct PartitionBucket {
// Accessed most in hot path => goes first.
PartitionPage<thread_safe>* active_pages_head;
SlotSpanMetadata<thread_safe>* active_slot_spans_head;
PartitionPage<thread_safe>* empty_pages_head;
PartitionPage<thread_safe>* decommitted_pages_head;
SlotSpanMetadata<thread_safe>* empty_slot_spans_head;
SlotSpanMetadata<thread_safe>* decommitted_slot_spans_head;
uint32_t slot_size;
uint32_t num_system_pages_per_slot_span : 8;
uint32_t num_full_pages : 24;
uint32_t num_full_slot_spans : 24;
// `slot_size_reciprocal` is used to improve the performance of
// `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
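
The comment above is cut off by the diff context, but the trick it refers to
is ordinary fixed-point division by a precomputed reciprocal. A self-contained
sketch of the idea, with an arbitrary 32-bit shift and example slot size chosen
for the illustration (PartitionAlloc's actual M and value ranges differ):

```cpp
#include <cassert>
#include <cstdint>

// r = ceil(2^M / slot_size), so (offset * r) >> M == offset / slot_size for
// the (bounded) offsets that occur inside a slot span.
constexpr uint64_t kM = 32;  // Illustrative shift, not PartitionAlloc's M.

constexpr uint64_t ComputeReciprocal(uint64_t slot_size) {
  return ((uint64_t{1} << kM) + slot_size - 1) / slot_size;
}

uint64_t SlotOffsetSketch(uint64_t offset_in_slot_span, uint64_t slot_size,
                          uint64_t reciprocal) {
  // slot_number = offset / slot_size, computed with a multiply and a shift.
  const uint64_t slot_number = (offset_in_slot_span * reciprocal) >> kM;
  return offset_in_slot_span - slot_number * slot_size;
}

int main() {
  const uint64_t slot_size = 288;  // Any non-power-of-two bucket size.
  const uint64_t r = ComputeReciprocal(slot_size);
  for (uint64_t offset = 0; offset < (1u << 18); ++offset)
    assert(SlotOffsetSketch(offset, slot_size, r) == offset % slot_size);
  return 0;
}
```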
@@ -57,7 +57,7 @@ struct PartitionBucket {
// there is no need to call memset on fresh pages; the OS has already zeroed
// them. (See |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in PartitionPage.
// Note the matching Free() functions are in SlotSpanMetadata.
BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t raw_size,
@@ -86,17 +86,17 @@ struct PartitionBucket {
return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
}
// This helper function scans a bucket's active page list for a suitable new
// active page. When it finds a suitable new active page (one that has
// free slots and is not empty), it is set as the new active page. If there
// is no suitable new active page, the current active page is set to
// PartitionPage::get_sentinel_page(). As potential pages are scanned, they
// are tidied up according to their state. Empty pages are swept on to the
// empty page list, decommitted pages on to the decommitted page list and full
// pages are unlinked from any list.
// This helper function scans a bucket's active slot span list for a suitable
// new active slot span. When it finds a suitable new active slot span (one
// that has free slots and is not empty), it is set as the new active slot
// span. If there is no suitable new active slot span, the current active slot
// span is set to SlotSpanMetadata::get_sentinel_slot_span(). As potential
// slot spans are scanned, they are tidied up according to their state. Empty
// slot spans are swept on to the empty list, decommitted slot spans on to the
// decommitted list and full slot spans are unlinked from any list.
//
// This is where the guts of the bucket maintenance is done!
bool SetNewActivePage();
bool SetNewActiveSlotSpan();
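
A rough, self-contained sketch of the scan-and-sweep loop described in the
comment above, using simplified stand-in types rather than the real
SlotSpanMetadata (the actual implementation also maintains freelists, counters
and decommit bookkeeping):

```cpp
enum class State { kHasFreeSlots, kEmpty, kDecommitted, kFull };

struct Span {
  State state;
  Span* next = nullptr;
};

struct Bucket {
  Span* active_head = nullptr;
  Span* empty_head = nullptr;
  Span* decommitted_head = nullptr;
  Span sentinel{State::kFull};  // Stands in for get_sentinel_slot_span().

  // Returns true if a usable active slot span was found.
  bool SetNewActiveSlotSpan() {
    Span* span = active_head;
    while (span) {
      Span* next = span->next;
      switch (span->state) {
        case State::kHasFreeSlots:  // Suitable: becomes the new active head.
          active_head = span;
          return true;
        case State::kEmpty:  // Swept onto the empty list.
          span->next = empty_head;
          empty_head = span;
          break;
        case State::kDecommitted:  // Swept onto the decommitted list.
          span->next = decommitted_head;
          decommitted_head = span;
          break;
        case State::kFull:  // Unlinked from any list.
          span->next = nullptr;
          break;
      }
      span = next;
    }
    active_head = &sentinel;  // Nothing suitable found.
    return false;
  }
};
```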
// Returns an offset within an allocation slot.
ALWAYS_INLINE size_t GetSlotOffset(size_t offset_in_slot_span) {
@@ -122,17 +122,17 @@ struct PartitionBucket {
private:
static NOINLINE void OnFull();
// Returns a natural number of PartitionPages (calculated by
// Returns a natural number of partition pages (calculated by
// get_system_pages_per_slot_span()) to allocate from the current
// SuperPage when the bucket runs out of slots.
// super page when the bucket runs out of slots.
ALWAYS_INLINE uint16_t get_pages_per_slot_span();
// Returns the number of system pages in a slot span.
//
// The calculation attemps to find the best number of System Pages to
// The calculation attempts to find the best number of system pages to
// allocate for the given slot_size to minimize wasted space. It uses a
// heuristic that looks at number of bytes wasted after the last slot and
// attempts to account for the PTE usage of each System Page.
// attempts to account for the PTE usage of each system page.
uint8_t get_system_pages_per_slot_span();
// Allocates a new slot span with size |num_partition_pages| from the
@@ -146,16 +146,19 @@ struct PartitionBucket {
// Each bucket allocates a slot span when it runs out of slots.
// A slot span's size is equal to get_pages_per_slot_span() number of
// PartitionPages. This function initializes all PartitionPage within the
// partition pages. This function initializes all PartitionPage within the
// span to point to the first PartitionPage which holds all the metadata
// for the span and registers this bucket as the owner of the span. It does
// NOT put the slots into the bucket's freelist.
ALWAYS_INLINE void InitializeSlotSpan(PartitionPage<thread_safe>* page);
// Allocates one slot from the given |page| and then adds the remainder to
// the current bucket. If the |page| was freshly allocated, it must have been
// passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage<thread_safe>* page);
// for the span (in PartitionPage::SlotSpanMetadata) and registers this bucket
// as the owner of the span. It does NOT put the slots into the bucket's
// freelist.
ALWAYS_INLINE void InitializeSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span);
// Allocates one slot from the given |slot_span| and then adds the remainder
// to the current bucket. If the |slot_span| was freshly allocated, it must
// have been passed through InitializeSlotSpan() first.
ALWAYS_INLINE char* AllocAndFillFreelist(
SlotSpanMetadata<thread_safe>* slot_span);
};
} // namespace internal
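
InitializeSlotSpan() above is documented as making every PartitionPage entry
of a span point at the first entry, which owns the SlotSpanMetadata. A
self-contained sketch of that indirection; the field and function names here
are invented for the illustration, not the real Chromium identifiers:

```cpp
#include <cstddef>
#include <cstdint>

struct SlotSpanMetadataSketch {
  void* freelist_head = nullptr;
  int num_allocated_slots = 0;
};

struct PartitionPageSketch {
  SlotSpanMetadataSketch slot_span_metadata;  // Meaningful on the 1st entry.
  uint8_t pages_back_to_span_start = 0;       // 0 on the 1st entry itself.
};

// InitializeSlotSpan(): record, for each page of the span, how far back the
// first page's metadata entry is.
void InitializeSlotSpanSketch(PartitionPageSketch* first_entry,
                              size_t num_partition_pages) {
  for (size_t i = 0; i < num_partition_pages; ++i)
    first_entry[i].pages_back_to_span_start = static_cast<uint8_t>(i);
}

// Resolving any page entry of the span back to its SlotSpanMetadata.
SlotSpanMetadataSketch* ToSlotSpanMetadata(PartitionPageSketch* entry) {
  return &(entry - entry->pages_back_to_span_start)->slot_span_metadata;
}
```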
......
@@ -20,8 +20,8 @@ struct PartitionDirectMapExtent {
PartitionBucket<thread_safe>* bucket;
size_t map_size; // Mapped size, not including guard pages and meta-data.
ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromPage(
PartitionPage<thread_safe>* page);
ALWAYS_INLINE static PartitionDirectMapExtent<thread_safe>* FromSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span);
};
// Metadata page for direct-mapped allocations.
@@ -39,9 +39,10 @@ struct PartitionDirectMapMetadata {
template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromPage(
PartitionPage<thread_safe>* page) {
PA_DCHECK(page->bucket->is_direct_mapped());
PartitionDirectMapExtent<thread_safe>::FromSlotSpan(
SlotSpanMetadata<thread_safe>* slot_span) {
PA_DCHECK(slot_span->bucket->is_direct_mapped());
auto* page = reinterpret_cast<PartitionPage<thread_safe>*>(slot_span);
// The page passed here is always |page| in |PartitionDirectMapMetadata|
// above. To get the metadata structure, need to get the invalid page address.
auto* first_invalid_page = page - 1;
......
@@ -17,11 +17,12 @@ namespace internal {
// TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
// is no longer used.
void PartitionRefCount::Free() {
auto* page = PartitionPage<ThreadSafe>::FromPointerNoAlignmentCheck(this);
auto* root = PartitionRoot<ThreadSafe>::FromPage(page);
auto* slot_span =
SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(this);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
#ifdef ADDRESS_SANITIZER
size_t utilized_slot_size = page->GetUtilizedSlotSize();
size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
// PartitionRefCount is required to be allocated inside a `PartitionRoot` that
// supports extras.
PA_DCHECK(root->allow_extras);
@@ -31,15 +32,15 @@ void PartitionRefCount::Free() {
#endif
if (root->is_thread_safe) {
root->RawFree(this, page);
root->RawFree(this, slot_span);
return;
}
auto* non_thread_safe_page =
reinterpret_cast<PartitionPage<NotThreadSafe>*>(page);
auto* non_thread_safe_slot_span =
reinterpret_cast<SlotSpanMetadata<NotThreadSafe>*>(slot_span);
auto* non_thread_safe_root =
reinterpret_cast<PartitionRoot<NotThreadSafe>*>(root);
non_thread_safe_root->RawFree(this, non_thread_safe_page);
non_thread_safe_root->RawFree(this, non_thread_safe_slot_span);
}
#endif // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
......
@@ -52,7 +52,7 @@ struct PartitionOptions {
// PartitionAllocator.
template <bool thread_safe>
struct BASE_EXPORT PartitionRoot {
using Page = internal::PartitionPage<thread_safe>;
using SlotSpan = internal::SlotSpanMetadata<thread_safe>;
using Bucket = internal::PartitionBucket<thread_safe>;
using SuperPageExtentEntry =
internal::PartitionSuperPageExtentEntry<thread_safe>;
@@ -92,8 +92,8 @@ struct BASE_EXPORT PartitionRoot {
SuperPageExtentEntry* current_extent = nullptr;
SuperPageExtentEntry* first_extent = nullptr;
DirectMapExtent* direct_map_list = nullptr;
Page* global_empty_page_ring[kMaxFreeableSpans] = {};
int16_t global_empty_page_ring_index = 0;
SlotSpan* global_empty_slot_span_ring[kMaxFreeableSpans] = {};
int16_t global_empty_slot_span_ring_index = 0;
// Integrity check = ~reinterpret_cast<uintptr_t>(this).
uintptr_t inverted_self = 0;
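
global_empty_slot_span_ring above holds recently emptied slot spans. A minimal
sketch of that recycling idea, assuming the behavior that a span evicted from
the ring gets decommitted (stand-in types; kMaxFreeableSpans' real value and
the exact bookkeeping are not shown in this diff):

```cpp
#include <array>
#include <cstdint>

struct SpanSketch {
  bool decommitted = false;
};

inline void Decommit(SpanSketch* span) {
  span->decommitted = true;  // Placeholder for releasing the span's memory.
}

struct RootSketch {
  std::array<SpanSketch*, 16> empty_ring{};  // Ring size is illustrative.
  int16_t ring_index = 0;

  // An empty slot span is parked in the ring so it can be reused cheaply.
  // Once the ring wraps around, the span that has been parked the longest is
  // decommitted to make room.
  void RegisterEmptySlotSpan(SpanSketch* span) {
    if (SpanSketch* evicted = empty_ring[ring_index])
      Decommit(evicted);
    empty_ring[ring_index] = span;
    ring_index = static_cast<int16_t>((ring_index + 1) % empty_ring.size());
  }
};
```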
@@ -119,16 +119,16 @@ struct BASE_EXPORT PartitionRoot {
//
// Allocates out of the given bucket. Properly, this function should probably
// be in PartitionBucket, but because the implementation needs to be inlined
// for performance, and because it needs to inspect PartitionPage,
// for performance, and because it needs to inspect SlotSpanMetadata,
// it becomes impossible to have it in PartitionBucket as this causes a
// cyclical dependency on PartitionPage function implementations.
// cyclical dependency on SlotSpanMetadata function implementations.
//
// Moving it a layer lower couples PartitionRoot and PartitionBucket, but
// preserves the layering of the includes.
void Init(PartitionOptions);
ALWAYS_INLINE static bool IsValidPage(Page* page);
ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
ALWAYS_INLINE static bool IsValidSlotSpan(SlotSpan* slot_span);
ALWAYS_INLINE static PartitionRoot* FromSlotSpan(SlotSpan* slot_span);
ALWAYS_INLINE void IncreaseCommittedPages(size_t len)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -181,14 +181,14 @@ struct BASE_EXPORT PartitionRoot {
// Same as |Free()|, bypasses the allocator hooks.
ALWAYS_INLINE static void FreeNoHooks(void* ptr);
// Immediately frees the pointer bypassing the quarantine.
ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, Page* page);
ALWAYS_INLINE void FreeNoHooksImmediate(void* ptr, SlotSpan* slot_span);
ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
ALWAYS_INLINE size_t GetSize(void* ptr) const;
ALWAYS_INLINE size_t ActualSize(size_t size);
// Frees memory from this partition, if possible, by decommitting pages.
// |flags| is an OR of base::PartitionPurgeFlags.
// Frees memory from this partition, if possible, by decommitting pages or
// even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
void PurgeMemory(int flags);
void DumpStats(const char* partition_name,
@@ -198,7 +198,7 @@ struct BASE_EXPORT PartitionRoot {
static uint16_t SizeToBucketIndex(size_t size);
// Frees memory, with |ptr| as returned by |RawAlloc()|.
ALWAYS_INLINE void RawFree(void* ptr, Page* page);
ALWAYS_INLINE void RawFree(void* ptr, SlotSpan* slot_span);
static void RawFreeStatic(void* ptr);
internal::ThreadCache* thread_cache_for_testing() const {
@@ -241,10 +241,10 @@ struct BASE_EXPORT PartitionRoot {
bool* is_already_zeroed)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
size_t requested_size)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool ReallocDirectMappedInPlace(
internal::SlotSpanMetadata<thread_safe>* slot_span,
size_t requested_size) EXCLUSIVE_LOCKS_REQUIRED(lock_);
void DecommitEmptySlotSpans() EXCLUSIVE_LOCKS_REQUIRED(lock_);
friend class internal::ThreadCache;
};
......
@@ -91,7 +91,7 @@ class PCScan<thread_safe>::PCScanTask final {
void RunOnce() &&;
private:
using Page = PartitionPage<thread_safe>;
using SlotSpan = SlotSpanMetadata<thread_safe>;
struct ScanArea {
uintptr_t* begin = nullptr;
@@ -134,7 +134,7 @@ template <bool thread_safe>
QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
uintptr_t maybe_ptr) const {
// TODO(bikineev): Consider using the bitset in AddressPoolManager::Pool to
// quickly find a super-page.
// quickly find a super page.
const auto super_page_base = maybe_ptr & kSuperPageBaseMask;
auto it = super_pages_.lower_bound(super_page_base);
@@ -145,7 +145,7 @@ QuarantineBitmap* PCScan<thread_safe>::PCScanTask::FindScannerBitmapForPointer(
reinterpret_cast<void*>(maybe_ptr)))
return nullptr;
// We are certain here that |maybe_ptr| points to the superpage payload.
// We are certain here that |maybe_ptr| points to the super page payload.
return QuarantineBitmapFromPointer(QuarantineBitmapType::kScanner,
pcscan_.quarantine_data_.epoch(),
reinterpret_cast<char*>(maybe_ptr));
@@ -176,12 +176,13 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
PA_DCHECK((maybe_ptr & kSuperPageBaseMask) == (base & kSuperPageBaseMask));
auto target_page =
Page::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
PA_DCHECK(&root_ == PartitionRoot<thread_safe>::FromPage(target_page));
auto target_slot_span =
SlotSpan::FromPointerNoAlignmentCheck(reinterpret_cast<void*>(base));
PA_DCHECK(&root_ ==
PartitionRoot<thread_safe>::FromSlotSpan(target_slot_span));
const size_t usable_size = PartitionSizeAdjustSubtract(
root_.allow_extras, target_page->GetUtilizedSlotSize());
root_.allow_extras, target_slot_span->GetUtilizedSlotSize());
// Range check for inner pointers.
if (maybe_ptr >= base + usable_size)
return 0;
@@ -193,7 +194,7 @@ size_t PCScan<thread_safe>::PCScanTask::TryMarkObjectInNormalBucketPool(
pcscan_.quarantine_data_.epoch(),
reinterpret_cast<char*>(base))
->SetBit(base);
return target_page->bucket->slot_size;
return target_slot_span->bucket->slot_size;
}
template <bool thread_safe>
@@ -205,12 +206,12 @@ void PCScan<thread_safe>::PCScanTask::ClearQuarantinedObjects() const {
reinterpret_cast<char*>(super_page));
bitmap->Iterate([allow_extras](uintptr_t ptr) {
auto* object = reinterpret_cast<void*>(ptr);
auto* page = Page::FromPointerNoAlignmentCheck(object);
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
// Use zero as a zapping value to speed up the fast bailout check in
// ScanPartition.
memset(object, 0,
PartitionSizeAdjustSubtract(allow_extras,
page->GetUtilizedSlotSize()));
slot_span->GetUtilizedSlotSize()));
});
}
}
@@ -234,7 +235,7 @@ size_t PCScan<thread_safe>::PCScanTask::ScanPartition() NO_SANITIZE("thread") {
// implemented.
#if defined(PA_HAS_64_BITS_POINTERS)
// On partitions without extras (partitions with aligned allocations),
// pages are not allocated from the GigaCage.
// memory is not allocated from the GigaCage.
if (features::IsPartitionAllocGigaCageEnabled() && root_.allow_extras) {
// With GigaCage, we first do a fast bitmask check to see if the pointer
// points to the normal bucket pool.
@@ -267,9 +268,9 @@ size_t PCScan<thread_safe>::PCScanTask::SweepQuarantine() {
reinterpret_cast<char*>(super_page));
bitmap->Iterate([this, &swept_bytes](uintptr_t ptr) {
auto* object = reinterpret_cast<void*>(ptr);
auto* page = Page::FromPointerNoAlignmentCheck(object);
swept_bytes += page->bucket->slot_size;
root_.FreeNoHooksImmediate(object, page);
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
swept_bytes += slot_span->bucket->slot_size;
root_.FreeNoHooksImmediate(object, slot_span);
});
bitmap->Clear();
}
@@ -293,25 +294,27 @@ PCScan<thread_safe>::PCScanTask::PCScanTask(PCScan& pcscan, Root& root)
}
}
// Take a snapshot of all active pages.
// Take a snapshot of all active slot spans.
static constexpr size_t kScanAreasReservationSlack = 10;
const size_t kScanAreasReservationSize = root_.total_size_of_committed_pages /
PartitionPageSize() /
kScanAreasReservationSlack;
scan_areas_.reserve(kScanAreasReservationSize);
{
// TODO(bikineev): Scan full pages.
// TODO(bikineev): Scan full slot spans.
for (const auto& bucket : root_.buckets) {
for (auto* page = bucket.active_pages_head;
page && page != page->get_sentinel_page(); page = page->next_page) {
for (auto* slot_span = bucket.active_slot_spans_head;
slot_span && slot_span != slot_span->get_sentinel_slot_span();
slot_span = slot_span->next_slot_span) {
// The active list may contain false positives, skip them.
if (page->is_empty() || page->is_decommitted())
if (slot_span->is_empty() || slot_span->is_decommitted())
continue;
auto* payload_begin = static_cast<uintptr_t*>(Page::ToPointer(page));
auto* payload_begin =
static_cast<uintptr_t*>(SlotSpan::ToPointer(slot_span));
auto* payload_end =
payload_begin +
(page->bucket->get_bytes_per_span() / sizeof(uintptr_t));
(slot_span->bucket->get_bytes_per_span() / sizeof(uintptr_t));
scan_areas_.push_back({payload_begin, payload_end});
}
}
......
@@ -36,7 +36,7 @@ template <bool thread_safe>
class BASE_EXPORT PCScan final {
public:
using Root = PartitionRoot<thread_safe>;
using Page = PartitionPage<thread_safe>;
using SlotSpan = SlotSpanMetadata<thread_safe>;
explicit PCScan(Root* root) : root_(root) {}
@@ -45,7 +45,7 @@ class BASE_EXPORT PCScan final {
~PCScan();
ALWAYS_INLINE void MoveToQuarantine(void* ptr, Page* page);
ALWAYS_INLINE void MoveToQuarantine(void* ptr, SlotSpan* slot_span);
private:
class PCScanTask;
@@ -113,15 +113,15 @@ void PCScan<thread_safe>::QuarantineData::GrowLimitIfNeeded() {
template <bool thread_safe>
ALWAYS_INLINE void PCScan<thread_safe>::MoveToQuarantine(void* ptr,
Page* page) {
PA_DCHECK(!page->bucket->is_direct_mapped());
SlotSpan* slot_span) {
PA_DCHECK(!slot_span->bucket->is_direct_mapped());
QuarantineBitmapFromPointer(QuarantineBitmapType::kMutator,
quarantine_data_.epoch(), ptr)
->SetBit(reinterpret_cast<uintptr_t>(ptr));
const bool is_limit_reached =
quarantine_data_.Account(page->bucket->slot_size);
quarantine_data_.Account(slot_span->bucket->slot_size);
if (is_limit_reached) {
// Post a background task to not block the current thread.
ScheduleTask(TaskType::kNonBlocking);
......
@@ -21,7 +21,7 @@ class PCScanTest : public testing::Test {
PartitionOptions::PCScan::kEnabled});
}
~PCScanTest() override {
allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
allocator_.root()->PurgeMemory(PartitionPurgeDecommitEmptySlotSpans |
PartitionPurgeDiscardUnusedSystemPages);
PartitionAllocGlobalUninitForTesting();
}
@@ -47,17 +47,17 @@ class PCScanTest : public testing::Test {
namespace {
using Page = ThreadSafePartitionRoot::Page;
using SlotSpan = ThreadSafePartitionRoot::SlotSpan;
struct FullPageAllocation {
Page* page;
struct FullSlotSpanAllocation {
SlotSpan* slot_span;
void* first;
void* last;
};
// Assumes heap is purged.
FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
size_t object_size) {
FullSlotSpanAllocation GetFullSlotSpan(ThreadSafePartitionRoot& root,
size_t object_size) {
CHECK_EQ(0u, root.total_size_of_committed_pages_for_testing());
const size_t size_with_extra = PartitionSizeAdjustAdd(true, object_size);
@@ -76,24 +76,24 @@ FullPageAllocation GetFullPage(ThreadSafePartitionRoot& root,
last = PartitionPointerAdjustSubtract(true, ptr);
}
EXPECT_EQ(ThreadSafePartitionRoot::Page::FromPointer(first),
ThreadSafePartitionRoot::Page::FromPointer(last));
EXPECT_EQ(SlotSpan::FromPointer(first), SlotSpan::FromPointer(last));
if (bucket.num_system_pages_per_slot_span == NumSystemPagesPerPartitionPage())
EXPECT_EQ(reinterpret_cast<size_t>(first) & PartitionPageBaseMask(),
reinterpret_cast<size_t>(last) & PartitionPageBaseMask());
EXPECT_EQ(num_slots,
static_cast<size_t>(bucket.active_pages_head->num_allocated_slots));
EXPECT_EQ(nullptr, bucket.active_pages_head->freelist_head);
EXPECT_TRUE(bucket.active_pages_head);
EXPECT_TRUE(bucket.active_pages_head != Page::get_sentinel_page());
return {bucket.active_pages_head, PartitionPointerAdjustAdd(true, first),
EXPECT_EQ(num_slots, static_cast<size_t>(
bucket.active_slot_spans_head->num_allocated_slots));
EXPECT_EQ(nullptr, bucket.active_slot_spans_head->freelist_head);
EXPECT_TRUE(bucket.active_slot_spans_head);
EXPECT_TRUE(bucket.active_slot_spans_head !=
SlotSpan::get_sentinel_slot_span());
return {bucket.active_slot_spans_head, PartitionPointerAdjustAdd(true, first),
PartitionPointerAdjustAdd(true, last)};
}
bool IsInFreeList(void* object) {
auto* page = Page::FromPointerNoAlignmentCheck(object);
for (auto* entry = page->freelist_head; entry;
auto* slot_span = SlotSpan::FromPointerNoAlignmentCheck(object);
for (auto* entry = slot_span->freelist_head; entry;
entry = EncodedPartitionFreelistEntry::Decode(entry->next)) {
if (entry == object)
return true;
@@ -138,21 +138,23 @@ TEST_F(PCScanTest, ArbitraryObjectInQuarantine) {
TEST_F(PCScanTest, FirstObjectInQuarantine) {
static constexpr size_t kAllocationSize = 16;
FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_page.first));
FullSlotSpanAllocation full_slot_span =
GetFullSlotSpan(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_slot_span.first));
root().FreeNoHooks(full_page.first);
EXPECT_TRUE(IsInQuarantine(full_page.first));
root().FreeNoHooks(full_slot_span.first);
EXPECT_TRUE(IsInQuarantine(full_slot_span.first));
}
TEST_F(PCScanTest, LastObjectInQuarantine) {
static constexpr size_t kAllocationSize = 16;
FullPageAllocation full_page = GetFullPage(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_page.last));
FullSlotSpanAllocation full_slot_span =
GetFullSlotSpan(root(), kAllocationSize);
EXPECT_FALSE(IsInQuarantine(full_slot_span.last));
root().FreeNoHooks(full_page.last);
EXPECT_TRUE(IsInQuarantine(full_page.last));
root().FreeNoHooks(full_slot_span.last);
EXPECT_TRUE(IsInQuarantine(full_slot_span.last));
}
namespace {
@@ -217,23 +219,22 @@ TEST_F(PCScanTest, DanglingReferenceSameSlotSpanButDifferentPages) {
static const size_t kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages =
static_cast<size_t>(PartitionPageSize() * 0.75);
FullPageAllocation full_page = GetFullPage(
FullSlotSpanAllocation full_slot_span = GetFullSlotSpan(
root(),
PartitionSizeAdjustSubtract(
true, kObjectSizeForSlotSpanConsistingOfMultiplePartitionPages));
// Assert that the first and the last objects are in the same slot span but on
// different partition pages.
ASSERT_EQ(ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
full_page.first),
ThreadSafePartitionRoot::Page::FromPointerNoAlignmentCheck(
full_page.last));
ASSERT_NE(reinterpret_cast<size_t>(full_page.first) & PartitionPageBaseMask(),
reinterpret_cast<size_t>(full_page.last) & PartitionPageBaseMask());
ASSERT_EQ(SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.first),
SlotSpan::FromPointerNoAlignmentCheck(full_slot_span.last));
ASSERT_NE(
reinterpret_cast<size_t>(full_slot_span.first) & PartitionPageBaseMask(),
reinterpret_cast<size_t>(full_slot_span.last) & PartitionPageBaseMask());
// Create two objects, on different partition pages.
auto* value = new (full_page.first) ValueList;
auto* source = new (full_page.last) SourceList;
auto* value = new (full_slot_span.first) ValueList;
auto* source = new (full_slot_span.last) SourceList;
source->next = value;
TestDanglingReference(*this, source, value);
......
@@ -134,16 +134,17 @@ void PartitionStatsDumperImpl::PartitionsDumpBucketStats(
memory_stats->decommittable_bytes);
allocator_dump->AddScalar("discardable_size", "bytes",
memory_stats->discardable_bytes);
// TODO(bartekn): Rename the scalar names.
allocator_dump->AddScalar("total_pages_size", "bytes",
memory_stats->allocated_page_size);
memory_stats->allocated_slot_span_size);
allocator_dump->AddScalar("active_pages", "objects",
memory_stats->num_active_pages);
memory_stats->num_active_slot_spans);
allocator_dump->AddScalar("full_pages", "objects",
memory_stats->num_full_pages);
memory_stats->num_full_slot_spans);
allocator_dump->AddScalar("empty_pages", "objects",
memory_stats->num_empty_pages);
memory_stats->num_empty_slot_spans);
allocator_dump->AddScalar("decommitted_pages", "objects",
memory_stats->num_decommitted_pages);
memory_stats->num_decommitted_slot_spans);
}
} // namespace
......