Commit 8795ca62 authored by Bartek Nowierski, committed by Commit Bot

[PartitionAlloc] Fix stats reported by PartitionDirectMap

PartitionDirectMap and AllocNewSlotSpan have been reporting these
metrics inconsistently. After this CL, the metrics have the following
(consistent) meaning, tied together by the invariant sketched below:
1. total_size_of_super_pages - total virtual address space for normal
   bucket super pages
2. total_size_of_direct_mapped_pages - total virtual address space for
   direct map regions
3. total_size_of_committed_pages - total committed pages for slots
   (doesn't include metadata, bitmaps, or any data outside of the
   regions described in #1 and #2)
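
As a sanity check of that relationship (a minimal sketch, not part of
this CL; it reuses the allocator fixture and counter names from the
Bookkeeping test added below), the three counters always satisfy:

  // Slots are carved out of the reserved regions, so committed space
  // can never exceed the sum of the two reserved-space counters.
  auto& root = *allocator.root();
  EXPECT_LE(root.total_size_of_committed_pages,
            root.total_size_of_super_pages +
                root.total_size_of_direct_mapped_pages);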

Change-Id: I1ac1b3044d15b33dcbd506f133d1fe7fa322277f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2532542
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Auto-Submit: Bartek Nowierski <bartekn@chromium.org>
Cr-Commit-Position: refs/heads/master@{#827564}
parent 4264bf86
@@ -2591,6 +2591,112 @@ TEST_F(PartitionAllocTest, GetUsableSize) {
   }
 }
 
+TEST_F(PartitionAllocTest, Bookkeeping) {
+  auto& root = *allocator.root();
+
+  EXPECT_EQ(0U, root.total_size_of_committed_pages);
+  EXPECT_EQ(0U, root.total_size_of_super_pages);
+  size_t small_size = 1000;
+
+  // A full slot span of size 1 partition page is committed.
+  void* ptr = root.Alloc(small_size - kExtraAllocSize, type_name);
+  size_t expected_committed_size = PartitionPageSize();
+  size_t expected_super_pages_size = kSuperPageSize;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating the same size lands it in the same slot span.
+  ptr = root.Alloc(small_size - kExtraAllocSize, type_name);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating another size commits another slot span.
+  ptr = root.Alloc(2 * small_size - kExtraAllocSize, type_name);
+  expected_committed_size += PartitionPageSize();
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Single-slot slot spans...
+  size_t big_size = kMaxBucketed - SystemPageSize();
+  size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
+  PartitionBucket<base::internal::ThreadSafe>* bucket =
+      &root.buckets[bucket_index];
+  ASSERT_LT(big_size, bucket->get_bytes_per_span());
+  ASSERT_NE(big_size % PartitionPageSize(), 0U);
+  ptr = root.Alloc(big_size - kExtraAllocSize, type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Allocating 2nd time doesn't overflow the super page...
+  void* ptr2 = root.Alloc(big_size - kExtraAllocSize, type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // ... but 3rd time does.
+  void* ptr3 = root.Alloc(big_size - kExtraAllocSize, type_name);
+  expected_committed_size += bucket->get_bytes_per_span();
+  expected_super_pages_size += kSuperPageSize;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Freeing memory doesn't result in decommitting pages right away.
+  root.Free(ptr);
+  root.Free(ptr2);
+  root.Free(ptr3);
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // Now everything should be decommitted. The reserved space for super pages
+  // stays the same and will never go away (by design).
+  root.PurgeMemory(PartitionPurgeDecommitEmptySlotSpans);
+  expected_committed_size = 0;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+
+  // None of the above should affect the direct map space.
+  EXPECT_EQ(0U, root.total_size_of_direct_mapped_pages);
+
+  // For direct map, we commit only as many pages as needed.
+  size_t huge_size = kMaxBucketed + SystemPageSize();
+  ptr = root.Alloc(huge_size - kExtraAllocSize, type_name);
+  expected_committed_size += huge_size;
+  size_t surrounding_pages_size = PartitionPageSize();
+#if !defined(PA_HAS_64_BITS_POINTERS)
+  surrounding_pages_size += SystemPageSize();
+#endif
+  size_t expected_direct_map_size = bits::Align(
+      huge_size + surrounding_pages_size, PageAllocationGranularity());
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+  EXPECT_EQ(expected_direct_map_size, root.total_size_of_direct_mapped_pages);
+
+  // Freeing memory in the direct map decommits pages right away. The address
+  // space is released for re-use too.
+  root.Free(ptr);
+  expected_committed_size -= huge_size;
+  expected_direct_map_size = 0;
+  EXPECT_EQ(expected_committed_size, root.total_size_of_committed_pages);
+  EXPECT_EQ(expected_super_pages_size, root.total_size_of_super_pages);
+  EXPECT_EQ(expected_direct_map_size, root.total_size_of_direct_mapped_pages);
+}
+
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
 TEST_F(PartitionAllocTest, RefCountBasic) {
...
@@ -45,13 +45,13 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
   // guarantees that the region is in the company of regions that have leading
   // guard pages).
   size_t reserved_size = slot_size + PartitionPageSize();
-#if !defined(ARCH_CPU_64_BITS)
+#if !defined(PA_HAS_64_BITS_POINTERS)
   reserved_size += SystemPageSize();
 #endif
   // Round up to the allocation granularity.
   reserved_size = bits::Align(reserved_size, PageAllocationGranularity());
   size_t map_size = reserved_size - PartitionPageSize();
-#if !defined(ARCH_CPU_64_BITS)
+#if !defined(PA_HAS_64_BITS_POINTERS)
   map_size -= SystemPageSize();
 #endif
   PA_DCHECK(slot_size <= map_size);
@@ -71,17 +71,16 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
   if (UNLIKELY(!ptr))
     return nullptr;
 
-  size_t committed_page_size = slot_size + SystemPageSize();
-  root->total_size_of_direct_mapped_pages.fetch_add(committed_page_size,
-                                                    std::memory_order_relaxed);
-  root->IncreaseCommittedPages(committed_page_size);
+  root->total_size_of_direct_mapped_pages.fetch_add(reserved_size,
+                                                    std::memory_order_relaxed);
+  root->IncreaseCommittedPages(slot_size);
 
   char* slot = ptr + PartitionPageSize();
   SetSystemPagesAccess(ptr, SystemPageSize(), PageInaccessible);
   SetSystemPagesAccess(ptr + (SystemPageSize() * 2),
                        PartitionPageSize() - (SystemPageSize() * 2),
                        PageInaccessible);
-#if !defined(ARCH_CPU_64_BITS)
+#if !defined(PA_HAS_64_BITS_POINTERS)
   // TODO(bartekn): Uncommit all the way up to reserved_size, or in case of
   // GigaCage, all the way up to 2MB boundary.
   PA_DCHECK(slot + slot_size + SystemPageSize() <= ptr + reserved_size);
@@ -676,9 +675,12 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
       PA_DCHECK(new_slot_span);
     } else {
       // Third. If we get here, we need a brand new slot span.
+      // TODO(bartekn): For single-slot slot spans, we can use rounded raw_size
+      // as slot_span_committed_size.
       uint16_t num_partition_pages = get_pages_per_slot_span();
-      void* raw_memory = AllocNewSlotSpan(root, flags, num_partition_pages,
-                                          get_bytes_per_span());
+      void* raw_memory =
+          AllocNewSlotSpan(root, flags, num_partition_pages,
+                           /* slot_span_committed_size= */ get_bytes_per_span());
       if (LIKELY(raw_memory != nullptr)) {
         new_slot_span =
             SlotSpanMetadata<thread_safe>::FromPointerNoAlignmentCheck(
...
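
To summarize the accounting in PartitionDirectMap above: the whole
reservation is charged to total_size_of_direct_mapped_pages, while only
the slot itself is charged to total_size_of_committed_pages. A minimal
sketch of the reservation-size rule (the helper name
ComputeDirectMapReservedSize is hypothetical, for illustration only;
the page-size helpers are the ones used in the diff):

size_t ComputeDirectMapReservedSize(size_t slot_size) {
  // A partition page up front holds metadata and guard pages.
  size_t reserved_size = slot_size + PartitionPageSize();
#if !defined(PA_HAS_64_BITS_POINTERS)
  // 32-bit builds also reserve a trailing guard system page.
  reserved_size += SystemPageSize();
#endif
  // Round up to the platform's page-allocation granularity.
  return bits::Align(reserved_size, PageAllocationGranularity());
}

The Bookkeeping test computes expected_direct_map_size with exactly
this formula.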
@@ -27,7 +27,6 @@ PartitionDirectUnmap(SlotSpanMetadata<thread_safe>* slot_span) {
   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
   root->lock_.AssertAcquired();
   auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);
-  size_t unmap_size = extent->map_size;
 
   // Maintain the doubly-linked list of all direct mappings.
   if (extent->prev_extent) {
@@ -41,27 +40,25 @@ PartitionDirectUnmap(SlotSpanMetadata<thread_safe>* slot_span) {
     extent->next_extent->prev_extent = extent->prev_extent;
   }
 
+  root->DecreaseCommittedPages(slot_span->bucket->slot_size);
+
   // Add the size of the trailing guard page (32-bit only) and preceding
   // partition page.
-  unmap_size += PartitionPageSize();
-#if !defined(ARCH_CPU_64_BITS)
-  unmap_size += SystemPageSize();
+  size_t reserved_size = extent->map_size + PartitionPageSize();
+#if !defined(PA_HAS_64_BITS_POINTERS)
+  reserved_size += SystemPageSize();
 #endif
-
-  size_t uncommitted_page_size =
-      slot_span->bucket->slot_size + SystemPageSize();
-  root->DecreaseCommittedPages(uncommitted_page_size);
-  PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
-  root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
-
-  PA_DCHECK(!(unmap_size & PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(!(reserved_size & PageAllocationGranularityOffsetMask()));
+  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reserved_size);
+  root->total_size_of_direct_mapped_pages -= reserved_size;
 
   char* ptr = reinterpret_cast<char*>(
       SlotSpanMetadata<thread_safe>::ToPointer(slot_span));
   // Account for the mapping starting a partition page before the actual
   // allocation address.
   ptr -= PartitionPageSize();
-  return {ptr, unmap_size};
+  return {ptr, reserved_size};
 }
 
 template <bool thread_safe>
...
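
With that, PartitionDirectUnmap reverses exactly the two charges made
in PartitionDirectMap, so the counters return to their pre-allocation
values. A self-contained toy model of this symmetry (standalone C++;
all names are local to this sketch, not PartitionAlloc API):

#include <atomic>
#include <cassert>
#include <cstddef>

// Toy counterparts of the two PartitionRoot counters.
std::atomic<size_t> committed{0};
std::atomic<size_t> direct_mapped{0};

void OnDirectMap(size_t slot_size, size_t reserved_size) {
  direct_mapped.fetch_add(reserved_size, std::memory_order_relaxed);
  committed.fetch_add(slot_size, std::memory_order_relaxed);
}

void OnDirectUnmap(size_t slot_size, size_t reserved_size) {
  committed.fetch_sub(slot_size, std::memory_order_relaxed);
  direct_mapped.fetch_sub(reserved_size, std::memory_order_relaxed);
}

int main() {
  const size_t slot = 1 << 20;               // A 1 MiB direct-mapped slot.
  const size_t reserved = slot + (1 << 14);  // Plus one 16 KiB partition page.
  OnDirectMap(slot, reserved);
  OnDirectUnmap(slot, reserved);
  // Both counters return to zero, as the Bookkeeping test verifies
  // after root.Free(ptr).
  assert(committed.load(std::memory_order_relaxed) == 0);
  assert(direct_mapped.load(std::memory_order_relaxed) == 0);
}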
@@ -148,7 +148,14 @@ struct BASE_EXPORT PartitionRoot {
 #endif
 
   // Bookkeeping.
-  // Invariant: total_size_of_committed_pages <=
+  // - total_size_of_super_pages - total virtual address space for normal
+  //   bucket super pages
+  // - total_size_of_direct_mapped_pages - total virtual address space for
+  //   direct-map regions
+  // - total_size_of_committed_pages - total committed pages for slots (doesn't
+  //   include metadata, bitmaps (if any), or any data outside of the regions
+  //   described in #1 and #2)
+  // Invariant: total_size_of_committed_pages <
   //                total_size_of_super_pages +
   //                total_size_of_direct_mapped_pages.
   // Since all operations on these atomic variables have relaxed semantics, we
...
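
Because all operations on these counters are relaxed, a reader only
gets an approximate snapshot. A sketch of how a stats reader might load
them (this assumes all three fields are std::atomic<size_t>, as the
fetch_add in PartitionDirectMap above implies; it is not an API
prescribed by this CL):

// Relaxed loads suffice for reporting: these values are statistics,
// not synchronization points, so no ordering with other memory
// accesses is required.
size_t committed =
    root->total_size_of_committed_pages.load(std::memory_order_relaxed);
size_t reserved =
    root->total_size_of_super_pages.load(std::memory_order_relaxed) +
    root->total_size_of_direct_mapped_pages.load(std::memory_order_relaxed);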