Commit 41b29808 authored by Sergei Glazunov, committed by Chromium LUCI CQ

Make PartitionAllocGetSlotOffset's implementation match its name

|PartitionAllocGetSlotOffset| currently returns the offset from the
beginning of the user data area rather than the actual slot start.
Remove unnecessary pointer adjustments to make the implementation
match the name.

Similarly, update |PartitionAllocGetSlotStart|.

Bug: 1073933
Change-Id: I82bcd3a397ab01f1a25ae98b0b768b9bd81315a9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2613050
Commit-Queue: Sergei Glazunov <glazunov@google.com>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Cr-Commit-Position: refs/heads/master@{#841056}
parent 28492529
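
For context before the diff: with extras enabled, a normal-bucket slot is laid out as [ref-count | cookie | user data | trailing cookie], so the pointer handed to the caller sits extras_offset bytes past the slot start (the updated tests compute slot_start as ptr - allocator.root()->extras_offset). The sketch below is a hypothetical, self-contained illustration of the invariant the updated tests assert, not Chromium code; Slot, GetSlotOffset, and the constants are made up for the example.

// Hypothetical illustration only. Assumes extras_offset behaves as in the
// tests below: user pointer == slot start + extras_offset.
#include <cassert>
#include <cstddef>

struct Slot {
  char* slot_start;      // beginning of the slot (where the ref-count lives)
  size_t extras_offset;  // ref-count + leading cookie size; 0 if no extras
};

// After this CL: the offset is measured from the slot start. (Before it, the
// equivalent function measured from the beginning of the user data area.)
size_t GetSlotOffset(const Slot& s, char* p) {
  return static_cast<size_t>(p - s.slot_start);
}

int main() {
  char buffer[64] = {};
  Slot s{buffer, 16};
  char* user_ptr = s.slot_start + s.extras_offset;
  // For any pointer user_ptr + offset inside the user data, the slot offset
  // is offset + extras_offset -- exactly what the updated EXPECT_EQs check.
  for (size_t offset = 0; offset < 32; ++offset)
    assert(GetSlotOffset(s, user_ptr + offset) == offset + s.extras_offset);
}
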
@@ -19,7 +19,7 @@
 constexpr bool ThreadSafe = true;
 constexpr bool NotThreadSafe = false;
 #if DCHECK_IS_ON()
-BASE_EXPORT void DCheckGetSlotOffsetIsZero(void* ptr);
+BASE_EXPORT void DCheckGetSlotOffsetIsZero(void*);
 #else
 ALWAYS_INLINE void DCheckGetSlotOffsetIsZero(void*) {}
 #endif
...
@@ -854,6 +854,7 @@ TEST_F(PartitionAllocTest, AllocSizes) {
 // Test that we can fetch the real allocated size after an allocation.
 TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   void* ptr;
+  void* slot_start;
   size_t requested_size, actual_size, predicted_size;
 
   // Allocate something small.
@@ -861,6 +862,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_LT(requested_size, actual_size);
@@ -868,9 +870,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; ++offset) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -882,6 +884,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size, actual_size);
@@ -889,9 +892,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 877) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -907,6 +910,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size + SystemPageSize(), actual_size);
@@ -914,9 +918,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -926,6 +930,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size, actual_size);
@@ -933,9 +938,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -991,7 +996,8 @@ TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
     char* ptr = static_cast<char*>(ptrs[i]);
     for (size_t offset = 0; offset < requested_size; offset += 13) {
       EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size);
-      EXPECT_EQ(PartitionAllocGetSlotOffset(ptr + offset), offset);
+      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
+                offset + allocator.root()->extras_offset);
     }
     allocator.root()->Free(ptr);
   }
@@ -2725,7 +2731,8 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   *ptr1 = kCookie;
 
-  auto* ref_count = PartitionRefCountPointer(ptr1);
+  auto* ref_count =
+      PartitionRefCountPointer(reinterpret_cast<char*>(ptr1) - kPointerOffset);
   EXPECT_TRUE(ref_count->HasOneRef());
 
   ref_count->Acquire();
@@ -2750,7 +2757,8 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   // When the last reference is released, the slot should become reusable.
   EXPECT_TRUE(ref_count->Release());
-  PartitionAllocFreeForRefCounting(ptr1);
+  PartitionAllocFreeForRefCounting(reinterpret_cast<char*>(ptr1) -
+                                   kPointerOffset);
 
   uint64_t* ptr3 = reinterpret_cast<uint64_t*>(
       allocator.root()->Alloc(alloc_size, type_name));
   EXPECT_EQ(ptr1, ptr3);
...
@@ -103,11 +103,9 @@ static constexpr size_t kPartitionRefCountOffset =
 static constexpr size_t kPartitionRefCountOffset = kInSlotRefCountBufferSize;
 #endif
 
-ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* ptr) {
-  DCheckGetSlotOffsetIsZero(ptr);
-  return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
-                                              kPartitionRefCountOffset);
+ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
+  DCheckGetSlotOffsetIsZero(slot_start);
+  return reinterpret_cast<PartitionRefCount*>(slot_start);
 }
 
 #else  // ENABLE_REF_COUNTER_FOR_BACKUP_REF_PTR
...
@@ -634,8 +634,7 @@ PartitionAllocGetSlotSpanForSizeQuery(void* ptr) {
 // This file may end up getting included even when PartitionAlloc isn't used,
 // but the .cc file won't be linked. Exclude the code that relies on it.
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-// Gets the offset from the beginning of the allocated slot, adjusted for cookie
-// (if any).
+// Gets the offset from the beginning of the allocated slot.
 // CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
 // lead to undefined behavior.
 //
@@ -651,7 +650,6 @@ ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
   // The only allocations that don't use ref-count are allocated outside of
   // GigaCage, hence we'd never get here in the `allow_extras = false` case.
   PA_DCHECK(root->allow_extras);
-  ptr = root->AdjustPointerForExtrasSubtract(ptr);
 
   // Get the offset from the beginning of the slot span.
   uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
@@ -679,32 +677,31 @@ ALWAYS_INLINE void* PartitionAllocGetSlotStart(void* ptr) {
   size_t offset_in_slot_span = ptr_addr - slot_span_start;
 
   auto* bucket = slot_span->bucket;
-  return root->AdjustPointerForExtrasAdd(reinterpret_cast<void*>(
-      slot_span_start +
-      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span)));
+  return reinterpret_cast<void*>(
+      slot_span_start +
+      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span));
 }
 
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
 // TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
 // is no longer used.
-ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* ptr) {
-  PA_DCHECK(!internal::PartitionRefCountPointer(ptr)->IsAlive());
+ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* slot_start) {
+  PA_DCHECK(!internal::PartitionRefCountPointer(slot_start)->IsAlive());
 
   auto* slot_span =
-      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(ptr);
+      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(slot_start);
   auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
+  // PartitionRefCount is required to be allocated inside a `PartitionRoot`
+  // that supports extras.
   PA_DCHECK(root->allow_extras);
 
 #ifdef ADDRESS_SANITIZER
-  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
-  // PartitionRefCount is required to be allocated inside a `PartitionRoot`
-  // that supports extras.
-  size_t usable_size = root->AdjustSizeForExtrasSubtract(utilized_slot_size);
+  void* ptr = root->AdjustPointerForExtrasAdd(slot_start);
+  size_t usable_size =
+      root->AdjustSizeForExtrasSubtract(slot_span->GetUtilizedSlotSize());
   ASAN_UNPOISON_MEMORY_REGION(ptr, usable_size);
 #endif
-  void* slot_start = root->AdjustPointerForExtrasSubtract(ptr);
 
 #if DCHECK_IS_ON()
   memset(slot_start, kFreedByte, slot_span->GetUtilizedSlotSize());
 #endif
@@ -865,9 +862,11 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     internal::PartitionCookieCheckValue(char_ptr + usable_size);
 #endif
 
+    void* slot_start = AdjustPointerForExtrasSubtract(ptr);
+
     if (!slot_span->bucket->is_direct_mapped()) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-      auto* ref_count = internal::PartitionRefCountPointer(ptr);
+      auto* ref_count = internal::PartitionRefCountPointer(slot_start);
       // If we are holding the last reference to the allocation, it can be freed
       // immediately. Otherwise, defer the operation and zap the memory to turn
       // potential use-after-free issues into unexploitable crashes.
@@ -885,7 +884,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     }
 
     // Shift ptr to the beginning of the slot.
-    ptr = AdjustPointerForExtrasSubtract(ptr);
+    ptr = slot_start;
   }  // if (allow_extras)
 
 #if DCHECK_IS_ON()
@@ -1194,6 +1193,9 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
   // to save raw_size, i.e. only for large allocations. For small allocations,
   // we have no other choice than putting the cookie at the very end of the
   // slot, thus creating the "empty" space.
+#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
+  void* slot_start = ret;
+#endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   size_t usable_size = AdjustSizeForExtrasSubtract(utilized_slot_size);
   // The value given to the application is just after the ref-count and cookie.
   ret = AdjustPointerForExtrasAdd(ret);
@@ -1221,7 +1223,8 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
   bool is_direct_mapped = raw_size > kMaxBucketed;
   if (allow_extras && !is_direct_mapped) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-    new (internal::PartitionRefCountPointer(ret)) internal::PartitionRefCount();
+    new (internal::PartitionRefCountPointer(slot_start))
+        internal::PartitionRefCount();
 #endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   }
   return ret;
...