Commit 41b29808 authored by Sergei Glazunov, committed by Chromium LUCI CQ

Make PartitionAllocGetSlotOffset's implementation match its name

|PartitionAllocGetSlotOffset| currently returns the offset from the
beginning of the user data area rather than from the actual slot
start. Remove the unnecessary pointer adjustments so the
implementation matches the name.

Similarly, update |PartitionAllocGetSlotStart|.

Bug: 1073933
Change-Id: I82bcd3a397ab01f1a25ae98b0b768b9bd81315a9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2613050
Commit-Queue: Sergei Glazunov <glazunov@google.com>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Cr-Commit-Position: refs/heads/master@{#841056}
parent 28492529
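
For context on the diff that follows: the updated tests assert that, for any
pointer into the user data area, the slot offset now includes the extras
(ref-count and cookie) that precede the user data, and that the slot start
lies extras_offset bytes before the pointer returned by Alloc(). Here is a
minimal, self-contained C++ sketch of that invariant. It is a toy model only:
kExtrasOffset, kSlotSize, and GetSlotOffset are hypothetical stand-ins for
PartitionAlloc's extras_offset and PartitionAllocGetSlotOffset, and the sizes
are made up.

#include <cassert>
#include <cstddef>

namespace {

// Hypothetical extras area (ref-count + cookie) placed at the slot start.
constexpr size_t kExtrasOffset = 16;  // slot start -> user data
constexpr size_t kSlotSize = 64;      // total slot size (made up)

// New semantics: the offset is measured from the slot start, so a pointer
// into the user data area sits kExtrasOffset plus its user-data offset
// past the slot start.
size_t GetSlotOffset(const char* slot_start, const char* ptr) {
  return static_cast<size_t>(ptr - slot_start);
}

}  // namespace

int main() {
  char slot[kSlotSize] = {};
  char* slot_start = slot;
  char* user_ptr = slot_start + kExtrasOffset;  // what Alloc() would return

  for (size_t offset = 0; offset < kSlotSize - kExtrasOffset; ++offset) {
    // Old behavior: the result equaled plain `offset` (relative to user data).
    // New behavior: the extras are included, matching the function's name.
    assert(GetSlotOffset(slot_start, user_ptr + offset) ==
           offset + kExtrasOffset);
  }
  return 0;
}

Under the old semantics the test assertions compared against plain offset;
the commit updates them to offset + allocator.root()->extras_offset,
mirroring the relationship above.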
@@ -19,7 +19,7 @@ constexpr bool ThreadSafe = true;
 constexpr bool NotThreadSafe = false;
 
 #if DCHECK_IS_ON()
-BASE_EXPORT void DCheckGetSlotOffsetIsZero(void* ptr);
+BASE_EXPORT void DCheckGetSlotOffsetIsZero(void*);
 #else
 ALWAYS_INLINE void DCheckGetSlotOffsetIsZero(void*) {}
 #endif
@@ -854,6 +854,7 @@ TEST_F(PartitionAllocTest, AllocSizes) {
 // Test that we can fetch the real allocated size after an allocation.
 TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   void* ptr;
+  void* slot_start;
   size_t requested_size, actual_size, predicted_size;
 
   // Allocate something small.
@@ -861,6 +862,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_LT(requested_size, actual_size);
@@ -868,9 +870,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; ++offset) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -882,6 +884,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size, actual_size);
@@ -889,9 +892,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 877) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -907,6 +910,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size + SystemPageSize(), actual_size);
@@ -914,9 +918,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -926,6 +930,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   predicted_size = allocator.root()->ActualSize(requested_size);
   ptr = allocator.root()->Alloc(requested_size, type_name);
   EXPECT_TRUE(ptr);
+  slot_start = reinterpret_cast<char*>(ptr) - allocator.root()->extras_offset;
   actual_size = allocator.root()->GetSize(ptr);
   EXPECT_EQ(predicted_size, actual_size);
   EXPECT_EQ(requested_size, actual_size);
@@ -933,9 +938,9 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffsetAndStart) {
   if (features::IsPartitionAllocGigaCageEnabled()) {
     for (size_t offset = 0; offset < requested_size; offset += 4999) {
       EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
-                offset);
+                offset + allocator.root()->extras_offset);
       EXPECT_EQ(PartitionAllocGetSlotStart(static_cast<char*>(ptr) + offset),
-                ptr);
+                slot_start);
     }
   }
 #endif
@@ -991,7 +996,8 @@ TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
     char* ptr = static_cast<char*>(ptrs[i]);
     for (size_t offset = 0; offset < requested_size; offset += 13) {
       EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size);
-      EXPECT_EQ(PartitionAllocGetSlotOffset(ptr + offset), offset);
+      EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
+                offset + allocator.root()->extras_offset);
     }
     allocator.root()->Free(ptr);
   }
@@ -2725,7 +2731,8 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   *ptr1 = kCookie;
 
-  auto* ref_count = PartitionRefCountPointer(ptr1);
+  auto* ref_count =
+      PartitionRefCountPointer(reinterpret_cast<char*>(ptr1) - kPointerOffset);
   EXPECT_TRUE(ref_count->HasOneRef());
 
   ref_count->Acquire();
@@ -2750,7 +2757,8 @@ TEST_F(PartitionAllocTest, RefCountBasic) {
   // When the last reference is released, the slot should become reusable.
   EXPECT_TRUE(ref_count->Release());
-  PartitionAllocFreeForRefCounting(ptr1);
+  PartitionAllocFreeForRefCounting(reinterpret_cast<char*>(ptr1) -
+                                   kPointerOffset);
 
   uint64_t* ptr3 = reinterpret_cast<uint64_t*>(
       allocator.root()->Alloc(alloc_size, type_name));
   EXPECT_EQ(ptr1, ptr3);
@@ -103,11 +103,9 @@ static constexpr size_t kPartitionRefCountOffset =
 static constexpr size_t kPartitionRefCountOffset = kInSlotRefCountBufferSize;
 #endif
 
-ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* ptr) {
-  DCheckGetSlotOffsetIsZero(ptr);
-  return reinterpret_cast<PartitionRefCount*>(reinterpret_cast<char*>(ptr) -
-                                              kPartitionRefCountOffset);
+ALWAYS_INLINE PartitionRefCount* PartitionRefCountPointer(void* slot_start) {
+  DCheckGetSlotOffsetIsZero(slot_start);
+  return reinterpret_cast<PartitionRefCount*>(slot_start);
 }
 
 #else  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
@@ -634,8 +634,7 @@ PartitionAllocGetSlotSpanForSizeQuery(void* ptr) {
 // This file may end up getting included even when PartitionAlloc isn't used,
 // but the .cc file won't be linked. Exclude the code that relies on it.
 #if BUILDFLAG(USE_PARTITION_ALLOC)
-// Gets the offset from the beginning of the allocated slot, adjusted for cookie
-// (if any).
+// Gets the offset from the beginning of the allocated slot.
 // CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
 // lead to undefined behavior.
 //
@@ -651,7 +650,6 @@ ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
   // The only allocations that don't use ref-count are allocated outside of
   // GigaCage, hence we'd never get here in the `allow_extras = false` case.
   PA_DCHECK(root->allow_extras);
-  ptr = root->AdjustPointerForExtrasSubtract(ptr);
 
   // Get the offset from the beginning of the slot span.
   uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
@@ -679,32 +677,31 @@ ALWAYS_INLINE void* PartitionAllocGetSlotStart(void* ptr) {
   size_t offset_in_slot_span = ptr_addr - slot_span_start;
 
   auto* bucket = slot_span->bucket;
-  return root->AdjustPointerForExtrasAdd(reinterpret_cast<void*>(
+  return reinterpret_cast<void*>(
       slot_span_start +
-      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span)));
+      bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span));
 }
 
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
 // TODO(glazunov): Simplify the function once the non-thread-safe PartitionRoot
 // is no longer used.
-ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* ptr) {
-  PA_DCHECK(!internal::PartitionRefCountPointer(ptr)->IsAlive());
+ALWAYS_INLINE void PartitionAllocFreeForRefCounting(void* slot_start) {
+  PA_DCHECK(!internal::PartitionRefCountPointer(slot_start)->IsAlive());
 
   auto* slot_span =
-      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(ptr);
+      SlotSpanMetadata<ThreadSafe>::FromPointerNoAlignmentCheck(slot_start);
   auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
+  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
+  // supports extras.
+  PA_DCHECK(root->allow_extras);
 
 #ifdef ADDRESS_SANITIZER
-  size_t utilized_slot_size = slot_span->GetUtilizedSlotSize();
-  // PartitionRefCount is required to be allocated inside a `PartitionRoot` that
-  // supports extras.
-  size_t usable_size = root->AdjustSizeForExtrasSubtract(utilized_slot_size);
+  void* ptr = root->AdjustPointerForExtrasAdd(slot_start);
+  size_t usable_size =
+      root->AdjustSizeForExtrasSubtract(slot_span->GetUtilizedSlotSize());
   ASAN_UNPOISON_MEMORY_REGION(ptr, usable_size);
 #endif
-  void* slot_start = root->AdjustPointerForExtrasSubtract(ptr);
 #if DCHECK_IS_ON()
   memset(slot_start, kFreedByte, slot_span->GetUtilizedSlotSize());
 #endif
@@ -865,9 +862,11 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     internal::PartitionCookieCheckValue(char_ptr + usable_size);
 #endif
 
+    void* slot_start = AdjustPointerForExtrasSubtract(ptr);
+
     if (!slot_span->bucket->is_direct_mapped()) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-      auto* ref_count = internal::PartitionRefCountPointer(ptr);
+      auto* ref_count = internal::PartitionRefCountPointer(slot_start);
       // If we are holding the last reference to the allocation, it can be freed
       // immediately. Otherwise, defer the operation and zap the memory to turn
       // potential use-after-free issues into unexploitable crashes.
@@ -885,7 +884,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
     }
 
     // Shift ptr to the beginning of the slot.
-    ptr = AdjustPointerForExtrasSubtract(ptr);
+    ptr = slot_start;
   }  // if (allow_extras)
 
 #if DCHECK_IS_ON()
@@ -1194,6 +1193,9 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
   // to save raw_size, i.e. only for large allocations. For small allocations,
   // we have no other choice than putting the cookie at the very end of the
   // slot, thus creating the "empty" space.
+#if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
+  void* slot_start = ret;
+#endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   size_t usable_size = AdjustSizeForExtrasSubtract(utilized_slot_size);
   // The value given to the application is just after the ref-count and cookie.
   ret = AdjustPointerForExtrasAdd(ret);
@@ -1221,7 +1223,8 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlagsNoHooks(
   bool is_direct_mapped = raw_size > kMaxBucketed;
   if (allow_extras && !is_direct_mapped) {
 #if ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-    new (internal::PartitionRefCountPointer(ret)) internal::PartitionRefCount();
+    new (internal::PartitionRefCountPointer(slot_start))
+        internal::PartitionRefCount();
 #endif  // ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
   }
   return ret;