Commit a4df6cf8 authored by Benoit Lize, committed by Commit Bot

[base/allocator] Make PartitionRoot<{,Not}ThreadSafe> layouts identical.

There are a few places where we access a PartitionRoot with the "wrong"
thread safety template parameter. This is fragile, but also forces us to
remove some DCHECK()s as their layouts don't match. This CL makes sure
the layouts do match, which then removes the issue.

The only downside is making PartitionRoot<NotThreadSafe> a tiny bit
larger, but otherwise this does not add any runtime cost.

Bug: 1092288
Change-Id: I7e24eceacd03682eab7693730a4fc7db973d6f09
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2368874
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Cr-Commit-Position: refs/heads/master@{#800963}
parent cde4d274
...@@ -306,6 +306,20 @@ class LOCKABLE MaybeSpinLock<ThreadSafe> { ...@@ -306,6 +306,20 @@ class LOCKABLE MaybeSpinLock<ThreadSafe> {
}; };
#endif // DCHECK_IS_ON() #endif // DCHECK_IS_ON()
template <>
class LOCKABLE MaybeSpinLock<NotThreadSafe> {
public:
void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
void Unlock() UNLOCK_FUNCTION() {}
void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
char padding_[sizeof(MaybeSpinLock<ThreadSafe>)];
};
static_assert(
sizeof(MaybeSpinLock<ThreadSafe>) == sizeof(MaybeSpinLock<NotThreadSafe>),
"Sizes should be equal to enseure identical layout of PartitionRoot");
// An "extent" is a span of consecutive superpages. We link to the partition's // An "extent" is a span of consecutive superpages. We link to the partition's
// next extent (if there is one) to the very start of a superpage's metadata // next extent (if there is one) to the very start of a superpage's metadata
// area. // area.
...@@ -556,6 +570,13 @@ struct BASE_EXPORT PartitionRoot { ...@@ -556,6 +570,13 @@ struct BASE_EXPORT PartitionRoot {
void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_); void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
}; };
static_assert(sizeof(PartitionRoot<internal::ThreadSafe>) ==
sizeof(PartitionRoot<internal::NotThreadSafe>),
"Layouts should match");
static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, buckets) ==
offsetof(PartitionRoot<internal::NotThreadSafe>, buckets),
"Layouts should match");
template <bool thread_safe> template <bool thread_safe>
ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket( ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(
Bucket* bucket, Bucket* bucket,
...@@ -754,14 +775,8 @@ PartitionAllocGetPageForSize(void* ptr) { ...@@ -754,14 +775,8 @@ PartitionAllocGetPageForSize(void* ptr) {
// cause trouble, and the caller is responsible for that not happening. // cause trouble, and the caller is responsible for that not happening.
auto* page = auto* page =
internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr); internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
// This PA_DCHECK has been temporarily commented out, because
// CheckedPtr2OrMTEImpl calls ThreadSafe variant of
// PartitionAllocGetSlotOffset even in the NotThreadSafe case. Everything
// seems to work, except IsValidPage is failing (PartitionRoot's fields are
// laid out differently between variants).
// TODO(bartekn): Uncomment once we figure out thread-safety variant mismatch.
// TODO(palmer): See if we can afford to make this a CHECK. // TODO(palmer): See if we can afford to make this a CHECK.
// PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page)); PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
return page; return page;
} }
} // namespace internal } // namespace internal
...@@ -805,22 +820,24 @@ ALWAYS_INLINE void DCheckIfManagedByPartitionAllocNormalBuckets(const void*) {} ...@@ -805,22 +820,24 @@ ALWAYS_INLINE void DCheckIfManagedByPartitionAllocNormalBuckets(const void*) {}
// (if any). // (if any).
// CAUTION! Use only for normal buckets. Using on direct-mapped allocations may // CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
// lead to undefined behavior. // lead to undefined behavior.
template <bool thread_safe> //
// This function is not a template, and can be used on either variant
// (thread-safe or not) of the allocator. This relies on the two PartitionRoot<>
// having the same layout, which is enforced by static_assert().
ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) { ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
internal::DCheckIfManagedByPartitionAllocNormalBuckets(ptr); internal::DCheckIfManagedByPartitionAllocNormalBuckets(ptr);
// The only allocations that don't use tag are allocated outside of GigaCage, // The only allocations that don't use tag are allocated outside of GigaCage,
// hence we'd never get here in the use_tag=false case. // hence we'd never get here in the use_tag=false case.
// TODO(bartekn): Add a DCHECK(page->root->allow_extras) to assert this, once
// we figure out the thread-safety variant mismatch problem (see the comment
// in PartitionAllocGetPageForSize for the problem description).
ptr = internal::PartitionPointerAdjustSubtract(true /* use_tag */, ptr); ptr = internal::PartitionPointerAdjustSubtract(true /* use_tag */, ptr);
auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr); auto* page =
internal::PartitionAllocGetPageForSize<internal::ThreadSafe>(ptr);
PA_DCHECK(PartitionRoot<internal::ThreadSafe>::FromPage(page)->allow_extras);
size_t slot_size = page->bucket->slot_size; size_t slot_size = page->bucket->slot_size;
// Get the offset from the beginning of the slot span. // Get the offset from the beginning of the slot span.
uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr); uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
uintptr_t slot_span_start = reinterpret_cast<uintptr_t>( uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
internal::PartitionPage<thread_safe>::ToPointer(page)); internal::PartitionPage<internal::ThreadSafe>::ToPointer(page));
size_t offset_in_slot_span = ptr_addr - slot_span_start; size_t offset_in_slot_span = ptr_addr - slot_span_start;
// Knowing that slots are tightly packed in a slot span, calculate an offset // Knowing that slots are tightly packed in a slot span, calculate an offset
// within a slot using simple % operation. // within a slot using simple % operation.
......
...@@ -855,16 +855,14 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) { ...@@ -855,16 +855,14 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
EXPECT_LT(requested_size, actual_size); EXPECT_LT(requested_size, actual_size);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) #if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
for (size_t offset = 0; offset < requested_size; ++offset) { for (size_t offset = 0; offset < requested_size; ++offset) {
EXPECT_EQ(PartitionAllocGetSlotOffset<ThreadSafe>(static_cast<char*>(ptr) + EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
offset),
offset); offset);
// Testing the mismatched thread-safety variant here and below, because // Testing the mismatched thread-safety variant here and below, because
// CheckedPtr2OrMTEImpl may call the wrong variant and relies on the result // CheckedPtr2OrMTEImpl may call the wrong variant and relies on the result
// to be identical. // to be identical.
// TODO(bartekn): Remove when CheckedPtr2OrMTEImpl no longer calls // TODO(bartekn): Remove when CheckedPtr2OrMTEImpl no longer calls
// mismatched vartiant. // mismatched vartiant.
EXPECT_EQ(PartitionAllocGetSlotOffset<NotThreadSafe>( EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
static_cast<char*>(ptr) + offset),
offset); offset);
} }
#endif #endif
...@@ -881,13 +879,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) { ...@@ -881,13 +879,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
EXPECT_EQ(requested_size, actual_size); EXPECT_EQ(requested_size, actual_size);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) #if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
for (size_t offset = 0; offset < requested_size; offset += 877) { for (size_t offset = 0; offset < requested_size; offset += 877) {
EXPECT_EQ(PartitionAllocGetSlotOffset<ThreadSafe>(static_cast<char*>(ptr) + EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
offset),
offset);
// TODO(bartekn): Remove when CheckedPtr2OrMTEImpl no longer calls
// mismatched vartiant.
EXPECT_EQ(PartitionAllocGetSlotOffset<NotThreadSafe>(
static_cast<char*>(ptr) + offset),
offset); offset);
} }
#endif #endif
...@@ -908,13 +900,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) { ...@@ -908,13 +900,7 @@ TEST_F(PartitionAllocTest, AllocGetSizeAndOffset) {
EXPECT_EQ(requested_size + kSystemPageSize, actual_size); EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) #if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
for (size_t offset = 0; offset < requested_size; offset += 4999) { for (size_t offset = 0; offset < requested_size; offset += 4999) {
EXPECT_EQ(PartitionAllocGetSlotOffset<ThreadSafe>(static_cast<char*>(ptr) + EXPECT_EQ(PartitionAllocGetSlotOffset(static_cast<char*>(ptr) + offset),
offset),
offset);
// TODO(bartekn): Remove when CheckedPtr2OrMTEImpl no longer calls
// mismatched vartiant.
EXPECT_EQ(PartitionAllocGetSlotOffset<NotThreadSafe>(
static_cast<char*>(ptr) + offset),
offset); offset);
} }
#endif #endif
...@@ -966,11 +952,7 @@ TEST_F(PartitionAllocTest, GetOffsetMultiplePages) { ...@@ -966,11 +952,7 @@ TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
char* ptr = static_cast<char*>(ptrs[i]); char* ptr = static_cast<char*>(ptrs[i]);
for (size_t offset = 0; offset < requested_size; offset += 13) { for (size_t offset = 0; offset < requested_size; offset += 13) {
EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size); EXPECT_EQ(allocator.root()->GetSize(ptr), requested_size);
EXPECT_EQ(PartitionAllocGetSlotOffset<ThreadSafe>(ptr + offset), offset); EXPECT_EQ(PartitionAllocGetSlotOffset(ptr + offset), offset);
// TODO(bartekn): Remove when CheckedPtr2OrMTEImpl no longer calls
// mismatched vartiant.
EXPECT_EQ(PartitionAllocGetSlotOffset<NotThreadSafe>(ptr + offset),
offset);
} }
allocator.root()->Free(ptr); allocator.root()->Free(ptr);
} }
......
...@@ -27,19 +27,12 @@ BASE_EXPORT bool CheckedPtr2OrMTEImplPartitionAllocSupport::EnabledForPtr( ...@@ -27,19 +27,12 @@ BASE_EXPORT bool CheckedPtr2OrMTEImplPartitionAllocSupport::EnabledForPtr(
// PartitionAlloc supports it. (Currently not implemented for simplicity, but // PartitionAlloc supports it. (Currently not implemented for simplicity, but
// there are no technological obstacles preventing it; whereas in case of // there are no technological obstacles preventing it; whereas in case of
// CheckedPtr2, PartitionAllocGetSlotOffset won't work with direct-map.) // CheckedPtr2, PartitionAllocGetSlotOffset won't work with direct-map.)
//
// NOTE, CheckedPtr doesn't know which thread-safery PartitionAlloc variant
// it's dealing with. Just use ThreadSafe variant, because it's more common.
// NotThreadSafe is only used by Blink's layout, which is currently being
// transitioned to Oilpan. PartitionAllocGetSlotOffset is expected to return
// the same result regardless, anyway.
// TODO(bartekn): Figure out the thread-safety mismatch.
return IsManagedByPartitionAllocNormalBuckets(ptr) return IsManagedByPartitionAllocNormalBuckets(ptr)
// Checking offset is not needed for ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR, // Checking offset is not needed for ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR,
// but call it anyway for apples-to-apples comparison with // but call it anyway for apples-to-apples comparison with
// ENABLE_TAG_FOR_CHECKED_PTR2. // ENABLE_TAG_FOR_CHECKED_PTR2.
#if ENABLE_TAG_FOR_CHECKED_PTR2 || ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR #if ENABLE_TAG_FOR_CHECKED_PTR2 || ENABLE_TAG_FOR_SINGLE_TAG_CHECKED_PTR
&& PartitionAllocGetSlotOffset<ThreadSafe>(ptr) == 0 && PartitionAllocGetSlotOffset(ptr) == 0
#endif #endif
; ;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment