Commit 5129a5dd authored by Benoît Lizé, committed by Commit Bot

base/allocator: Use typed encoded/decoded freelist entries.

In PartitionAlloc, the freelist entries are encoded (except for the head). The
encoding process is symmetric, so a single "Transform()" function was used for
both encoding and decoding. This is confusing and error-prone.

This adds a type for an encoded freelist entry, making the difference
clearer.

Note that this fixes a bug of PartitionAlloc on big endian platforms. In
PartitionPurgePage(), the tail of the freelist had an un-encoded nullptr
value. This is incorrect, as the code using it would decode the value, and get
the wrong one as a consequence. This does not trigger on little-endian
platforms, as the transformation we use is such that Encode(nullptr) == nullptr,
but on big endian ones Encode(ptr) = ~ptr, meaning that this would lead to a
crash.

It seems that Chrome does not ship on big endian platforms though (and that no
chromium-based project uses PartitionAlloc on big endian platforms), as
otherwise this would crash the renderer very quickly. The issue is eliminated
with the new types.

There is no behavior change in this CL on little endian, and it should not
impact performance either.

Bug: 998048, 787153
Change-Id: I67798659202156360aeddc6e71c5d330f5daa163
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1857328
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Egor Pasko <pasko@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#705884}
parent b1a7500d
......@@ -493,14 +493,14 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
entry = internal::PartitionFreelistEntry::Transform(entry->next);
entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
// If we have a slot where the masked freelist entry is 0, we can actually
// discard that freelist entry because touching a discarded page is
// guaranteed to return original content or 0. (Note that this optimization
// won't fire on big-endian machines because the masking function is
// negation.)
if (!internal::PartitionFreelistEntry::Transform(entry))
if (!internal::PartitionFreelistEntry::Encode(entry))
last_slot = slot_index;
#endif
}
......@@ -534,25 +534,33 @@ static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
DCHECK(truncated_slots > 0);
size_t num_new_entries = 0;
page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
// Rewrite the freelist.
internal::PartitionFreelistEntry** entry_ptr = &page->freelist_head;
internal::PartitionFreelistEntry* head = nullptr;
internal::PartitionFreelistEntry* back = head;
for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
if (slot_usage[slot_index])
continue;
auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
ptr + (slot_size * slot_index));
*entry_ptr = internal::PartitionFreelistEntry::Transform(entry);
entry_ptr = reinterpret_cast<internal::PartitionFreelistEntry**>(entry);
if (!head) {
head = entry;
back = entry;
} else {
back->next = internal::PartitionFreelistEntry::Encode(entry);
back = entry;
}
num_new_entries++;
#if !defined(OS_WIN)
last_slot = slot_index;
#endif
}
// Terminate the freelist chain.
*entry_ptr = nullptr;
// The freelist head is stored unmasked.
page->freelist_head =
internal::PartitionFreelistEntry::Transform(page->freelist_head);
page->freelist_head = head;
if (back)
back->next = internal::PartitionFreelistEntry::Encode(nullptr);
DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
......
......@@ -79,7 +79,7 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
next_entry->next = PartitionFreelistEntry::Transform(nullptr);
next_entry->next = PartitionFreelistEntry::Encode(nullptr);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
......@@ -394,10 +394,10 @@ ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
freelist_pointer += size;
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
entry->next = PartitionFreelistEntry::Transform(next_entry);
entry->next = PartitionFreelistEntry::Encode(next_entry);
entry = next_entry;
}
entry->next = PartitionFreelistEntry::Transform(nullptr);
entry->next = PartitionFreelistEntry::Encode(nullptr);
} else {
page->freelist_head = nullptr;
}
......@@ -555,7 +555,7 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
if (LIKELY(new_page->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_page->freelist_head;
PartitionFreelistEntry* new_head =
PartitionFreelistEntry::Transform(entry->next);
EncodedPartitionFreelistEntry::Decode(entry->next);
new_page->freelist_head = new_head;
new_page->num_allocated_slots++;
return entry;
......
......@@ -15,33 +15,56 @@
namespace base {
namespace internal {
// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
// Transform() with Encode()/Decode() such that the API provides some static
// type safety.
//
// https://crbug.com/787153
struct EncodedPartitionFreelistEntry;
struct PartitionFreelistEntry {
PartitionFreelistEntry* next;
EncodedPartitionFreelistEntry* next;
PartitionFreelistEntry() = delete;
~PartitionFreelistEntry() = delete;
static ALWAYS_INLINE PartitionFreelistEntry* Transform(
ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
PartitionFreelistEntry* ptr) {
// We use bswap on little endian as a fast mask for two reasons:
// 1) If an object is freed and its vtable used where the attacker doesn't
// get the chance to run allocations between the free and use, the vtable
// dereference is likely to fault.
// 2) If the attacker has a linear buffer overflow and elects to try and
// corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted.
// For big endian, similar guarantees are arrived at with a negation.
return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
}
private:
friend struct EncodedPartitionFreelistEntry;
static ALWAYS_INLINE void* Transform(void* ptr) {
// We use bswap on little endian as a fast mask for two reasons:
// 1) If an object is freed and its vtable used where the attacker doesn't
// get the chance to run allocations between the free and use, the vtable
// dereference is likely to fault.
// 2) If the attacker has a linear buffer overflow and elects to try and
// corrupt a freelist pointer, partial pointer overwrite attacks are
// thwarted.
// For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
return reinterpret_cast<PartitionFreelistEntry*>(masked);
return reinterpret_cast<void*>(masked);
}
};
struct EncodedPartitionFreelistEntry {
char scrambled[sizeof(PartitionFreelistEntry*)];
EncodedPartitionFreelistEntry() = delete;
~EncodedPartitionFreelistEntry() = delete;
ALWAYS_INLINE static PartitionFreelistEntry* Decode(
EncodedPartitionFreelistEntry* ptr) {
return reinterpret_cast<PartitionFreelistEntry*>(
PartitionFreelistEntry::Transform(ptr));
}
};
static_assert(sizeof(PartitionFreelistEntry) ==
sizeof(EncodedPartitionFreelistEntry),
"Should not have padding");
} // namespace internal
} // namespace base
......
......@@ -244,11 +244,11 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// Catches an immediate double free.
CHECK(ptr != freelist_head);
// Look for double free one level deeper in debug.
DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
freelist_head->next));
DCHECK(!freelist_head ||
ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
internal::PartitionFreelistEntry* entry =
static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
......
......@@ -107,8 +107,8 @@ ALWAYS_INLINE void* PartitionRootBase::AllocFromBucket(PartitionBucket* bucket,
// the size metadata.
DCHECK(page->get_raw_size() == 0);
internal::PartitionFreelistEntry* new_head =
internal::PartitionFreelistEntry::Transform(
static_cast<internal::PartitionFreelistEntry*>(ret)->next);
internal::EncodedPartitionFreelistEntry::Decode(
page->freelist_head->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment