Commit 51e0f562 authored by Benoit Lize, committed by Chromium LUCI CQ

[PartitionAlloc] Make sure that freelists are null-terminated.

Freelists must be null-terminated, but this is currently not enforced in the
code. This commit removes all "reinterpret_cast<>"s creating freelist
entries, constructing them with placement new instead, which enforces null
termination.

Bug: 998048
Change-Id: I0593a337ef36693e4ffe08ce8ec04ba679e24183
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2584764
Reviewed-by: Chris Palmer <palmer@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#836046}
parent 919b7927
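The change in one idea: instead of reinterpret_cast-ing raw slot memory to PartitionFreelistEntry* and remembering to null-terminate the tail by hand, every entry is created with placement new, and the constructor null-terminates it. Below is a minimal sketch of that pattern, not the Chromium class itself (the real PartitionFreelistEntry encodes its next_ pointer and adds a thread-cache variant):

```cpp
#include <cstddef>
#include <new>

// Sketch only: a freelist entry that can exist solely inside slot memory.
class FreelistEntry {
 public:
  // Every entry is born null-terminated.
  FreelistEntry() : next_(nullptr) {}
  // Entries are never destroyed, only overwritten by user data.
  ~FreelistEntry() = delete;

  // Placement new only: entries always live inside slot memory, never on
  // the heap.
  void* operator new(size_t) = delete;
  void operator delete(void*) = delete;
  void* operator new(size_t, void* buffer) { return buffer; }

  void SetNext(FreelistEntry* next) { next_ = next; }
  FreelistEntry* GetNext() const { return next_; }

 private:
  FreelistEntry* next_;
};

// Usage: constructing over raw slot memory yields a valid one-element list.
//   auto* entry = new (slot) FreelistEntry();  // entry->GetNext() == nullptr
```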
@@ -79,11 +79,8 @@ PartitionDirectMap(PartitionRoot<thread_safe>* root, int flags, size_t raw_size)
   PA_DCHECK(!page->slot_span_metadata.num_unprovisioned_slots);
   PA_DCHECK(!page->slot_span_metadata.empty_cache_index);
   page->slot_span_metadata.bucket = &metadata->bucket;
-  page->slot_span_metadata.SetFreelistHead(
-      reinterpret_cast<PartitionFreelistEntry*>(slot));
-  auto* next_entry = reinterpret_cast<PartitionFreelistEntry*>(slot);
-  next_entry->SetNext(nullptr);
+  auto* next_entry = new (slot) PartitionFreelistEntry();
+  page->slot_span_metadata.SetFreelistHead(next_entry);
   PA_DCHECK(!metadata->bucket.active_slot_spans_head);
   PA_DCHECK(!metadata->bucket.empty_slot_spans_head);
@@ -404,10 +401,9 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
   // Add all slots that fit within so far committed pages to the free list.
   PartitionFreelistEntry* prev_entry = nullptr;
-  PartitionFreelistEntry* entry =
-      reinterpret_cast<PartitionFreelistEntry*>(next_slot);
   char* next_slot_end = next_slot + size;
   while (next_slot_end <= commit_end) {
+    auto* entry = new (next_slot) PartitionFreelistEntry();
     if (!slot_span->freelist_head) {
       PA_DCHECK(!prev_entry);
       slot_span->SetFreelistHead(entry);
@@ -417,15 +413,8 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
     next_slot = next_slot_end;
     next_slot_end = next_slot + size;
     prev_entry = entry;
-    entry = reinterpret_cast<PartitionFreelistEntry*>(next_slot);
     slot_span->num_unprovisioned_slots--;
   }
-  // Null-terminate the list, if any slot made it to the list.
-  // One might think that this isn't needed as the page was just committed thus
-  // zeroed, but it isn't always the case on OS_APPLE.
-  if (prev_entry) {
-    prev_entry->SetNext(nullptr);
-  }
   return return_slot;
 }
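With entry construction moved inside the loop, the list is well-formed after every iteration, which is why the trailing null-termination block (needed because, as its comment noted, freshly committed pages are not guaranteed to be zeroed on OS_APPLE) could be deleted outright. A sketch of the loop's shape, reusing the FreelistEntry class from the sketch above; the helper name and signature are illustrative, not Chromium's:

```cpp
// Builds a freelist over [commit_start, commit_end) in |size|-byte slots.
FreelistEntry* BuildFreelist(char* commit_start, char* commit_end,
                             size_t size) {
  FreelistEntry* head = nullptr;
  FreelistEntry* prev_entry = nullptr;
  for (char* next_slot = commit_start; next_slot + size <= commit_end;
       next_slot += size) {
    // The constructor sets next to nullptr, so the list is null-terminated
    // after every iteration; there is no separate tail fix-up pass to forget.
    auto* entry = new (next_slot) FreelistEntry();
    if (!head)
      head = entry;
    else
      prev_entry->SetNext(entry);
    prev_entry = entry;
  }
  return head;  // nullptr if no slot fit, otherwise a null-terminated list.
}
```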
@@ -19,9 +19,23 @@ struct EncodedPartitionFreelistEntry;
 class PartitionFreelistEntry {
  public:
-  PartitionFreelistEntry() = delete;
+  PartitionFreelistEntry() { SetNext(nullptr); }
   ~PartitionFreelistEntry() = delete;
 
+  // Creates a new entry, with |next| following it.
+  static ALWAYS_INLINE PartitionFreelistEntry* InitForThreadCache(
+      void* ptr,
+      PartitionFreelistEntry* next) {
+    auto* entry = reinterpret_cast<PartitionFreelistEntry*>(ptr);
+    entry->SetNextForThreadCache(next);
+    return entry;
+  }
+
+  // Placement new only.
+  void* operator new(size_t) = delete;
+  void operator delete(void* ptr) = delete;
+  void* operator new(size_t, void* buffer) { return buffer; }
+
   ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
       PartitionFreelistEntry* ptr) {
     return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
@@ -37,11 +51,6 @@ class PartitionFreelistEntry {
     next_ = Encode(ptr);
   }
 
-  // ThreadCache freelists can point to entries across superpage boundaries.
-  ALWAYS_INLINE void SetNextForThreadCache(PartitionFreelistEntry* ptr) {
-    next_ = Encode(ptr);
-  }
-
  private:
   friend struct EncodedPartitionFreelistEntry;
 
   ALWAYS_INLINE static void* Transform(void* ptr) {
@@ -61,6 +70,11 @@ class PartitionFreelistEntry {
     return reinterpret_cast<void*>(masked);
  }
 
+  // ThreadCache freelists can point to entries across superpage boundaries.
+  ALWAYS_INLINE void SetNextForThreadCache(PartitionFreelistEntry* ptr) {
+    next_ = Encode(ptr);
+  }
+
   EncodedPartitionFreelistEntry* next_;
 };
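Deleting the ordinary operator new/delete while keeping the placement form turns the rule "entries only ever live inside slot memory" into a compile-time guarantee. A small illustration, building on the FreelistEntry sketch above (which includes <new>); the function is hypothetical:

```cpp
void Demo() {
  alignas(FreelistEntry) char slot[sizeof(FreelistEntry)];
  auto* ok = new (slot) FreelistEntry();  // OK: placement form is defined.
  // auto* bad = new FreelistEntry();     // Error: operator new is deleted.
  // delete ok;                           // Error: operator delete is deleted.
  ok->SetNext(nullptr);                   // Keep the example warning-free.
}
```

Note also that SetNextForThreadCache moved to the private section: with InitForThreadCache as the only public entry point, callers can no longer reinterpret_cast and link an unconstructed entry by hand. InitForThreadCache itself skips the constructor, presumably because its caller overwrites next immediately, so the constructor's nullptr store would be wasted.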
@@ -141,8 +141,8 @@ static size_t PartitionPurgeSlotSpan(
     if (slot_usage[slot_index])
       continue;
-    auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
-        ptr + (slot_size * slot_index));
+    auto* entry = new (ptr + (slot_size * slot_index))
+        internal::PartitionFreelistEntry();
     if (!head) {
       head = entry;
       back = entry;
@@ -157,8 +157,6 @@ static size_t PartitionPurgeSlotSpan(
   }
   slot_span->SetFreelistHead(head);
-  if (back)
-    back->SetNext(nullptr);
 
   PA_DCHECK(num_new_entries == num_slots - slot_span->num_allocated_slots);
 
   // Discard the memory.
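The purge path rebuilds the freelist from whichever slots are not in use, and the same constructor-based termination removes the old "if (back) back->SetNext(nullptr);" tail fix-up. A sketch with illustrative names, again building on FreelistEntry from above:

```cpp
// Hypothetical rebuild of a slot span's freelist after purging; |slot_usage|
// marks slots that are currently allocated and must be skipped.
FreelistEntry* RebuildFreelist(char* ptr, size_t slot_size, size_t num_slots,
                               const bool* slot_usage) {
  FreelistEntry* head = nullptr;
  FreelistEntry* back = nullptr;
  for (size_t i = 0; i < num_slots; ++i) {
    if (slot_usage[i])
      continue;  // Slot holds live data.
    auto* entry = new (ptr + slot_size * i) FreelistEntry();
    if (!head)
      head = entry;  // First free slot becomes the head.
    else
      back->SetNext(entry);
    back = entry;  // The tail stays null-terminated: the constructor did it.
  }
  return head;
}
```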
@@ -329,8 +329,8 @@ ALWAYS_INLINE void* ThreadCache::GetFromCache(size_t bucket_index,
 }
 
 ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket, void* ptr) {
-  auto* entry = reinterpret_cast<PartitionFreelistEntry*>(ptr);
-  entry->SetNextForThreadCache(bucket.freelist_head);
+  auto* entry =
+      PartitionFreelistEntry::InitForThreadCache(ptr, bucket.freelist_head);
   bucket.freelist_head = entry;
   bucket.count++;
 }
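PutInBucket is a plain LIFO push; InitForThreadCache folds the cast and the link into one call, so the entry's next pointer is written exactly once. A sketch with a hypothetical Bucket type (the real one carries more bookkeeping), mirroring what the helper does internally:

```cpp
struct Bucket {
  FreelistEntry* freelist_head = nullptr;
  int count = 0;
};

// Push |ptr| onto the bucket's freelist: the freed slot becomes the new head
// and points at the previous head.
void PutInBucket(Bucket& bucket, void* ptr) {
  auto* entry = reinterpret_cast<FreelistEntry*>(ptr);
  entry->SetNext(bucket.freelist_head);  // Single store, no nullptr detour.
  bucket.freelist_head = entry;
  bucket.count++;
}
```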