Commit bb25c608 authored by Morten Stenshorne, committed by Chromium LUCI CQ

Revert "[PartitionAlloc] Better freelist corruption detection."

This reverts commit ac10443d.

Reason for revert: Probably breaking mini_installer_tests and ProcessSnapshotTest.CrashpadInfoChild
https://ci.chromium.org/p/chromium/builders/ci/Win7%20Tests%20%281%29/112543

Original change's description:
> [PartitionAlloc] Better freelist corruption detection.
>
> Some PartitionAlloc crashes are likely due to freelist
> corruption. PartitionAlloc has two types of freelists: one in the central
> allocator, and one in the thread cache. The central allocator's freelist
> already has a DCHECK() verifying that subsequent entries belong to the same
> super page, a property that does not hold for thread cache freelists.
>
> This CL:
> - Makes these crashes lead to a NOINLINE function.
> - Adds an integrity check to all freelists.
>
> This is not meant as a security mitigation, but as protection against
> accidental corruption.
>
> Bug: 998048
> Change-Id: I21aedfe2b6363069362514a8edd6cd5bdea1acfc
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2582218
> Commit-Queue: Benoit L <lizeb@chromium.org>
> Reviewed-by: Chris Palmer <palmer@chromium.org>
> Reviewed-by: Kentaro Hara <haraken@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#840587}
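
For context, the integrity check being reverted stores the bitwise inverse of
each freelist pointer next to the pointer itself and re-verifies the pair
whenever the list is walked, funnelling any mismatch into a dedicated NOINLINE
crash function. The following is a simplified, self-contained sketch of that
idea only; the type and member names are illustrative, and the real entry (see
the diff below) additionally encodes the pointer and uses a separate
thread-cache setter because thread cache freelists may cross super page
boundaries.

  #include <cstdint>
  #include <cstdlib>

  // Toy stand-in for PartitionFreelistEntry (pointer encoding omitted).
  struct FreelistEntrySketch {
    void SetNext(FreelistEntrySketch* ptr) {
      next_ = ptr;
      // Keep the bitwise inverse next to the pointer. An accidental overwrite
      // (use-after-free, overflow of the previous slot) is very unlikely to
      // keep the pair consistent.
      inverted_next_ = ~reinterpret_cast<uintptr_t>(ptr);
    }

    FreelistEntrySketch* GetNext() const {
      // Decommitted memory reads back as zeroes, so only verify non-null
      // entries; a mismatch means the freelist was corrupted.
      if (next_ && ~reinterpret_cast<uintptr_t>(next_) != inverted_next_)
        std::abort();  // Stand-in for the NOINLINE crash function.
      return next_;
    }

    FreelistEntrySketch* next_ = nullptr;
    uintptr_t inverted_next_ = ~static_cast<uintptr_t>(0);
  };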

TBR=palmer@chromium.org,haraken@chromium.org,lizeb@chromium.org,chromium-scoped@luci-project-accounts.iam.gserviceaccount.com

Change-Id: Idd7f8213dae35146d4ada835cd443505f88749ce
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 998048
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2612968
Reviewed-by: Morten Stenshorne <mstensho@chromium.org>
Commit-Queue: Morten Stenshorne <mstensho@chromium.org>
Cr-Commit-Position: refs/heads/master@{#840622}
parent f19e613c
@@ -1685,81 +1685,6 @@ TEST_F(PartitionAllocDeathTest, DirectMapGuardPages) {
   }
 }
 
-// These tests rely on precise layout. We handle cookies, not reference counts.
-#if !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-
-namespace {
-
-// Used to adjust the returned pointer. This is necessary to make the following
-// death tests pass with DCHECK_IS_ON(), as they rely on the precise knowledge
-// of the allocator layout. Since the offset is 0 for non-DCHECK() builds, this
-// provides the benefit of more testing coverage.
-constexpr size_t kAllocationOffset =
-#if DCHECK_IS_ON()
-    kCookieSize;
-#else
-    0;
-#endif
-
-}  // namespace
-
-TEST_F(PartitionAllocDeathTest, UseAfterFreeDetection) {
-  void* data = allocator.root()->Alloc(100, "");
-  allocator.root()->Free(data);
-
-  // use after free, not crashing here, but the next allocation should crash,
-  // since we corrupted the freelist.
-  //
-  // When there is a cookie, must offset the UaF since the freelist entry is
-  // stored over the cookie area, not the allocated data.
-  void* data_before_cookie = reinterpret_cast<char*>(data) - kAllocationOffset;
-  memset(data_before_cookie, 0x42, 100);
-  EXPECT_DEATH(allocator.root()->Alloc(100, ""), "");
-}
-
-TEST_F(PartitionAllocDeathTest, FreelistCorruption) {
-  const size_t alloc_size = 2 * sizeof(void*);
-  void** fake_freelist_entry =
-      static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  fake_freelist_entry[0] = nullptr;
-  fake_freelist_entry[1] = nullptr;
-
-  void** uaf_data =
-      static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  allocator.root()->Free(uaf_data);
-  void** uaf_data_before_cookie = reinterpret_cast<void**>(
-      reinterpret_cast<char*>(uaf_data) - kAllocationOffset);
-  // Try to confuse the allocator. This is still easy to circumvent willingly,
-  // "just" need to set uaf_data[1] to ~uaf_data[0].
-  uaf_data_before_cookie[0] = fake_freelist_entry;
-  EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-}
-
-// With DCHECK_IS_ON(), cookies already handle off-by-one detection.
-#if !DCHECK_IS_ON()
-TEST_F(PartitionAllocDeathTest, OffByOneDetection) {
-  const size_t alloc_size = 2 * sizeof(void*);
-  char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size, ""));
-  array[alloc_size] = 'A';
-
-  // Crash at the next allocation. This assumes that we are touching a new,
-  // non-randomized slot span, where the next slot to be handed over to the
-  // application directly follows the current one.
-  EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-}
-
-TEST_F(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
-  const size_t alloc_size = 2 * sizeof(void*);
-  void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
-  char valid;
-  array[2] = &valid;
-
-  // Crash at the next allocation. This assumes that we are touching a new,
-  // non-randomized slot span, where the next slot to be handed over to the
-  // application directly follows the current one.
-  EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
-}
-#endif  // !DCHECK_IS_ON()
-#endif  // !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR
-
 #endif  // !defined(OS_ANDROID) && !defined(OS_IOS)
 
 // Tests that |PartitionDumpStats| and |PartitionDumpStats| run without
...
@@ -416,10 +416,6 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
     slot_span->num_unprovisioned_slots--;
   }
 
-#if DCHECK_IS_ON()
-  slot_span->freelist_head->CheckFreeList();
-#endif
-
   return return_slot;
 }
 
@@ -632,10 +628,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
     PartitionFreelistEntry* new_head = entry->GetNext();
     new_slot_span->SetFreelistHead(new_head);
     new_slot_span->num_allocated_slots++;
-
-    // We likely set *is_already_zeroed to true above, make sure that the
-    // freelist entry doesn't contain data.
-    return entry->ClearForAllocation();
+    return entry;
   }
 
   // Otherwise, we need to provision more slots by committing more pages. Build
...
@@ -9,30 +9,14 @@
 
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
 #include "base/compiler_specific.h"
-#include "base/dcheck_is_on.h"
-#include "base/immediate_crash.h"
 #include "base/sys_byteorder.h"
 #include "build/build_config.h"
 
 namespace base {
 namespace internal {
 
-namespace {
-
-[[noreturn]] NOINLINE void FreelistCorruptionDetected() {
-  IMMEDIATE_CRASH();
-}
-
-}  // namespace
-
 struct EncodedPartitionFreelistEntry;
 
-static_assert((1 << kMinBucketedOrder) >= 2 * sizeof(void*),
-              "Need enough space for two pointers in freelist entries");
-
-// Freelist entries are encoded for security reasons. See
-// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
-// the rationale and mechanism, respectively.
 class PartitionFreelistEntry {
  public:
   PartitionFreelistEntry() { SetNext(nullptr); }
@@ -43,9 +27,7 @@ class PartitionFreelistEntry {
       void* ptr,
       PartitionFreelistEntry* next) {
     auto* entry = reinterpret_cast<PartitionFreelistEntry*>(ptr);
-    // ThreadCache freelists can point to entries across superpage boundaries,
-    // no check contrary to |SetNext()|.
-    entry->SetNextInternal(next);
+    entry->SetNextForThreadCache(next);
     return entry;
   }
@@ -60,29 +42,13 @@ class PartitionFreelistEntry {
   }
 
   ALWAYS_INLINE PartitionFreelistEntry* GetNext() const;
 
-  NOINLINE void CheckFreeList() const {
-    for (auto* entry = this; entry; entry = entry->GetNext()) {
-      // |GetNext()| checks freelist integrity.
-    }
-  }
-
-  ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
-#if DCHECK_IS_ON()
-    // Regular freelists always point to an entry within the same super page.
-    if (UNLIKELY(ptr &&
-                 (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
-                     (reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
-      FreelistCorruptionDetected();
-    }
-#endif
-    SetNextInternal(ptr);
-  }
-
-  // Zeroes out |this| before returning it.
-  ALWAYS_INLINE void* ClearForAllocation() {
-    next_ = nullptr;
-    inverted_next_ = 0;
-    return reinterpret_cast<void*>(this);
+  // Regular freelists always point to an entry within the same super page.
+  ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
+    PA_DCHECK(!ptr ||
+              (reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
+                  (reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask));
+    next_ = Encode(ptr);
   }
 
  private:
@@ -104,21 +70,16 @@ class PartitionFreelistEntry {
     return reinterpret_cast<void*>(masked);
   }
 
-  ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr) {
+  // ThreadCache freelists can point to entries across superpage boundaries.
+  ALWAYS_INLINE void SetNextForThreadCache(PartitionFreelistEntry* ptr) {
     next_ = Encode(ptr);
-    inverted_next_ = ~reinterpret_cast<uintptr_t>(next_);
   }
 
   EncodedPartitionFreelistEntry* next_;
-  // This is intended to detect unintentional corruptions of the freelist.
-  // These can happen due to a Use-after-Free, or overflow of the previous
-  // allocation in the slot span.
-  uintptr_t inverted_next_;
 };
 
 struct EncodedPartitionFreelistEntry {
   char scrambled[sizeof(PartitionFreelistEntry*)];
-  char copy_of_scrambled[sizeof(PartitionFreelistEntry*)];
 
   EncodedPartitionFreelistEntry() = delete;
   ~EncodedPartitionFreelistEntry() = delete;
@@ -135,11 +96,6 @@ static_assert(sizeof(PartitionFreelistEntry) ==
               "Should not have padding");
 
 ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext() const {
-  // GetNext() can be called on decommitted memory, which is full of
-  // zeroes. This is not a corruption issue, so only check integrity when we
-  // have a non-nullptr |next_| pointer.
-  if (UNLIKELY(next_ && ~reinterpret_cast<uintptr_t>(next_) != inverted_next_))
-    FreelistCorruptionDetected();
   return EncodedPartitionFreelistEntry::Decode(next_);
 }
...
@@ -126,9 +126,6 @@ DeferredUnmap SlotSpanMetadata<thread_safe>::FreeSlowPath() {
   if (UNLIKELY(bucket->is_direct_mapped())) {
     return PartitionDirectUnmap(this);
   }
-#if DCHECK_IS_ON()
-  freelist_head->CheckFreeList();
-#endif
   // If it's the current active slot span, change it. We bounce the slot span
   // to the empty list as a force towards defragmentation.
   if (LIKELY(this == bucket->active_slot_spans_head))
...