Commit c37c1768 authored by Benoit Lize, committed by Chromium LUCI CQ

Reland "[PartitionAlloc] Better freelist corruption detection."

This reverts commit bb25c608.

Reason for revert: WIP DO NOT LAND

Original change's description:
> Revert "[PartitionAlloc] Better freelist corruption detection."
>
> This reverts commit ac10443d.
>
> Reason for revert: Probably breaking mini_installer_tests and ProcessSnapshotTest.CrashpadInfoChild
> https://ci.chromium.org/p/chromium/builders/ci/Win7%20Tests%20%281%29/112543
>
> Original change's description:
> > [PartitionAlloc] Better freelist corruption detection.
> >
> > Some PartitionAlloc crashes are likely due to freelist
> > corruption. PartitionAlloc has two types of freelists: in the central
> > allocator, and in the thread cache. The central allocator one already
> > has a DCHECK() to verify that subsequent entries belong to the same
> > superpage, which doesn't hold for the thread cache freelists.
> >
> > This CL:
> > - Makes these crashes lead to a NOINLINE function
> > - Adds an integrity check to all freelists.
> >
> > This is not meant to be a security mitigation, but to protect against
> > accidental issues.
> >
> > Bug: 998048
> > Change-Id: I21aedfe2b6363069362514a8edd6cd5bdea1acfc
> > Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2582218
> > Commit-Queue: Benoit L <lizeb@chromium.org>
> > Reviewed-by: Chris Palmer <palmer@chromium.org>
> > Reviewed-by: Kentaro Hara <haraken@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#840587}
>
> TBR=palmer@chromium.org,haraken@chromium.org,lizeb@chromium.org,chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
>
> Change-Id: Idd7f8213dae35146d4ada835cd443505f88749ce
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 998048
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2612968
> Reviewed-by: Morten Stenshorne <mstensho@chromium.org>
> Commit-Queue: Morten Stenshorne <mstensho@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#840622}

TBR=palmer@chromium.org,haraken@chromium.org,lizeb@chromium.org,mstensho@chromium.org,chromium-scoped@luci-project-accounts.iam.gserviceaccount.com

# Not skipping CQ checks because this is a reland.

Bug: 998048
Change-Id: I799d98335b5617d52a3eeb1e254d64f487144f87
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2613267
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Commit-Queue: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#846030}
parent c85e08a4
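For context before the diff: the hardening pairs each freelist pointer with
its bitwise complement, and verifies the pair whenever an entry is dequeued.
Below is a minimal sketch of the idea, with simplified names and without the
pointer scrambling the real entries also apply; the actual code is in the
partition_freelist_entry.h hunks further down.

#include <cstdint>
#include <cstdlib>

struct HardenedEntry {
  HardenedEntry* next;
  uintptr_t inverted_next;  // Kept equal to ~next by SetNext().

  void SetNext(HardenedEntry* ptr) {
    next = ptr;
    inverted_next = ~reinterpret_cast<uintptr_t>(ptr);
  }

  HardenedEntry* GetNext() const {
    // A use-after-free write or a linear overflow is unlikely to update both
    // fields consistently, so a mismatch means the entry was overwritten.
    // Decommitted memory is all zeroes, hence the non-null check.
    if (next && ~reinterpret_cast<uintptr_t>(next) != inverted_next)
      std::abort();  // Stand-in for FreelistCorruptionDetected().
    return next;
  }
};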
@@ -19,6 +19,7 @@
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/bits.h"
@@ -1745,6 +1746,82 @@ TEST_F(PartitionAllocDeathTest, DirectMapGuardPages) {
}
}
// These tests rely on precise layout. We handle cookies, not reference counts.
#if !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR && defined(PA_HAS_FREELIST_HARDENING)
namespace {
// Used to adjust the returned pointer. This is necessary to make the following
// death tests pass with DCHECK_IS_ON(), as they rely on the precise knowledge
// of the allocator layout. Since the offset is 0 for non-DCHECK() builds, this
// provides the benefit of more testing coverage.
constexpr size_t kAllocationOffset =
#if DCHECK_IS_ON()
kCookieSize;
#else
0;
#endif
} // namespace
TEST_F(PartitionAllocDeathTest, UseAfterFreeDetection) {
void* data = allocator.root()->Alloc(100, "");
allocator.root()->Free(data);
// Use-after-free: no crash here, but the next allocation should crash, since
// we corrupted the freelist.
//
// When there is a cookie, we must offset the UaF since the freelist entry is
// stored over the cookie area, not the allocated data.
void* data_before_cookie = reinterpret_cast<char*>(data) - kAllocationOffset;
memset(data_before_cookie, 0x42, 100);
EXPECT_DEATH(allocator.root()->Alloc(100, ""), "");
}
TEST_F(PartitionAllocDeathTest, FreelistCorruption) {
const size_t alloc_size = 2 * sizeof(void*);
void** fake_freelist_entry =
static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
fake_freelist_entry[0] = nullptr;
fake_freelist_entry[1] = nullptr;
void** uaf_data =
static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
allocator.root()->Free(uaf_data);
void** uaf_data_before_cookie = reinterpret_cast<void**>(
reinterpret_cast<char*>(uaf_data) - kAllocationOffset);
// Try to confuse the allocator. This is still easy to circumvent deliberately:
// one "just" needs to set uaf_data[1] to ~uaf_data[0].
uaf_data_before_cookie[0] = fake_freelist_entry;
EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
}
// With DCHECK_IS_ON(), cookies already handle off-by-one detection.
#if !DCHECK_IS_ON()
TEST_F(PartitionAllocDeathTest, OffByOneDetection) {
const size_t alloc_size = 2 * sizeof(void*);
char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size, ""));
array[alloc_size] = 'A';
// Crash at the next allocation. This assumes that we are touching a new,
// non-randomized slot span, where the next slot to be handed over to the
// application directly follows the current one.
EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
}
TEST_F(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
const size_t alloc_size = 2 * sizeof(void*);
void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size, ""));
char valid;
array[2] = &valid;
// Crash at the next allocation. This assumes that we are touching a new,
// non-randomized slot span, where the next slot to be handed over to the
// application directly follows the current one.
EXPECT_DEATH(allocator.root()->Alloc(alloc_size, ""), "");
}
#endif // !DCHECK_IS_ON()
#endif // !ENABLE_REF_COUNT_FOR_BACKUP_REF_PTR &&
// defined(PA_HAS_FREELIST_HARDENING)
#endif // !defined(OS_ANDROID) && !defined(OS_IOS)
// Tests that |PartitionDumpStats| and |PartitionDumpStats| run without
@@ -418,6 +418,10 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
slot_span->num_unprovisioned_slots--;
}
#if DCHECK_IS_ON()
slot_span->freelist_head->CheckFreeList();
#endif
return return_slot;
}
@@ -632,7 +636,10 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
PartitionFreelistEntry* new_head = entry->GetNext();
new_slot_span->SetFreelistHead(new_head);
new_slot_span->num_allocated_slots++;
return entry;
// We likely set *is_already_zeroed to true above; make sure that the
// freelist entry doesn't contain data.
return entry->ClearForAllocation();
}
// Otherwise, we need to provision more slots by committing more pages. Build
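Why |ClearForAllocation()| on this path: the slow path may have reported the
slot as already zeroed, yet at this point the slot still holds freelist
metadata, so the entry scrubs itself before the pointer escapes to the caller.
A small self-contained illustration of that contract, using a hypothetical
FakeFreelistEntry type rather than the real PartitionAlloc classes:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <new>

struct FakeFreelistEntry {
  uintptr_t next;           // Encoded next pointer.
  uintptr_t inverted_next;  // Hardening copy, ~next.

  void* ClearForAllocation() {
    next = 0;
    inverted_next = 0;
    return this;
  }
};

int main() {
  alignas(FakeFreelistEntry) unsigned char slot[sizeof(FakeFreelistEntry)];
  auto* entry =
      new (slot) FakeFreelistEntry{0xdeadbeef, ~uintptr_t{0xdeadbeef}};
  // Without the clearing step, the caller would observe stale freelist
  // metadata in memory that was promised to be zeroed.
  void* allocation = entry->ClearForAllocation();
  const unsigned char zeroes[sizeof(FakeFreelistEntry)] = {};
  assert(std::memcmp(allocation, zeroes, sizeof(zeroes)) == 0);
  return 0;
}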
@@ -9,14 +9,40 @@
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/immediate_crash.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
// DCHECK_IS_ON() only for now, as it causes issues with some tests.
// TODO(lizeb): Enable in as many configurations as possible.
#if DCHECK_IS_ON()
#define PA_HAS_FREELIST_HARDENING
#endif
namespace base {
namespace internal {
namespace {
#if defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()
[[noreturn]] NOINLINE void FreelistCorruptionDetected() {
IMMEDIATE_CRASH();
}
#endif // defined(PA_HAS_FREELIST_HARDENING) || DCHECK_IS_ON()
} // namespace
struct EncodedPartitionFreelistEntry;
#if defined(PA_HAS_FREELIST_HARDENING)
static_assert((1 << kMinBucketedOrder) >= 2 * sizeof(void*),
"Need enough space for two pointers in freelist entries");
#endif
// Freelist entries are encoded for security reasons. See
// //base/allocator/partition_allocator/PartitionAlloc.md and |Transform()| for
// the rationale and mechanism, respectively.
class PartitionFreelistEntry {
public:
PartitionFreelistEntry() { SetNext(nullptr); }
@@ -27,7 +53,9 @@ class PartitionFreelistEntry {
void* slot_start,
PartitionFreelistEntry* next) {
auto* entry = reinterpret_cast<PartitionFreelistEntry*>(slot_start);
entry->SetNextForThreadCache(next);
// ThreadCache freelists can point to entries across superpage boundaries,
// so no check here, contrary to |SetNext()|.
entry->SetNextInternal(next);
return entry;
}
@@ -42,13 +70,33 @@ class PartitionFreelistEntry {
}
ALWAYS_INLINE PartitionFreelistEntry* GetNext() const;
NOINLINE void CheckFreeList() const {
#if defined(PA_HAS_FREELIST_HARDENING)
for (auto* entry = this; entry; entry = entry->GetNext()) {
// |GetNext()| checks freelist integrity.
}
#endif
}
// Regular freelists always point to an entry within the same super page.
ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
PA_DCHECK(!ptr ||
(reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) ==
(reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask));
next_ = Encode(ptr);
#if DCHECK_IS_ON()
// Regular freelists always point to an entry within the same super page.
if (UNLIKELY(ptr &&
(reinterpret_cast<uintptr_t>(this) & kSuperPageBaseMask) !=
(reinterpret_cast<uintptr_t>(ptr) & kSuperPageBaseMask))) {
FreelistCorruptionDetected();
}
#endif // DCHECK_IS_ON()
SetNextInternal(ptr);
}
// Zeroes out |this| before returning it.
ALWAYS_INLINE void* ClearForAllocation() {
next_ = nullptr;
#if defined(PA_HAS_FREELIST_HARDENING)
inverted_next_ = 0;
#endif
return reinterpret_cast<void*>(this);
}
private:
@@ -70,16 +118,27 @@ class PartitionFreelistEntry {
return reinterpret_cast<void*>(masked);
}
// ThreadCache freelists can point to entries across superpage boundaries.
ALWAYS_INLINE void SetNextForThreadCache(PartitionFreelistEntry* ptr) {
ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr) {
next_ = Encode(ptr);
#if defined(PA_HAS_FREELIST_HARDENING)
inverted_next_ = ~reinterpret_cast<uintptr_t>(next_);
#endif
}
EncodedPartitionFreelistEntry* next_;
// This is intended to detect unintentional corruptions of the freelist.
// These can happen due to a Use-after-Free, or overflow of the previous
// allocation in the slot span.
#if defined(PA_HAS_FREELIST_HARDENING)
uintptr_t inverted_next_;
#endif
};
struct EncodedPartitionFreelistEntry {
char scrambled[sizeof(PartitionFreelistEntry*)];
#if defined(PA_HAS_FREELIST_HARDENING)
char copy_of_scrambled[sizeof(PartitionFreelistEntry*)];
#endif
EncodedPartitionFreelistEntry() = delete;
~EncodedPartitionFreelistEntry() = delete;
@@ -96,6 +155,13 @@ static_assert(sizeof(PartitionFreelistEntry) ==
"Should not have padding");
ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext() const {
#if defined(PA_HAS_FREELIST_HARDENING)
// GetNext() can be called on decommitted memory, which is full of
// zeroes. This is not a corruption issue, so only check integrity when we
// have a non-nullptr |next_| pointer.
if (UNLIKELY(next_ && ~reinterpret_cast<uintptr_t>(next_) != inverted_next_))
FreelistCorruptionDetected();
#endif // defined(PA_HAS_FREELIST_HARDENING)
return EncodedPartitionFreelistEntry::Decode(next_);
}
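Putting this header's pieces together: |next_| is stored scrambled (the tail
of |Transform()| is visible in the hunk above), and |inverted_next_| holds the
complement of the scrambled value, which is exactly what |GetNext()| compares
against. A rough sketch of the round trip, assuming a 64-bit little-endian
target and using a byte swap as a stand-in for the real scrambling:

#include <cstdint>
#include <cstdlib>

// Stand-in for |Transform()|. Byte-swapping is an involution, so the same
// operation both encodes and decodes.
uintptr_t Transform(uintptr_t ptr) {
  return __builtin_bswap64(ptr);
}

struct Entry {
  uintptr_t next_ = 0;           // Transform()-encoded pointer.
  uintptr_t inverted_next_ = 0;  // ~next_, complement of the encoded value.

  void SetNextInternal(Entry* ptr) {
    next_ = Transform(reinterpret_cast<uintptr_t>(ptr));
    inverted_next_ = ~next_;
  }

  Entry* GetNext() const {
    // As in the hunk above, only a non-null |next_| is validated, because
    // decommitted memory reads back as zeroes.
    if (next_ && ~next_ != inverted_next_)
      std::abort();  // FreelistCorruptionDetected() equivalent.
    return reinterpret_cast<Entry*>(Transform(next_));
  }
};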
@@ -126,6 +126,9 @@ DeferredUnmap SlotSpanMetadata<thread_safe>::FreeSlowPath() {
if (UNLIKELY(bucket->is_direct_mapped())) {
return PartitionDirectUnmap(this);
}
#if DCHECK_IS_ON()
freelist_head->CheckFreeList();
#endif
// If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_slot_spans_head))