Commit 3f096d72 authored by Bill Budge, committed by Commit Bot

[Memory] Adds ASLR handling for OSes and architectures supported by V8.

- Refactors GetRandomPageBase to make it easier to understand.
- Adds handling for the architectures and OSes needed by V8.
- Increases random bits on 64-bit Windows to 48 on Windows 8.1 or
  greater, 44 on older versions.
- Adds unit tests.

Bug: chromium:756050
Change-Id: I3d27aeb9e243cfdda005662dc6df710bdbcabaaa
Reviewed-on: https://chromium-review.googlesource.com/688741
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Erik Chen <erikchen@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#505900}
parent 8d7e0e19
base/BUILD.gn
@@ -2334,6 +2334,7 @@ test("base_unittests") {
   if (use_partition_alloc) {
     sources += [
+      "allocator/partition_allocator/address_space_randomization_unittest.cc",
       "allocator/partition_allocator/partition_alloc_unittest.cc",
       "allocator/partition_allocator/spin_lock_unittest.cc",
     ]
...
base/allocator/partition_allocator/address_space_randomization.cc
@@ -90,14 +90,11 @@ static LazyInstance<ranctx>::Leaky s_ranctx = LAZY_INSTANCE_INITIALIZER;
 void* GetRandomPageBase() {
   uintptr_t random = static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
-#if defined(ARCH_CPU_X86_64)
-  random <<= 32UL;
+#if defined(ARCH_CPU_64_BITS)
+  random <<= 32ULL;
   random |= static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
-  // This address mask gives a low likelihood of address space collisions. We
-  // handle the situation gracefully if there is a collision.
 #if defined(OS_WIN)
-  random &= 0x3ffffffffffUL;
   // Windows >= 8.1 has the full 47 bits. Use them where available.
   static bool windows_81 = false;
   static bool windows_81_initialized = false;
@@ -106,23 +103,16 @@ void* GetRandomPageBase() {
     windows_81_initialized = true;
   }
   if (!windows_81) {
-    random += 0x10000000000UL;
+    random &= internal::kASLRMaskBefore8_10;
+  } else {
+    random &= internal::kASLRMask;
   }
-#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  // This range is copied from the TSan source, but works for all tools.
-  random &= 0x007fffffffffUL;
-  random += 0x7e8000000000UL;
-#else
-  // Linux and OS X support the full 47-bit user space of x64 processors.
-  random &= 0x3fffffffffffUL;
-#endif  // defined(OS_WIN)
-#elif defined(ARCH_CPU_ARM64)
-  // ARM64 on Linux has 39-bit user space.
-  random &= 0x3fffffffffUL;
-  random += 0x1000000000UL;
-#else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_ARM64)
+  random += internal::kASLROffset;
+#else  // defined(OS_POSIX)
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(OS_POSIX)
+#else  // defined(ARCH_CPU_32_BITS)
 #if defined(OS_WIN)
   // On win32 host systems the randomization plus huge alignment causes
   // excessive fragmentation. Plus most of these systems lack ASLR, so the
@@ -133,31 +123,12 @@ void* GetRandomPageBase() {
     isWow64 = FALSE;
   if (!isWow64)
     return nullptr;
-#elif defined(OS_MACOSX)
-  // macOS as of 10.12.5 does not clean up entries in page map levels 3/4
-  // [PDP/PML4] created from mmap or mach_vm_allocate, even after the region is
-  // destroyed. Using a virtual address space that is too large causes a leak of
-  // about 1 wired [can never be paged out] page per call to mmap(). The page is
-  // only reclaimed when the process is killed. Confine the hint to a 39-bit
-  // section of the virtual address space.
-  //
-  // This implementation adapted from
-  // https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
-  // is that here we clamp to 39 bits, not 32.
-  //
-  // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
-  // changes.
-  random &= 0x3fffffffffUL;
-  random += 0x1000000000UL;
 #endif  // defined(OS_WIN)
-  // This is a good range on Windows, Linux and Mac.
-  // Allocates in the 0.5-1.5GB region.
-  random &= 0x3fffffff;
-  random += 0x20000000;
-#endif  // defined(ARCH_CPU_X86_64)
-  random &= kPageAllocationGranularityBaseMask;
+  random &= internal::kASLRMask;
+  random += internal::kASLROffset;
+#endif  // defined(ARCH_CPU_32_BITS)
+  DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
   return reinterpret_cast<void*>(random);
 }
...
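After the refactor, every configuration reduces to the same pattern: draw enough random bits, mask them to the platform's usable range, then add the platform offset. The following standalone sketch mirrors the shape of the 64-bit POSIX path; Random32, kGranularityBaseMask, and the 46-bit mask are illustrative stand-ins for the committed RNG and constants, assuming a 64-bit target with 4 KiB allocation granularity.

#include <cstdint>
#include <random>

namespace {

// Illustrative stand-ins; the committed values live in
// address_space_randomization.h and page_allocator.h.
constexpr uintptr_t kGranularityBaseMask = ~uintptr_t{0xFFF};
constexpr uintptr_t kASLRMaskSketch =
    ((uintptr_t{1} << 46) - 1) & kGranularityBaseMask;
constexpr uintptr_t kASLROffsetSketch = 0;

// Stand-in for ranval(s_ranctx.Pointer()): any source of 32 random bits.
uint32_t Random32() {
  static std::mt19937 rng{std::random_device{}()};
  return rng();
}

}  // namespace

// Mirrors the new 64-bit POSIX path: draw 64 random bits, clamp them to the
// platform's usable range (the mask is already page aligned), then shift by
// the platform offset.
void* GetRandomPageBaseSketch() {
  uintptr_t random = static_cast<uintptr_t>(Random32());
  random <<= 32ULL;
  random |= static_cast<uintptr_t>(Random32());
  random &= kASLRMaskSketch;
  random += kASLROffsetSketch;
  return reinterpret_cast<void*>(random);
}

Note that callers treat the result only as a placement hint (there is no MAP_FIXED), so a collision with an existing mapping degrades gracefully instead of failing the allocation.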
base/allocator/partition_allocator/address_space_randomization.h
@@ -5,10 +5,130 @@
 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#include "base/allocator/partition_allocator/page_allocator.h"
 #include "base/base_export.h"
+#include "build/build_config.h"
 namespace base {
+namespace internal {
+
+constexpr uintptr_t AslrAddress(uintptr_t mask) {
+  return mask & kPageAllocationGranularityBaseMask;
+}
+constexpr uintptr_t AslrMask(uintptr_t bits) {
+  return AslrAddress((1ULL << bits) - 1ULL);
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+// We shouldn't be allocating system pages at all for sanitizer builds.
+// However, we do, and if random hint addresses interfere with address ranges
+// hard-coded in those tools, bad things happen. This address range is copied
+// from the TSan source, but works with all tools.
+// See crbug.com/539863.
+constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
+constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
+
+#elif defined(OS_WIN)
+// Windows 8.1 and newer support the full 48-bit address range. Older versions
+// of Windows only support 44 bits. Since kASLROffset is non-zero and may cause
+// a carry, use 47- and 43-bit masks.
+// See http://www.alex-ionescu.com/?p=246
+constexpr uintptr_t kASLRMask = AslrMask(47);
+constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
+// Try not to map pages into the range where Windows loads DLLs by default.
+constexpr uintptr_t kASLROffset = 0x80000000ULL;
+
+#elif defined(OS_MACOSX)
+// macOS as of 10.12.5 does not clean up entries in page map levels 3/4
+// [PDP/PML4] created from mmap or mach_vm_allocate, even after the region is
+// destroyed. Using a virtual address space that is too large causes a leak of
+// about 1 wired [can never be paged out] page per call to mmap(). The page is
+// only reclaimed when the process is killed. Confine the hint to a 39-bit
+// section of the virtual address space.
+//
+// This implementation is adapted from
+// https://chromium-review.googlesource.com/c/v8/v8/+/557958. The difference
+// is that here we clamp to 39 bits, not 32.
+//
+// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
+// changes.
+constexpr uintptr_t kASLRMask = AslrMask(38);
+constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+#else  // defined(OS_POSIX)
+
+#if defined(ARCH_CPU_X86_64)
+// Linux and OS X support the full 47-bit user space of x64 processors. Use
+// only 46 bits to give the kernel a chance to fulfill the request.
+constexpr uintptr_t kASLRMask = AslrMask(46);
+constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+#elif defined(ARCH_CPU_ARM64)
+// ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset could
+// cause a carry.
+constexpr uintptr_t kASLRMask = AslrMask(38);
+constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+
+#elif defined(ARCH_CPU_PPC64)
+
+#if defined(OS_AIX)
+// AIX: 64 bits of virtual addressing, but we limit the address range to:
+//   a) minimize Segment Lookaside Buffer (SLB) misses and
+//   b) use extra address space to isolate the mmap regions.
+constexpr uintptr_t kASLRMask = AslrMask(30);
+constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
+
+#elif defined(ARCH_CPU_BIG_ENDIAN)
+// Big-endian Linux: 44 bits of virtual addressing. Use 42.
+constexpr uintptr_t kASLRMask = AslrMask(42);
+constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+#else  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+// Little-endian Linux: 48 bits of virtual addressing. Use 46.
+constexpr uintptr_t kASLRMask = AslrMask(46);
+constexpr uintptr_t kASLROffset = AslrAddress(0);
+#endif  // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
+
+#elif defined(ARCH_CPU_S390X)
+// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
+// of virtual addressing. Truncate to 40 bits to give the kernel a chance to
+// fulfill the request.
+constexpr uintptr_t kASLRMask = AslrMask(40);
+constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+#elif defined(ARCH_CPU_S390)
+// 31 bits of virtual addressing. Truncate to 29 bits to give the kernel a
+// chance to fulfill the request.
+constexpr uintptr_t kASLRMask = AslrMask(29);
+constexpr uintptr_t kASLROffset = AslrAddress(0);
+
+#else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+       // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+// For all other POSIX variants, use 30 bits.
+constexpr uintptr_t kASLRMask = AslrMask(30);
+
+#if defined(OS_SOLARIS)
+// For our Solaris/illumos mmap hint, we pick a random address in the bottom
+// half of the top half of the address space (that is, the third quarter).
+// Because we do not MAP_FIXED, this will be treated only as a hint -- the
+// system will not fail to mmap() because something else happens to already
+// be mapped at our random address. We deliberately set the hint high enough
+// to get well above the system's break (that is, the heap); Solaris and
+// illumos will try the hint and, if that fails, allocate as if there were
+// no hint at all. The high hint prevents the break from getting hemmed in
+// at low values, ceding half of the address space to the system heap.
+constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
+#elif defined(OS_AIX)
+// The range 0x30000000 - 0xD0000000 is available on AIX; choose the upper
+// range.
+constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
+#else  // !defined(OS_SOLARIS) && !defined(OS_AIX)
+// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+// variety of ASLR modes (PAE kernel, NX compat mode, etc.) and on macOS
+// 10.6 and 10.7.
+constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+#endif  // !defined(OS_SOLARIS) && !defined(OS_AIX)
+
+#endif  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
+        // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
+
+#endif  // defined(OS_POSIX)
+
+#else  // defined(ARCH_CPU_32_BITS)
+// This is a good range on 32-bit Windows, Linux and Mac. Allocates in the
+// 0.5-1.5 GB region. There is no issue with carries here.
+constexpr uintptr_t kASLRMask = AslrMask(30);
+constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+#endif  // defined(ARCH_CPU_32_BITS)
+
+}  // namespace internal
+
 // Calculates a random preferred mapping address. In calculating an address, we
 // balance good ASLR against not fragmenting the address space too badly.
 BASE_EXPORT void* GetRandomPageBase();
...
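Because AslrAddress and AslrMask are constexpr, the carry reasoning in the comments above can be checked at compile time. A minimal sketch, re-deriving the helpers locally under the assumption of a 64-bit address space and 4 KiB allocation granularity (the real header takes the granularity from page_allocator.h):

#include <cstdint>

// Local re-derivation of the header's helpers; kGranularityBaseMask assumes
// 4 KiB pages, and uint64_t stands in for uintptr_t on a 64-bit target.
constexpr uint64_t kGranularityBaseMask = ~uint64_t{0xFFF};

constexpr uint64_t AslrAddress(uint64_t mask) {
  return mask & kGranularityBaseMask;
}
constexpr uint64_t AslrMask(uint64_t bits) {
  return AslrAddress((uint64_t{1} << bits) - 1);
}

// 32-bit default: the mask keeps the low page bits clear and the offset is
// already page aligned, so hints land in the 0.5-1.5 GB region.
static_assert(AslrMask(30) == 0x3FFFF000ULL, "30-bit mask, page bits clear");
static_assert(AslrAddress(0x20000000ULL) == 0x20000000ULL, "offset aligned");

// Windows x64: a 47-bit mask plus the 0x80000000 offset cannot carry past
// the 48-bit address range, which is why the mask uses 47 bits, not 48.
static_assert(AslrMask(47) + 0x80000000ULL < (uint64_t{1} << 48),
              "carry stays inside 48 bits");

The last assertion is the Windows comment's carry argument in numbers: the maximum hint 0x7FFFFFFFF000 + 0x80000000 = 0x80007FFFF000 still fits in 48 bits.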
base/allocator/partition_allocator/address_space_randomization_unittest.cc (new file)
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/partition_allocator/address_space_randomization.h"

#include <set>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/bit_cast.h"
#include "base/bits.h"
#include "base/sys_info.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
// VersionHelpers.h must be included after windows.h.
#include <VersionHelpers.h>
#endif

namespace base {

TEST(AddressSpaceRandomizationTest, GetRandomPageBase) {
  uintptr_t mask = internal::kASLRMask;
#if defined(ARCH_CPU_64_BITS) && defined(OS_WIN)
  if (!IsWindows8Point1OrGreater()) {
    mask = internal::kASLRMaskBefore8_10;
  }
#endif

  // Sample the first 100 addresses.
  std::set<uintptr_t> addresses;
  uintptr_t address_logical_sum = 0;
  uintptr_t address_logical_product = static_cast<uintptr_t>(-1);
  for (int i = 0; i < 100; i++) {
    uintptr_t address = reinterpret_cast<uintptr_t>(base::GetRandomPageBase());
    // Test that the address is in range.
    EXPECT_LE(internal::kASLROffset, address);
    EXPECT_GE(internal::kASLROffset + mask, address);
    // Test that the address is page aligned.
    EXPECT_EQ(0ULL, (address & kPageAllocationGranularityOffsetMask));
    // Test that the address is unique (no collisions in 100 tries).
    CHECK_EQ(0ULL, addresses.count(address));
    addresses.insert(address);
    // Sum and product to test randomness at each bit position, below.
    address -= internal::kASLROffset;
    address_logical_sum |= address;
    address_logical_product &= address;
  }
  // All bits in address_logical_sum should be set, since the likelihood of
  // never setting any given bit is 1 / (2 ^ 100) with a good RNG. Likewise,
  // all bits in address_logical_product should be cleared.
  EXPECT_EQ(mask, address_logical_sum);
  EXPECT_EQ(0ULL, address_logical_product);
}

}  // namespace base
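The logical sum/product trick in this test applies to any uniform source of bits: OR-ing enough samples should saturate every bit of the masked range, and AND-ing them should clear every bit, each check failing with probability 2^-100 per bit over 100 draws. A self-contained illustration of the same check, where std::mt19937_64, the fixed seed, and the 46-bit mask are illustrative stand-ins rather than part of the test:

#include <cassert>
#include <cstdint>
#include <random>

int main() {
  constexpr uint64_t kMask = (uint64_t{1} << 46) - 1;  // any bit range works
  std::mt19937_64 rng(12345);  // fixed seed for repeatability
  uint64_t logical_sum = 0;
  uint64_t logical_product = ~uint64_t{0};
  for (int i = 0; i < 100; i++) {
    uint64_t sample = rng() & kMask;
    logical_sum |= sample;      // each bit is set in some sample...
    logical_product &= sample;  // ...and clear in some other sample
  }
  // With 100 draws, each bit fails these checks with probability 2^-100.
  assert(logical_sum == kMask);
  assert(logical_product == 0);
  return 0;
}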