Commit f4127cf9 authored by Bartek Nowierski, committed by Commit Bot

Adjust CheckedPtr2Impl to use PA tag support

Bug: 1073933
Change-Id: Id5e95549368323ce77fd3d0ff8957e92445b3b4b
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2266255
Commit-Queue: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Bartek Nowierski <bartekn@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#782912}
parent 3398d444
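What this CL does, in brief: CheckedPtr2 keeps a 16-bit "generation" (tag) copy in the otherwise-unused top bits of a 64-bit pointer, and on dereference re-reads the tag stored next to the allocation, trapping if the two disagree. Before this CL the tag was read ad hoc from 16 bits before the allocation, guarded by test-only CHECKED_PTR2_PROTECTION_ENABLED plumbing; this CL deletes that plumbing and reads the tag through PartitionAlloc's tag support (PartitionTag, PartitionTagPointer()). Below is a minimal, self-contained sketch of the scheme; the TagPointer() helper is an illustrative stand-in for PartitionAlloc's real tag placement, not Chromium's actual API.

#include <cstdint>

using PartitionTag = uint16_t;  // per-allocation generation, bumped on free
constexpr int kGenerationBits = 16;
constexpr int kValidAddressBits = 64 - kGenerationBits;
constexpr uintptr_t kAddressMask = (uintptr_t{1} << kValidAddressBits) - 1;

// Stand-in for PartitionTagPointer(): assume the tag sits directly before the
// allocation. Real PartitionAlloc locates it via kPartitionTagOffset.
PartitionTag* TagPointer(void* ptr) {
  return reinterpret_cast<PartitionTag*>(ptr) - 1;
}

uintptr_t Wrap(void* ptr) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t generation = *TagPointer(ptr);
  return addr | (generation << kValidAddressBits);  // tag into top 16 bits
}

void* Unwrap(uintptr_t wrapped) {
  void* addr = reinterpret_cast<void*>(wrapped & kAddressMask);
  uintptr_t generation = *TagPointer(addr);
  // If the allocation is still alive, the XOR cancels the top bits and yields
  // the plain address; if it was freed and the tag changed, the top bits stay
  // non-zero and dereferencing the result faults.
  return reinterpret_cast<void*>((generation << kValidAddressBits) ^ wrapped);
}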
@@ -10,15 +10,11 @@
 #include <utility>
 
+#include "base/allocator/partition_allocator/partition_tag.h"
 #include "base/check_op.h"
 #include "base/compiler_specific.h"
 #include "build/build_config.h"
 
-// TEST: We can't use protection in the real code (yet) because it may lead to
-// crashes in absence of PartitionAlloc support. Setting it to 0 will disable
-// the protection, while preserving all calculations.
-#define CHECKED_PTR2_PROTECTION_ENABLED 0
 #define CHECKED_PTR2_USE_NO_OP_WRAPPER 0
 
 // Set it to 1 to avoid branches when checking if per-pointer protection is
@@ -116,9 +112,7 @@ struct CheckedPtr2Impl {
   static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
     void* ptr = const_cast<void*>(cv_ptr);
     uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
-#if CHECKED_PTR2_USE_NO_OP_WRAPPER
-    static_assert(!CHECKED_PTR2_PROTECTION_ENABLED, "");
-#else
+#if !CHECKED_PTR2_USE_NO_OP_WRAPPER
     // Make sure that the address bits that will be used for generation are 0.
     // If they aren't, they'd fool the unwrapper into thinking that the
    // protection is enabled, making it try to read and compare the generation.
@@ -130,33 +124,18 @@ struct CheckedPtr2Impl {
       return addr;
     }
 
-    // Read the generation from 16 bits before the allocation. Then place it in
-    // the top bits of the address.
-    static_assert(sizeof(uint16_t) * 8 == kGenerationBits, "");
-#if CHECKED_PTR2_PROTECTION_ENABLED
-    uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr) - 1);
-#else
-    // TEST: Reading from offset -1 may crash without full PA support.
-    // Just read from offset 0 to attain the same perf characteristics as the
-    // expected production solution.
-    // This generation will be ignored anyway either when unwrapping or below
-    // (depending on the algorithm variant), on the
-    // !CHECKED_PTR2_PROTECTION_ENABLED path.
-    uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr));
-#endif  // CHECKED_PTR2_PROTECTION_ENABLED
+    // Read the generation and place it in the top bits of the address.
+    static_assert(sizeof(PartitionTag) * 8 == kGenerationBits, "");
+    uintptr_t generation =
+        *(static_cast<volatile PartitionTag*>(PartitionTagPointer(ptr)));
     generation <<= kValidAddressBits;
     addr |= generation;
 
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
     // Always set top bit to 1, to indicated that the protection is enabled.
     addr |= kTopBit;
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-    // TEST: Clear the generation, or else it could crash without PA support.
-    // If the top bit was set, the unwrapper would read from before the address
-    // address, but with it cleared, it'll read from the address itself.
-    addr &= kAddressMask;
-#endif  // !CHECKED_PTR2_PROTECTION_ENABLED
 #endif  // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
-#endif  // CHECKED_PTR2_USE_NO_OP_WRAPPER
+#endif  // !CHECKED_PTR2_USE_NO_OP_WRAPPER
 
     return addr;
   }
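Concretely, with the tag bytes the unit test below plants ({0xBA, 0x42}, read little-endian as 0x42BA), the wrap arithmetic works out as follows. The address is hypothetical, chosen only to make the bit placement visible; this is not code from the CL.

#include <cstdint>

constexpr uintptr_t kAddr = 0x0000'1234'5678'9ABC;  // hypothetical address
constexpr uintptr_t kGeneration = 0x42BA;  // tag read via PartitionTagPointer()
constexpr uintptr_t kWrapped = kAddr | (kGeneration << 48);

static_assert(kWrapped == 0x42BA'1234'5678'9ABC,
              "generation lands in the top 16 bits");
static_assert((kWrapped & 0x0000'FFFF'FFFF'FFFF) == kAddr,
              "the address is recoverable by masking");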
@@ -166,8 +145,10 @@ struct CheckedPtr2Impl {
       return kWrappedNullPtr;
     }
 
-  static ALWAYS_INLINE uintptr_t
-  SafelyUnwrapPtrInternal(uintptr_t wrapped_ptr) {
+  // Unwraps the pointer's uintptr_t representation, while asserting that memory
+  // hasn't been freed. The function is allowed to crash on nullptr.
+  static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
+      uintptr_t wrapped_ptr) {
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
     // Top bit tells if the protection is enabled. Use it to decide whether to
     // read the word before the allocation, which exists only if the protection
@@ -205,9 +186,9 @@ struct CheckedPtr2Impl {
    //        anything)
    // Ex.2: generation_ptr=0x0000000012345678, read e.g. 0x2345 (doesn't
    //        matter what we read, as long as this read doesn't crash)
-    volatile uint16_t* generation_ptr =
-        reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
-        offset;
+    volatile PartitionTag* generation_ptr =
+        static_cast<volatile PartitionTag*>(ExtractPtr(wrapped_ptr)) -
+        offset * (kPartitionTagOffset / sizeof(PartitionTag));
     uintptr_t generation = *generation_ptr;
     // Shift generation into the right place and add back the enabled bit.
     //
@@ -241,7 +222,7 @@
    //    b) returning 0x1676000012345678 (this will generate a desired crash)
    // Ex.2: returning 0x0000000012345678
     static_assert(CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING, "");
-    return generation ^ wrapped_ptr;
+    return reinterpret_cast<void*>(generation ^ wrapped_ptr);
 #else  // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
     uintptr_t ptr_generation = wrapped_ptr >> kValidAddressBits;
     if (ptr_generation > 0) {
@@ -250,69 +231,39 @@
       // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
       // compiler could cache the value in a register and thus could miss that
       // another thread freed memory and cleared generation.
-#if CHECKED_PTR2_PROTECTION_ENABLED
-      uintptr_t read_generation =
-          *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
-            1);
-#else
-      // TEST: Reading from before the pointer may crash. See more above...
-      uintptr_t read_generation =
-          *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)));
-#endif
+      uintptr_t read_generation = *static_cast<volatile PartitionTag*>(
+          PartitionTagPointer(ExtractPtr(wrapped_ptr)));
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
       // Use hardware to detect generation mismatch. CPU will crash if top bits
       // aren't all 0 (technically it won't if all bits are 1, but that's a
       // kernel mode address, which isn't allowed either).
       read_generation <<= kValidAddressBits;
-      return read_generation ^ wrapped_ptr;
+      return reinterpret_cast<void*>(read_generation ^ wrapped_ptr);
 #else
-#if CHECKED_PTR2_PROTECTION_ENABLED
       if (UNLIKELY(ptr_generation != read_generation))
         IMMEDIATE_CRASH();
-#else
-      // TEST: Use volatile to prevent optimizing out the calculations leading
-      // to this point.
-      volatile bool x = false;
-      if (ptr_generation != read_generation)
-        x = true;
-#endif  // CHECKED_PTR2_PROTECTION_ENABLED
-      return wrapped_ptr & kAddressMask;
+      return reinterpret_cast<void*>(wrapped_ptr & kAddressMask);
 #endif  // CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
     }
-    return wrapped_ptr;
+    return reinterpret_cast<void*>(wrapped_ptr);
 #endif  // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
   }
 
-  // Unwraps the pointer's uintptr_t representation, while asserting that memory
-  // hasn't been freed. The function is allowed to crash on nullptr.
-  static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
-      uintptr_t wrapped_ptr) {
-#if CHECKED_PTR2_PROTECTION_ENABLED
-    return reinterpret_cast<void*>(SafelyUnwrapPtrInternal(wrapped_ptr));
-#else
-    // TEST: Use volatile to prevent optimizing out the calculations leading to
-    // this point.
-    // |SafelyUnwrapPtrInternal| was separated out solely for this purpose.
-    volatile uintptr_t addr = SafelyUnwrapPtrInternal(wrapped_ptr);
-    return reinterpret_cast<void*>(addr);
-#endif
-  }
-
   // Unwraps the pointer's uintptr_t representation, while asserting that memory
   // hasn't been freed. The function must handle nullptr gracefully.
   static ALWAYS_INLINE void* SafelyUnwrapPtrForExtraction(
       uintptr_t wrapped_ptr) {
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
-    // In this implementation SafelyUnwrapPtrForDereference doesn't tolerate
+    // In this implementation, SafelyUnwrapPtrForDereference doesn't tolerate
     // nullptr, because it reads unconditionally to avoid branches. Handle the
     // nullptr case here.
     if (wrapped_ptr == kWrappedNullPtr)
       return nullptr;
-    return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+    return SafelyUnwrapPtrForDereference(wrapped_ptr);
 #else
-    // In this implementation SafelyUnwrapPtrForDereference handles nullptr case
-    // well.
+    // In this implementation, SafelyUnwrapPtrForDereference handles nullptr
+    // case well.
-    return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+    return SafelyUnwrapPtrForDereference(wrapped_ptr);
 #endif
   }
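The CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING path above never compares the two tags explicitly: it XORs the freshly-read tag against the copy held in the pointer's top bits and lets the MMU do the check, since x86-64 and arm64 fault on non-canonical addresses. A worked example with hypothetical values (the tag constants below are made up for illustration):

#include <cstdint>

constexpr uintptr_t kWrapped = 0x42BA'1234'5678'9ABC;  // tag 0x42BA at wrap time
constexpr uintptr_t kLiveTag = 0x42BA;   // tag unchanged: allocation still alive
constexpr uintptr_t kFreedTag = 0x54CE;  // tag bumped when the slot was freed

static_assert(((kLiveTag << 48) ^ kWrapped) == 0x0000'1234'5678'9ABC,
              "match: top bits cancel, leaving a clean, dereferenceable address");
static_assert(((kFreedTag << 48) ^ kWrapped) == 0x1674'1234'5678'9ABC,
              "mismatch: non-zero top bits form a non-canonical address that faults");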
@@ -320,7 +271,7 @@
   // on whether memory was freed or not.
   static ALWAYS_INLINE void* UnsafelyUnwrapPtrForComparison(
       uintptr_t wrapped_ptr) {
-    return reinterpret_cast<void*>(ExtractAddress(wrapped_ptr));
+    return ExtractPtr(wrapped_ptr);
   }
 
   // Advance the wrapped pointer by |delta| bytes.
@@ -337,7 +288,9 @@
   static ALWAYS_INLINE uintptr_t ExtractAddress(uintptr_t wrapped_ptr) {
     return wrapped_ptr & kAddressMask;
   }
 
+  static ALWAYS_INLINE void* ExtractPtr(uintptr_t wrapped_ptr) {
+    return reinterpret_cast<void*>(ExtractAddress(wrapped_ptr));
+  }
 
   static ALWAYS_INLINE uintptr_t ExtractGeneration(uintptr_t wrapped_ptr) {
     return wrapped_ptr & kGenerationMask;
   }
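The new ExtractPtr() helper merely packages the existing mask-and-cast pattern; the two masks partition a wrapped value into its fields. The mask values below are illustrative, derived from kGenerationBits == 16 (per the static_assert earlier in the CL), not the header's actual literals:

#include <cstdint>

constexpr uintptr_t kAddressMask = 0x0000'FFFF'FFFF'FFFF;
constexpr uintptr_t kGenerationMask = 0xFFFF'0000'0000'0000;
constexpr uintptr_t kWrapped = 0x42BA'1234'5678'9ABC;

static_assert((kWrapped & kAddressMask) == 0x0000'1234'5678'9ABC,
              "ExtractAddress() keeps the low 48 address bits");
static_assert((kWrapped & kGenerationMask) == 0x42BA'0000'0000'0000,
              "ExtractGeneration() keeps the tag bits");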
@@ -375,7 +328,7 @@ struct DereferencedPointerType<void> {};
 // we aren't striving to maximize compatibility with raw pointers, merely
 // adding support for cases encountered so far).
 template <typename T,
-#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) && ENABLE_CHECKED_PTR
           typename Impl = internal::CheckedPtr2Impl<>>
 #else
           typename Impl = internal::CheckedPtrNoOpImpl>
@@ -10,6 +10,7 @@
 #include <type_traits>
 #include <utility>
 
+#include "base/allocator/partition_allocator/partition_tag.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -51,6 +52,8 @@ static_assert(
     std::is_trivially_default_constructible<CheckedPtr<std::string>>::value,
     "CheckedPtr should be trivially default constructible");
 
+// Don't use base::internal for testing CheckedPtr API, to test if code outside
+// this namespace calls the correct functions from this namespace.
 namespace {
 
 static int g_wrap_raw_ptr_cnt = INT_MIN;
@@ -613,23 +616,24 @@ TEST_F(CheckedPtrTest, AssignmentFromNullptr) {
   EXPECT_EQ(g_get_for_dereference_cnt, 0);
 }
 
-#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
-namespace {
+}  // namespace
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) && ENABLE_CHECKED_PTR
+
+namespace base {
+namespace internal {
 
 struct CheckedPtr2ImplPartitionAllocSupportEnabled
-    : base::internal::CheckedPtr2ImplPartitionAllocSupport {
+    : CheckedPtr2ImplPartitionAllocSupport {
   static bool EnabledForPtr(void* ptr) { return true; }
 };
 
-using CheckedPtr2ImplEnabled = base::internal::CheckedPtr2Impl<
-    CheckedPtr2ImplPartitionAllocSupportEnabled>;
-
-}  // namespace
+using CheckedPtr2ImplEnabled =
+    CheckedPtr2Impl<CheckedPtr2ImplPartitionAllocSupportEnabled>;
 
 TEST(CheckedPtr2Impl, WrapNull) {
-  ASSERT_EQ(base::internal::CheckedPtr2Impl<>::GetWrappedNullPtr(), 0u);
-  ASSERT_EQ(base::internal::CheckedPtr2Impl<>::WrapRawPtr(nullptr), 0u);
+  ASSERT_EQ(CheckedPtr2Impl<>::GetWrappedNullPtr(), 0u);
+  ASSERT_EQ(CheckedPtr2Impl<>::WrapRawPtr(nullptr), 0u);
 }
 
 TEST(CheckedPtr2Impl, SafelyUnwrapNull) {
@@ -637,23 +641,20 @@ TEST(CheckedPtr2Impl, SafelyUnwrapNull) {
 }
 
 TEST(CheckedPtr2Impl, WrapAndSafelyUnwrap) {
-  char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-  // If protection is disabled, wrap & unwrap will read at the pointer, not
-  // before it.
-  bytes[8] = bytes[6];
-  bytes[9] = bytes[7];
-#endif
-  void* ptr = bytes + sizeof(uintptr_t);
+  // Put generation 16B and 32B before the "object", so that it works on both
+  // Debug and Release builds.
+  char bytes[] = {0xBA, 0x42, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xBA, 0x42,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0x78, 0x89};
+  void* ptr = bytes + 32;
+  ASSERT_EQ(0x78, *static_cast<char*>(ptr));
   uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
   uintptr_t set_top_bit = 0x0000000000000000;
   uintptr_t mask = 0xFFFFFFFFFFFFFFFF;
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
   set_top_bit = 0x8000000000000000;
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-  mask = 0x0000FFFFFFFFFFFF;
-#endif
 #endif
 
   uintptr_t wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
@@ -666,48 +667,53 @@
 #else
   ASSERT_EQ(wrapped, (addr | 0x42BA000000000000 | set_top_bit) & mask);
 #endif
-  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrForDereference(wrapped),
+            ptr);
 
-  bytes[7] |= 0x80;
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-  bytes[9] = bytes[7];
-#endif
+  bytes[1] |= 0x80;                // for Debug builds
+  bytes[kCookieSize + 1] |= 0x80;  // for Release builds
   wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
 #if CHECKED_PTR2_USE_NO_OP_WRAPPER
   ASSERT_EQ(wrapped, addr);
 #else
   ASSERT_EQ(wrapped, (addr | 0xC2BA000000000000 | set_top_bit) & mask);
 #endif
-  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrForDereference(wrapped),
+            ptr);
 
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
-  bytes[6] = 0;
-  bytes[7] = 0;
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-  bytes[8] = bytes[6];
-  bytes[9] = bytes[7];
-#endif
+  bytes[0] = 0;                // for Debug builds
+  bytes[1] = 0;
+  bytes[kCookieSize] = 0;      // for Release builds
+  bytes[kCookieSize + 1] = 0;
   mask = 0xFFFFFFFFFFFFFFFF;
 #if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
   mask = 0x7FFFFFFFFFFFFFFF;
-#if !CHECKED_PTR2_PROTECTION_ENABLED
-  mask = 0x0000FFFFFFFFFFFF;
-#endif
 #endif
   // Mask out the top bit, because in some cases (not all), it may differ.
-  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped) & mask,
-            wrapped & mask);
-#endif
+  ASSERT_EQ(
+      reinterpret_cast<uintptr_t>(
+          CheckedPtr2ImplEnabled::SafelyUnwrapPtrForDereference(wrapped)) &
+          mask,
+      wrapped & mask);
+#endif  // CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
 }
 
 TEST(CheckedPtr2Impl, SafelyUnwrapDisabled) {
-  char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
-  void* ptr = bytes + sizeof(uintptr_t);
+  // Put generation 16B and 32B before the "object", so that it works on both
+  // Debug and Release builds.
+  char bytes[] = {0xBA, 0x42, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xBA, 0x42,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
+                  0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0x78, 0x89};
+  void* ptr = bytes + 32;
+  ASSERT_EQ(0x78, *static_cast<char*>(ptr));
   uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
-  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(addr), addr);
+  ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrForDereference(addr), ptr);
 }
 
-#endif  // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+}  // namespace internal
+}  // namespace base
 
-}  // namespace
+#endif  // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL) && ENABLE_CHECKED_PTR
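Why the tests plant the {0xBA, 0x42} tag twice: PartitionTagPointer() looks a fixed distance before the object, and that distance differs between Debug builds (an extra cookie sits between the tag and the allocation) and Release builds. A sketch of how the 34-byte test buffer lines up, assuming kCookieSize == 16 as the test's "for Debug/Release builds" comments imply:

// bytes[0..1]    0xBA 0x42   tag, read from ptr - 32 in Debug builds
// bytes[2..15]   0xCC ...    filler
// bytes[16..17]  0xBA 0x42   tag, read from ptr - 16 in Release builds
// bytes[18..31]  0xCC ...    filler
// bytes[32..33]  0x78 0x89   the "object" itself; ptr = bytes + 32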