Commit 0ba5ea12 authored by Vlad Tsyrklevich's avatar Vlad Tsyrklevich Committed by Commit Bot

allocator: Add Windows _aligned_* shims

On Windows we don’t currently hook the _aligned_* allocation APIs, this
can cause issues because _aligned_realloc can call HeapSize and cause
GWP-ASan crashes similar to bug 909720. Unfortunately the
_aligned_realloc API is different enough that it can not be implemented
using the standard POSIX shims, in particular because _aligned_malloc
and _aligned_free don't return valid allocation addresses, they are
offsets into allocations.

I add new Windows platform-specific shims for _aligned_malloc,
_aligned_realloc, and _aligned_free and wire them in for all users of
the allocator shims. I implement these routines on top of the Windows
Heap* API and leave uncommon _aligned_* shims to crash to ensure that
any future uses immediately surface why their use fails.

Bug: 912500, 896019
Change-Id: Ieaa50b816ab277a6ad4b80ee8519027343fa9878
Reviewed-on: https://chromium-review.googlesource.com/c/1367485
Reviewed-by: danakj <danakj@chromium.org>
Reviewed-by: Vitaly Buka <vitalybuka@chromium.org>
Reviewed-by: Alexei Filippov <alph@chromium.org>
Reviewed-by: Erik Chen <erikchen@chromium.org>
Reviewed-by: Will Harris <wfh@chromium.org>
Reviewed-by: Sigurður Ásgeirsson <siggi@chromium.org>
Commit-Queue: Vlad Tsyrklevich <vtsyrklevich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#616106}
parent 2b044398
......@@ -2781,6 +2781,7 @@ test("base_unittests") {
if (use_allocator_shim) {
sources += [
"allocator/allocator_shim_unittest.cc",
"allocator/winheap_stubs_win_unittest.cc",
"sampling_heap_profiler/sampling_heap_profiler_unittest.cc",
]
}
......
......@@ -277,6 +277,38 @@ ALWAYS_INLINE void ShimFreeDefiniteSize(void* ptr, size_t size, void* context) {
context);
}
// Shim entry point for _aligned_malloc(): forwards the request to the head of
// the allocator dispatch chain. On failure, optionally invokes the C++ new
// handler and retries, mirroring the behavior of the other Shim* routines.
ALWAYS_INLINE void* ShimAlignedMalloc(size_t size,
                                      size_t alignment,
                                      void* context) {
  const allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result =
        dispatch->aligned_malloc_function(dispatch, size, alignment, context);
    // Stop on success, or when retrying is disabled, or when the new handler
    // declines to free up memory.
    if (result || !g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return result;
  }
}
// Shim entry point for _aligned_realloc(): forwards to the head of the
// dispatch chain, retrying via the C++ new handler on failure when enabled.
// Note that a nullptr result is also produced by a legitimate size == 0
// "realloc acts as free" call; the retry condition matches the other shims.
ALWAYS_INLINE void* ShimAlignedRealloc(void* address,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  const allocator::AllocatorDispatch* const dispatch = GetChainHead();
  for (;;) {
    void* result = dispatch->aligned_realloc_function(dispatch, address, size,
                                                      alignment, context);
    if (result || !g_call_new_handler_on_malloc_failure || !CallNewHandler(size))
      return result;
  }
}
// Shim entry point for _aligned_free(): simply forwards to the head of the
// allocator dispatch chain.
ALWAYS_INLINE void ShimAlignedFree(void* address, void* context) {
  const allocator::AllocatorDispatch* const dispatch = GetChainHead();
  dispatch->aligned_free_function(dispatch, address, context);
}
} // extern "C"
#if !defined(OS_WIN) && !defined(OS_MACOSX)
......
......@@ -41,7 +41,7 @@ namespace allocator {
// It is possible to dynamically insert further AllocatorDispatch stages
// to the front of the chain, for debugging / profiling purposes.
//
// All the functions must be thred safe. The shim does not enforce any
// All the functions must be thread safe. The shim does not enforce any
// serialization. This is to route to thread-aware allocators (e.g, tcmalloc)
// without introducing unnecessary perf hits.
......@@ -84,6 +84,18 @@ struct AllocatorDispatch {
void* ptr,
size_t size,
void* context);
using AlignedMallocFn = void*(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context);
using AlignedReallocFn = void*(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context);
using AlignedFreeFn = void(const AllocatorDispatch* self,
void* address,
void* context);
AllocFn* const alloc_function;
AllocZeroInitializedFn* const alloc_zero_initialized_function;
......@@ -91,9 +103,16 @@ struct AllocatorDispatch {
ReallocFn* const realloc_function;
FreeFn* const free_function;
GetSizeEstimateFn* const get_size_estimate_function;
// batch_malloc, batch_free, and free_definite_size are specific to the OSX
// and iOS allocators.
BatchMallocFn* const batch_malloc_function;
BatchFreeFn* const batch_free_function;
FreeDefiniteSizeFn* const free_definite_size_function;
// _aligned_malloc, _aligned_realloc, and _aligned_free are specific to the
// Windows allocator.
AlignedMallocFn* const aligned_malloc_function;
AlignedReallocFn* const aligned_realloc_function;
AlignedFreeFn* const aligned_free_function;
const AllocatorDispatch* next;
......
......@@ -71,5 +71,8 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};
......@@ -109,5 +109,8 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};
......@@ -102,6 +102,9 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&BatchMallocImpl, /* batch_malloc_function */
&BatchFreeImpl, /* batch_free_function */
&FreeDefiniteSizeImpl, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};
......
......@@ -62,6 +62,9 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
nullptr, /* aligned_malloc_function */
nullptr, /* aligned_realloc_function */
nullptr, /* aligned_free_function */
nullptr, /* next */
};
......
......@@ -60,6 +60,27 @@ size_t DefaultWinHeapGetSizeEstimateImpl(const AllocatorDispatch*,
return base::allocator::WinHeapGetSizeEstimate(address);
}
// Terminal dispatch entry: services _aligned_malloc() directly on the Windows
// CRT heap via WinHeapAlignedMalloc(). |context| is unused.
void* DefaultWinHeapAlignedMallocImpl(const AllocatorDispatch*,
                                      size_t size,
                                      size_t alignment,
                                      void* context) {
  return base::allocator::WinHeapAlignedMalloc(size, alignment);
}
// Terminal dispatch entry: services _aligned_realloc() directly on the
// Windows CRT heap via WinHeapAlignedRealloc(). |context| is unused.
void* DefaultWinHeapAlignedReallocImpl(const AllocatorDispatch*,
                                       void* ptr,
                                       size_t size,
                                       size_t alignment,
                                       void* context) {
  return base::allocator::WinHeapAlignedRealloc(ptr, size, alignment);
}
// Terminal dispatch entry: services _aligned_free() directly on the Windows
// CRT heap via WinHeapAlignedFree(). |context| is unused.
void DefaultWinHeapAlignedFreeImpl(const AllocatorDispatch*,
                                   void* ptr,
                                   void* context) {
  base::allocator::WinHeapAlignedFree(ptr);
}
} // namespace
// Guarantee that default_dispatch is compile-time initialized to avoid using
......@@ -75,5 +96,8 @@ constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */
&DefaultWinHeapAlignedMallocImpl,
&DefaultWinHeapAlignedReallocImpl,
&DefaultWinHeapAlignedFreeImpl,
nullptr, /* next */
};
......@@ -70,6 +70,60 @@ size_t _msize(void* memblock) {
return ShimGetSizeEstimate(memblock, nullptr);
}
// CRT symbol override: routes _aligned_malloc() through the shim chain.
__declspec(restrict) void* _aligned_malloc(size_t size, size_t alignment) {
  return ShimAlignedMalloc(size, alignment, nullptr);
}
// CRT symbol override: routes _aligned_realloc() through the shim chain.
__declspec(restrict) void* _aligned_realloc(void* address,
                                            size_t size,
                                            size_t alignment) {
  return ShimAlignedRealloc(address, size, alignment, nullptr);
}
// CRT symbol override: routes _aligned_free() through the shim chain.
void _aligned_free(void* address) {
  ShimAlignedFree(address, nullptr);
}
// The following uncommon _aligned_* routines are not used in Chromium and have
// been shimmed to immediately crash to ensure that implementations are added if
// uses are introduced.
__declspec(restrict) void* _aligned_recalloc(void* address,
                                             size_t num,
                                             size_t size,
                                             size_t alignment) {
  // Deliberate crash; see the comment above.
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
// Unimplemented uncommon routine; deliberately crashes (see comment above).
size_t _aligned_msize(void* address, size_t alignment, size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
// Unimplemented uncommon routine; deliberately crashes (see comment above).
__declspec(restrict) void* _aligned_offset_malloc(size_t size,
                                                  size_t alignment,
                                                  size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
// Unimplemented uncommon routine; deliberately crashes (see comment above).
__declspec(restrict) void* _aligned_offset_realloc(void* address,
                                                   size_t size,
                                                   size_t alignment,
                                                   size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
// Unimplemented uncommon routine; deliberately crashes (see comment above).
__declspec(restrict) void* _aligned_offset_recalloc(void* address,
                                                    size_t num,
                                                    size_t size,
                                                    size_t alignment,
                                                    size_t offset) {
  CHECK(false) << "This routine has not been implemented";
  __builtin_unreachable();
}
// The symbols
// * __acrt_heap
// * __acrt_initialize_heap
......
......@@ -179,6 +179,40 @@ class AllocatorShimTest : public testing::Test {
self->next->free_definite_size_function(self->next, ptr, size, context);
}
// Test interceptor: counts _aligned_malloc() calls by requested size, then
// forwards down the dispatch chain unchanged.
static void* MockAlignedMalloc(const AllocatorDispatch* self,
                               size_t size,
                               size_t alignment,
                               void* context) {
  // Only sizes below kMaxSizeTracked fit in the histogram array.
  if (instance_ && size < kMaxSizeTracked) {
    ++instance_->aligned_mallocs_intercepted_by_size[size];
  }
  return self->next->aligned_malloc_function(self->next, size, alignment,
                                             context);
}
// Test interceptor: counts _aligned_realloc() calls both by requested size and
// by (hashed) incoming address, then forwards down the dispatch chain.
static void* MockAlignedRealloc(const AllocatorDispatch* self,
                                void* address,
                                size_t size,
                                size_t alignment,
                                void* context) {
  if (instance_) {
    if (size < kMaxSizeTracked)
      ++instance_->aligned_reallocs_intercepted_by_size[size];
    ++instance_->aligned_reallocs_intercepted_by_addr[Hash(address)];
  }
  return self->next->aligned_realloc_function(self->next, address, size,
                                              alignment, context);
}
// Test interceptor: counts _aligned_free() calls by (hashed) address, then
// forwards down the dispatch chain.
static void MockAlignedFree(const AllocatorDispatch* self,
                            void* address,
                            void* context) {
  if (instance_) {
    ++instance_->aligned_frees_intercepted_by_addr[Hash(address)];
  }
  self->next->aligned_free_function(self->next, address, context);
}
static void NewHandler() {
if (!instance_)
return;
......@@ -196,10 +230,15 @@ class AllocatorShimTest : public testing::Test {
memset(&aligned_allocs_intercepted_by_size, 0, array_size);
memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
memset(&reallocs_intercepted_by_size, 0, array_size);
memset(&reallocs_intercepted_by_addr, 0, array_size);
memset(&frees_intercepted_by_addr, 0, array_size);
memset(&batch_mallocs_intercepted_by_size, 0, array_size);
memset(&batch_frees_intercepted_by_addr, 0, array_size);
memset(&free_definite_sizes_intercepted_by_size, 0, array_size);
memset(&aligned_mallocs_intercepted_by_size, 0, array_size);
memset(&aligned_reallocs_intercepted_by_size, 0, array_size);
memset(&aligned_reallocs_intercepted_by_addr, 0, array_size);
memset(&aligned_frees_intercepted_by_addr, 0, array_size);
did_fail_realloc_0xfeed_once.reset(new ThreadLocalBoolean());
subtle::Release_Store(&num_new_handler_calls, 0);
instance_ = this;
......@@ -227,6 +266,10 @@ class AllocatorShimTest : public testing::Test {
size_t batch_mallocs_intercepted_by_size[kMaxSizeTracked];
size_t batch_frees_intercepted_by_addr[kMaxSizeTracked];
size_t free_definite_sizes_intercepted_by_size[kMaxSizeTracked];
size_t aligned_mallocs_intercepted_by_size[kMaxSizeTracked];
size_t aligned_reallocs_intercepted_by_size[kMaxSizeTracked];
size_t aligned_reallocs_intercepted_by_addr[kMaxSizeTracked];
size_t aligned_frees_intercepted_by_addr[kMaxSizeTracked];
std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0xfeed_once;
subtle::Atomic32 num_new_handler_calls;
......@@ -271,6 +314,9 @@ AllocatorDispatch g_mock_dispatch = {
&AllocatorShimTest::MockBatchMalloc, /* batch_malloc_function */
&AllocatorShimTest::MockBatchFree, /* batch_free_function */
&AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
&AllocatorShimTest::MockAlignedMalloc, /* aligned_malloc_function */
&AllocatorShimTest::MockAlignedRealloc, /* aligned_realloc_function */
&AllocatorShimTest::MockAlignedFree, /* aligned_free_function */
nullptr, /* next */
};
......@@ -399,6 +445,25 @@ TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
}
#endif // defined(OS_MACOSX)
#if defined(OS_WIN)
// Verifies that the UCRT _aligned_malloc/_aligned_realloc/_aligned_free
// symbols are intercepted by an inserted dispatch and that each call is
// recorded with the expected size/address.
TEST_F(AllocatorShimTest, InterceptUcrtAlignedAllocationSymbols) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  constexpr size_t kAlignment = 32;
  void* alloc_ptr = _aligned_malloc(123, kAlignment);
  EXPECT_GE(aligned_mallocs_intercepted_by_size[123], 1u);

  void* new_alloc_ptr = _aligned_realloc(alloc_ptr, 1234, kAlignment);
  EXPECT_GE(aligned_reallocs_intercepted_by_size[1234], 1u);
  EXPECT_GE(aligned_reallocs_intercepted_by_addr[Hash(alloc_ptr)], 1u);

  _aligned_free(new_alloc_ptr);
  EXPECT_GE(aligned_frees_intercepted_by_addr[Hash(new_alloc_ptr)], 1u);

  RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
}
#endif
TEST_F(AllocatorShimTest, InterceptCppSymbols) {
InsertAllocatorDispatch(&g_mock_dispatch);
......
......@@ -12,6 +12,11 @@
#include <malloc.h>
#include <new.h>
#include <windows.h>
#include <algorithm>
#include <limits>
#include "base/bits.h"
#include "base/logging.h"
namespace base {
namespace allocator {
......@@ -76,5 +81,126 @@ bool WinCallNewHandler(size_t size) {
return nh(size) ? true : false;
}
// The Windows _aligned_* functions are implemented by creating an allocation
// with enough space to create an aligned allocation internally. The offset to
// the original allocation is prefixed to the aligned allocation so that it can
// be correctly freed.
namespace {
// Header written immediately before every aligned allocation returned by
// WinHeapAlignedMalloc(). It records how far the aligned pointer is from the
// start of the underlying heap allocation so the allocation can be freed.
struct AlignedPrefix {
  // Offset to the original allocation point.
  unsigned int original_allocation_offset;
  // Make sure an unsigned int is enough to store the offset
  static_assert(
      kMaxWindowsAllocation < std::numeric_limits<unsigned int>::max(),
      "original_allocation_offset must be able to fit into an unsigned int");

#if DCHECK_IS_ON()
  // Magic value used to check that _aligned_free() and _aligned_realloc() are
  // only ever called on an aligned allocated chunk.
  static constexpr unsigned int kMagic = 0x12003400;
  unsigned int magic;
#endif
};
// Compute how large an allocation we need to fit an allocation with the given
// size and alignment and space for a prefix pointer.
//
// Returns SIZE_MAX on arithmetic overflow; callers reject any result that is
// >= kMaxWindowsAllocation, so the saturated value always fails their check.
size_t AdjustedSize(size_t size, size_t alignment) {
  // Minimal alignment is the prefix size so the prefix is properly aligned.
  alignment = std::max(alignment, alignof(AlignedPrefix));
  // Guard against wraparound: without this, a huge |size| could wrap to a
  // small adjusted size, slip past the kMaxWindowsAllocation checks in the
  // callers, and lead to a heap buffer overflow in AlignAllocation().
  const size_t extra = sizeof(AlignedPrefix) + alignment - 1;
  if (size > std::numeric_limits<size_t>::max() - extra)
    return std::numeric_limits<size_t>::max();
  return size + extra;
}
// Align the allocation and write the prefix.
// |ptr| is the start of a heap block at least AdjustedSize(size, alignment)
// bytes large; returns the first address inside it that is |alignment|-aligned
// and leaves room for an AlignedPrefix immediately before it.
void* AlignAllocation(void* ptr, size_t alignment) {
  // Minimal alignment is the prefix size so the prefix is properly aligned.
  alignment = std::max(alignment, alignof(AlignedPrefix));

  uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  // Skip past the prefix before rounding up so the prefix always fits in the
  // gap between |ptr| and the returned address.
  address = base::bits::Align(address + sizeof(AlignedPrefix), alignment);

  // Write the prefix.
  AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(address) - 1;
  prefix->original_allocation_offset =
      address - reinterpret_cast<uintptr_t>(ptr);
#if DCHECK_IS_ON()
  prefix->magic = AlignedPrefix::kMagic;
#endif  // DCHECK_IS_ON()
  return reinterpret_cast<void*>(address);
}
// Return the original allocation from an aligned allocation.
void* UnalignAllocation(void* ptr) {
AlignedPrefix* prefix = reinterpret_cast<AlignedPrefix*>(ptr) - 1;
DCHECK_EQ(prefix->magic, AlignedPrefix::kMagic);
void* unaligned =
static_cast<uint8_t*>(ptr) - prefix->original_allocation_offset;
CHECK_LT(unaligned, ptr);
CHECK_LE(
reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(unaligned),
kMaxWindowsAllocation);
return unaligned;
}
} // namespace
// Implements _aligned_malloc() on top of the CRT Windows heap: over-allocates
// enough space for the requested size plus alignment slack and a prefix, then
// returns an aligned pointer inside that block. |alignment| must be a power
// of two. Returns nullptr on failure or if the request is too large.
void* WinHeapAlignedMalloc(size_t size, size_t alignment) {
  CHECK(base::bits::IsPowerOfTwo(alignment));

  size_t adjusted = AdjustedSize(size, alignment);
  if (adjusted >= kMaxWindowsAllocation)
    return nullptr;

  void* ptr = WinHeapMalloc(adjusted);
  if (!ptr)
    return nullptr;

  return AlignAllocation(ptr, alignment);
}
// Implements _aligned_realloc() on top of the CRT Windows heap.
// Matches the CRT contract: ptr == nullptr acts as _aligned_malloc(),
// size == 0 acts as _aligned_free() and returns nullptr. |alignment| must be
// a power of two. Returns nullptr on failure (the original block is left
// intact in that case).
void* WinHeapAlignedRealloc(void* ptr, size_t size, size_t alignment) {
  CHECK(base::bits::IsPowerOfTwo(alignment));

  if (!ptr)
    return WinHeapAlignedMalloc(size, alignment);
  if (!size) {
    WinHeapAlignedFree(ptr);
    return nullptr;
  }

  size_t adjusted = AdjustedSize(size, alignment);
  if (adjusted >= kMaxWindowsAllocation)
    return nullptr;

  // Try to resize the allocation in place first. In-place resizing preserves
  // the block's address, so |ptr| stays aligned and the prefix stays valid.
  if (HeapReAlloc(get_heap_handle(), HEAP_REALLOC_IN_PLACE_ONLY,
                  UnalignAllocation(ptr), adjusted)) {
    return ptr;
  }

  // Otherwise manually perform an _aligned_malloc() and copy since an
  // unaligned allocation from HeapReAlloc() would force us to copy the
  // allocation twice.
  void* new_ptr = WinHeapAlignedMalloc(size, alignment);
  if (!new_ptr)
    return nullptr;

  void* unaligned = UnalignAllocation(ptr);
  // |gap| is the prefix + alignment slack before the user data; subtract it
  // from the block size to estimate how many user bytes the old block held.
  size_t gap =
      reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(unaligned);
  size_t old_size = WinHeapGetSizeEstimate(unaligned) - gap;
  memcpy(new_ptr, ptr, std::min(size, old_size));
  WinHeapAlignedFree(ptr);
  return new_ptr;
}
// Implements _aligned_free() on top of the CRT Windows heap: recovers the
// original heap block from the prefix and frees it. nullptr is a no-op, as
// with the CRT's _aligned_free().
void WinHeapAlignedFree(void* ptr) {
  if (!ptr)
    return;

  void* original_allocation = UnalignAllocation(ptr);
  WinHeapFree(original_allocation);
}
} // namespace allocator
} // namespace base
......@@ -11,6 +11,8 @@
#include <stdint.h>
#include "base/base_export.h"
namespace base {
namespace allocator {
......@@ -32,7 +34,15 @@ size_t WinHeapGetSizeEstimate(void* ptr);
// Returns true on successfully calling the handler, false otherwise.
bool WinCallNewHandler(size_t size);
// Wrappers to implement the interface for the _aligned_* functions on top of
// the CRT's Windows heap. Exported for tests.
BASE_EXPORT void* WinHeapAlignedMalloc(size_t size, size_t alignment);
BASE_EXPORT void* WinHeapAlignedRealloc(void* ptr,
size_t size,
size_t alignment);
BASE_EXPORT void WinHeapAlignedFree(void* ptr);
} // namespace allocator
} // namespace base
#endif // BASE_ALLOCATOR_WINHEAP_STUBS_H_
\ No newline at end of file
#endif // BASE_ALLOCATOR_WINHEAP_STUBS_H_
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/winheap_stubs_win.h"
#include "base/bits.h"
#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace allocator {
namespace {
// Returns true iff |ptr| is aligned to |alignment|, which must be a power of
// two. For a power-of-two alignment this is exactly the condition that none
// of the low-order bits below the alignment are set.
bool IsPtrAligned(void* ptr, size_t alignment) {
  CHECK(base::bits::IsPowerOfTwo(alignment));
  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  return (address & (alignment - 1)) == 0;
}
} // namespace
// Verifies that WinHeapAlignedMalloc() and WinHeapAlignedRealloc() return
// correctly aligned pointers for every power-of-two alignment up to 32KiB.
TEST(WinHeapStubs, AlignedAllocationAreAligned) {
  for (size_t alignment = 1; alignment < 65536; alignment *= 2) {
    SCOPED_TRACE(alignment);

    void* ptr = WinHeapAlignedMalloc(10, alignment);
    ASSERT_NE(ptr, nullptr);
    EXPECT_TRUE(IsPtrAligned(ptr, alignment));

    ptr = WinHeapAlignedRealloc(ptr, 1000, alignment);
    ASSERT_NE(ptr, nullptr);
    EXPECT_TRUE(IsPtrAligned(ptr, alignment));

    WinHeapAlignedFree(ptr);
  }
}
// Verifies that WinHeapAlignedRealloc() preserves the allocation's contents
// across both growing and shrinking reallocations.
TEST(WinHeapStubs, AlignedReallocationsCorrectlyCopyData) {
  constexpr size_t kAlignment = 64;
  constexpr uint8_t kMagicByte = 0xab;

  size_t old_size = 8;
  void* ptr = WinHeapAlignedMalloc(old_size, kAlignment);
  ASSERT_NE(ptr, nullptr);

  // Cause allocations to grow and shrink and confirm allocation contents are
  // copied regardless.
  constexpr size_t kSizes[] = {10, 1000, 50, 3000, 30, 9000};

  for (size_t size : kSizes) {
    SCOPED_TRACE(size);

    // Fill the current allocation with a known pattern before resizing.
    memset(ptr, kMagicByte, old_size);
    ptr = WinHeapAlignedRealloc(ptr, size, kAlignment);
    ASSERT_NE(ptr, nullptr);

    // Only the overlap of the old and new sizes is guaranteed to be copied.
    for (size_t i = 0; i < std::min(size, old_size); i++) {
      SCOPED_TRACE(i);
      ASSERT_EQ(reinterpret_cast<uint8_t*>(ptr)[i], kMagicByte);
    }

    old_size = size;
  }

  WinHeapAlignedFree(ptr);
}
} // namespace allocator
} // namespace base
......@@ -204,6 +204,42 @@ void FreeDefiniteSizeFn(const AllocatorDispatch* self,
self->next->free_definite_size_function(self->next, ptr, size, context);
}
// Heap-usage-tracking interceptor for _aligned_malloc(): forwards down the
// chain and records the allocation only when it succeeded.
static void* AlignedMallocFn(const AllocatorDispatch* self,
                             size_t size,
                             size_t alignment,
                             void* context) {
  void* ret =
      self->next->aligned_malloc_function(self->next, size, alignment, context);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}
// Heap-usage-tracking interceptor for _aligned_realloc(): accounts the old
// allocation as freed and the new one as allocated. size == 0 behaves as a
// free, so no allocation is recorded in that case.
static void* AlignedReallocFn(const AllocatorDispatch* self,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  if (address != nullptr)
    RecordFree(self->next, address, context);

  void* ret = self->next->aligned_realloc_function(self->next, address, size,
                                                   alignment, context);
  if (ret != nullptr && size != 0)
    RecordAlloc(self->next, ret, size, context);

  return ret;
}
// Heap-usage-tracking interceptor for _aligned_free(): records the free, then
// forwards down the chain. nullptr frees are not recorded.
static void AlignedFreeFn(const AllocatorDispatch* self,
                          void* address,
                          void* context) {
  if (address != nullptr)
    RecordFree(self->next, address, context);

  self->next->aligned_free_function(self->next, address, context);
}
// The allocator dispatch used to intercept heap operations.
AllocatorDispatch allocator_dispatch = {&AllocFn,
&AllocZeroInitializedFn,
......@@ -214,6 +250,9 @@ AllocatorDispatch allocator_dispatch = {&AllocFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
ThreadHeapUsage* GetOrCreateThreadUsage() {
......
......@@ -248,6 +248,44 @@ void FreeDefiniteSizeFn(const AllocatorDispatch* self,
self->next->free_definite_size_function(self->next, address, size, context);
}
// Sampling-heap-profiler interceptor for _aligned_malloc(): forwards down the
// chain and reports the allocation to the Poisson sampler unless the call is
// reentrant (ReentryGuard prevents recording from within the profiler itself).
static void* AlignedMallocFn(const AllocatorDispatch* self,
                             size_t size,
                             size_t alignment,
                             void* context) {
  ReentryGuard guard;
  void* address =
      self->next->aligned_malloc_function(self->next, size, alignment, context);
  if (LIKELY(guard)) {
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}
// Sampling-heap-profiler interceptor for _aligned_realloc(): reports the old
// address as freed and the result as a new allocation.
static void* AlignedReallocFn(const AllocatorDispatch* self,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  ReentryGuard guard;
  // Note: size == 0 actually performs free.
  PoissonAllocationSampler::RecordFree(address);
  address = self->next->aligned_realloc_function(self->next, address, size,
                                                 alignment, context);
  if (LIKELY(guard)) {
    // NOTE(review): this records even when the realloc returns nullptr
    // (failure or size == 0) — presumably RecordAlloc tolerates that; confirm.
    PoissonAllocationSampler::RecordAlloc(
        address, size, PoissonAllocationSampler::kMalloc, nullptr);
  }
  return address;
}
// Sampling-heap-profiler interceptor for _aligned_free(): reports the free,
// then forwards down the chain.
static void AlignedFreeFn(const AllocatorDispatch* self,
                          void* address,
                          void* context) {
  PoissonAllocationSampler::RecordFree(address);
  self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {&AllocFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
......@@ -257,6 +295,9 @@ AllocatorDispatch g_allocator_dispatch = {&AllocFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
#if BUILDFLAG(USE_PARTITION_ALLOC) && !defined(OS_NACL)
......
......@@ -209,6 +209,57 @@ void FreeDefiniteSizeFn(const AllocatorDispatch* self,
self->next->free_definite_size_function(self->next, address, size, context);
}
// GWP-ASan interceptor for _aligned_malloc(): occasionally services the
// allocation from the guarded page allocator (GPA); otherwise forwards down
// the chain.
static void* AlignedMallocFn(const AllocatorDispatch* self,
                             size_t size,
                             size_t alignment,
                             void* context) {
  if (UNLIKELY(sampling_state.Sample()))
    if (void* allocation = GetGpa().Allocate(size, alignment))
      return allocation;

  return self->next->aligned_malloc_function(self->next, size, alignment,
                                             context);
}
// GWP-ASan interceptor for _aligned_realloc(). Non-GPA pointers are forwarded
// untouched; GPA-owned pointers are migrated manually (allocate, copy, free)
// because the underlying allocator cannot resize a guarded allocation.
static void* AlignedReallocFn(const AllocatorDispatch* self,
                              void* address,
                              size_t size,
                              size_t alignment,
                              void* context) {
  // CRT contract: nullptr behaves as _aligned_malloc().
  if (UNLIKELY(!address))
    return AlignedMallocFn(self, size, alignment, context);

  if (LIKELY(!GetGpa().PointerIsMine(address)))
    return self->next->aligned_realloc_function(self->next, address, size,
                                                alignment, context);

  // CRT contract: size == 0 behaves as _aligned_free().
  if (!size) {
    GetGpa().Deallocate(address);
    return nullptr;
  }

  // Prefer keeping the resized allocation guarded; fall back to the regular
  // allocator if the GPA has no capacity.
  void* new_alloc = GetGpa().Allocate(size, alignment);
  if (!new_alloc)
    new_alloc = self->next->aligned_malloc_function(self->next, size, alignment,
                                                    context);
  if (!new_alloc)
    return nullptr;

  // Copy only the bytes that existed in the old (GPA-owned) allocation.
  memcpy(new_alloc, address,
         std::min(size, GetGpa().GetRequestedSize(address)));
  GetGpa().Deallocate(address);
  return new_alloc;
}
// GWP-ASan interceptor for _aligned_free(): deallocates GPA-owned pointers
// directly; everything else is forwarded down the chain.
static void AlignedFreeFn(const AllocatorDispatch* self,
                          void* address,
                          void* context) {
  if (UNLIKELY(GetGpa().PointerIsMine(address)))
    return GetGpa().Deallocate(address);

  self->next->aligned_free_function(self->next, address, context);
}
AllocatorDispatch g_allocator_dispatch = {
&AllocFn,
&AllocZeroInitializedFn,
......@@ -219,6 +270,9 @@ AllocatorDispatch g_allocator_dispatch = {
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr /* next */
};
......
......@@ -104,6 +104,14 @@ MULTIPROCESS_TEST_MAIN(BasicFunctionality) {
EXPECT_TRUE(allocationCheck([&] { return realloc(nullptr, page_size); },
&free, &failures));
#if defined(OS_WIN)
EXPECT_TRUE(allocationCheck([&] { return _aligned_malloc(123, 16); },
&_aligned_free, &failures));
EXPECT_TRUE(
allocationCheck([&] { return _aligned_realloc(nullptr, 123, 16); },
&_aligned_free, &failures));
#endif
#if !defined(OS_WIN)
EXPECT_TRUE(allocationCheck(
[&] { return aligned_alloc(page_size, page_size); }, &free, &failures));
......@@ -233,6 +241,29 @@ TEST_F(SamplingAllocatorShimsTest, GetSizeEstimate) {
runTest("GetSizeEstimate");
}
#if defined(OS_WIN)
// Child-process body for the AlignedRealloc test below: installs the GWP-ASan
// hooks and loops over malloc/realloc/free of aligned allocations so that
// sampling eventually routes some of them through the guarded allocator.
MULTIPROCESS_TEST_MAIN(AlignedRealloc) {
  // Exercise the _aligned_* shims and ensure that we handle them stably.
  InstallAllocatorHooks(AllocatorState::kGpaMaxPages, kSamplingFrequency);

  constexpr size_t kAllocationSize = 123;
  constexpr size_t kAllocationAlignment = 64;
  for (size_t i = 0; i < kLoopIterations; i++) {
    void* ptr = _aligned_malloc(kAllocationSize, kAllocationAlignment);
    CHECK(ptr);
    ptr = _aligned_realloc(ptr, kAllocationSize * 2, kAllocationAlignment);
    CHECK(ptr);
    _aligned_free(ptr);
  }

  return kSuccess;
}
// Launches the AlignedRealloc child process defined above and checks that it
// exits successfully.
TEST_F(SamplingAllocatorShimsTest, AlignedRealloc) {
  runTest("AlignedRealloc");
}
#endif
} // namespace
} // namespace internal
......
......@@ -543,6 +543,57 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
}
}
// Logging hook for _aligned_malloc(): forwards down the chain and logs the
// allocation when the scoped guard permits (it suppresses reentrant logging).
void* HookAlignedMalloc(const AllocatorDispatch* self,
                        size_t size,
                        size_t alignment,
                        void* context) {
  ScopedAllowAlloc allow_logging;
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->aligned_malloc_function(next, size, alignment, context);
  if (LIKELY(allow_logging)) {
    AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, size, nullptr);
  }
  return ptr;
}
// Logging hook for _aligned_realloc(): logs a free of the old address and,
// unless the call was actually a free (size == 0), an alloc of the result.
void* HookAlignedRealloc(const AllocatorDispatch* self,
                         void* address,
                         size_t size,
                         size_t alignment,
                         void* context) {
  ScopedAllowRealloc allow_logging;
  const AllocatorDispatch* const next = self->next;
  void* ptr =
      next->aligned_realloc_function(next, address, size, alignment, context);
  if (LIKELY(allow_logging.allow_free())) {
    AllocatorShimLogFree(address);
    // _aligned_realloc(size == 0) means _aligned_free()
    if (size > 0 && LIKELY(allow_logging.allow_alloc()))
      AllocatorShimLogAlloc(AllocatorType::kMalloc, ptr, size, nullptr);
  }
  return ptr;
}
// Logging hook for _aligned_free(): forwards down the chain, then logs the
// free when the scoped guard permits.
void HookAlignedFree(const AllocatorDispatch* self,
                     void* address,
                     void* context) {
  ScopedAllowFree allow_logging;
  const AllocatorDispatch* const next = self->next;
  next->aligned_free_function(next, address, context);
  if (LIKELY(allow_logging)) {
    AllocatorShimLogFree(address);
  }
}
AllocatorDispatch g_hooks = {
&HookAlloc, // alloc_function
&HookZeroInitAlloc, // alloc_zero_initialized_function
......@@ -553,6 +604,9 @@ AllocatorDispatch g_hooks = {
&HookBatchMalloc, // batch_malloc_function
&HookBatchFree, // batch_free_function
&HookFreeDefiniteSize, // free_definite_size_function
&HookAlignedMalloc, // aligned_malloc_function
&HookAlignedRealloc, // aligned_realloc_function
&HookAlignedFree, // aligned_free_function
nullptr, // next
};
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment