Commit 7bd9d693 authored by Chris Palmer, committed by Commit Bot

[PartitionAlloc] Add a flag to allow callers to zero-fill allocations.

This enables callers to avoid zeroing memory themselves (centralizing the `memset` in
one place, rather than duplicating it at every call site that wants it), and
allows the PartitionAlloc implementation to skip the work entirely when the
operating system has already zeroed the new page(s).

This is a re-land, with fixes, of
https://chromium-review.googlesource.com/c/chromium/src/+/1162825.
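
For illustration, a call site that wants zero-filled memory can now pass the flag
instead of memset-ing the result itself (a minimal sketch; `length`, the root, and
the type name are placeholders, taken from the new unit test below):

    // Hypothetical call site: the returned bytes are guaranteed to be zero,
    // however PartitionAlloc chose to satisfy the request.
    void* buffer = PartitionAllocGenericFlags(
        generic_allocator.root(), PartitionAllocZeroFill, length, "MyBuffer");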

Bug: 864462,680657
TBR: ajwong,haraken
Change-Id: Icfe05bca82c79c8a8029db40b2902271d3d10347
Reviewed-on: https://chromium-review.googlesource.com/1214173
Reviewed-by: Chris Palmer <palmer@chromium.org>
Commit-Queue: Chris Palmer <palmer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#589717}
parent d5e69f48
@@ -235,8 +235,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
     root->RecommitSystemPages(char_ptr + current_size, recommit_size);
 #if DCHECK_IS_ON()
-    memset(char_ptr + current_size, internal::kUninitializedByte,
-           recommit_size);
+    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
 #endif
   } else {
     // We can't perform the realloc in-place.
@@ -313,12 +313,6 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
   internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(internal::PartitionRootBase::IsValidPage(page));
-  // This is somewhat redundant with |PartitionPage::Free|.
-  // TODO(crbug.com/680657): Doing this here might? make it OK to not do it
-  // there.
-  memset(original_ptr, 0xCD, PartitionAllocGetSize(original_ptr));
   page->Free(ptr);
 #endif
 }
@@ -345,9 +339,10 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
                                                size_t size,
                                                const char* type_name) {
   DCHECK_LT(flags, PartitionAllocLastFlag << 1);
+  const bool zero_fill = flags & PartitionAllocZeroFill;
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
-  void* result = malloc(size);
+  void* result = zero_fill ? calloc(1, size) : malloc(size);
   CHECK(result || flags & PartitionAllocReturnNull);
   return result;
 #else
@@ -361,6 +356,14 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
     ret = root->AllocFromBucket(bucket, flags, size);
   }
   PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
+  // TODO(crbug.com/864462): This is suboptimal. Change `AllocFromBucket` such
+  // that it tells callers if the allocation was satisfied with a fresh mapping
+  // from the OS, so that we can skip this step and save some time.
+  if (zero_fill) {
+    memset(ret, 0, requested_size);
+  }
   return ret;
 #endif
 }
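
One way the TODO above could eventually be resolved, sketched here purely for
illustration (`is_fresh_mapping` is a hypothetical out-parameter, not part of
this change):

    // Hypothetical: AllocFromBucket reports whether the memory came from a
    // freshly committed OS mapping, which the kernel already zero-fills.
    bool is_fresh_mapping = false;
    ret = root->AllocFromBucket(bucket, flags, size, &is_fresh_mapping);
    if (zero_fill && !is_fresh_mapping) {
      memset(ret, 0, requested_size);  // Only pay for the memset when needed.
    }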
@@ -151,11 +151,16 @@ static const size_t kMaxFreeableSpans = 16;
 // "out of physical memory" in crash reports.
 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1GB
+// These two byte values match tcmalloc.
+static const unsigned char kUninitializedByte = 0xAB;
+static const unsigned char kFreedByte = 0xCD;
+
 // Flags for PartitionAllocGenericFlags.
 enum PartitionAllocFlags {
   PartitionAllocReturnNull = 1 << 0,
-  PartitionAllocLastFlag = PartitionAllocReturnNull
+  PartitionAllocZeroFill = 1 << 1,
+  PartitionAllocLastFlag = PartitionAllocZeroFill
 };
 }  // namespace base
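
Because the flags are bit values (and `DCHECK_LT(flags, PartitionAllocLastFlag << 1)`
validates the combined mask), they compose; e.g. a caller that wants zeroed memory
and a null return on exhaustion (illustrative sketch, placeholders for root/size/name):

    // Combine flags: zero-fill the allocation, and return nullptr rather
    // than crashing if the allocation cannot be satisfied.
    void* p = PartitionAllocGenericFlags(
        root, PartitionAllocReturnNull | PartitionAllocZeroFill, size, name);
    if (!p) {
      // Handle allocation failure.
    }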
@@ -728,20 +728,15 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
   EXPECT_EQ(ptr3, new_ptr);
   new_ptr = generic_allocator.root()->Alloc(size, type_name);
   EXPECT_EQ(ptr2, new_ptr);
-#if defined(OS_LINUX) && !DCHECK_IS_ON()
-  // On Linux, we have a guarantee that freelisting a page should cause its
-  // contents to be nulled out. We check for null here to detect an bug we
-  // had where a large slot size was causing us to not properly free all
-  // resources back to the system.
-  // We only run the check when asserts are disabled because when they are
-  // enabled, the allocated area is overwritten with an "uninitialized"
-  // byte pattern.
-  EXPECT_EQ(0, *(reinterpret_cast<char*>(new_ptr) + (size - 1)));
-#endif
   generic_allocator.root()->Free(new_ptr);
   generic_allocator.root()->Free(ptr3);
   generic_allocator.root()->Free(ptr4);
+  // |PartitionPage::Free| must poison the slot's contents with |kFreedByte|.
+  EXPECT_EQ(kFreedByte,
+            *(reinterpret_cast<unsigned char*>(new_ptr) + (size - 1)));
   // Can we allocate a massive (512MB) size?
   // Allocate 512MB, but +1, to test for cookie writing alignment issues.
   // Test this only if the device has enough memory or it might fail due
@@ -2158,6 +2153,36 @@ TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
   generic_allocator.root()->Free(ptr);
 }
 
+TEST_F(PartitionAllocTest, ZeroFill) {
+  const size_t test_sizes[] = {
+      1,
+      17,
+      100,
+      kSystemPageSize,
+      kSystemPageSize + 1,
+      internal::PartitionBucket::get_direct_map_size(100),
+      1 << 20,
+      1 << 21,
+  };
+  constexpr static size_t kAllZerosSentinel =
+      std::numeric_limits<size_t>::max();
+  for (size_t size : test_sizes) {
+    char* p = static_cast<char*>(PartitionAllocGenericFlags(
+        generic_allocator.root(), PartitionAllocZeroFill, size, nullptr));
+    size_t non_zero_position = kAllZerosSentinel;
+    for (size_t i = 0; i < size; ++i) {
+      if (0 != p[i]) {
+        non_zero_position = i;
+        break;
+      }
+    }
+    EXPECT_EQ(kAllZerosSentinel, non_zero_position)
+        << "test allocation size: " << size;
+    PartitionFree(p);
+  }
+}
+
 }  // namespace internal
 }  // namespace base
@@ -37,8 +37,6 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
   map_size += kPageAllocationGranularityOffsetMask;
   map_size &= kPageAllocationGranularityBaseMask;
-  // TODO: these pages will be zero-filled. Consider internalizing an
-  // AllocZeroed() API so we can avoid a memset() entirely in this case.
   char* ptr = reinterpret_cast<char*>(
       AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
   if (UNLIKELY(!ptr))
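
For background on the removed TODO: anonymous pages freshly mapped from the OS
arrive zero-filled, which is why a direct map never needs an explicit memset. A
minimal sketch of that guarantee, assuming a POSIX-like mmap (not PartitionAlloc
code; the helper name is hypothetical):

    #include <sys/mman.h>

    // Fresh anonymous mappings are zero-filled by the kernel, so a
    // zeroed-allocation path could return them without touching the memory.
    void* AllocZeroedPages(size_t map_size) {
      void* ptr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      return ptr == MAP_FAILED ? nullptr : ptr;  // Bytes are already zero.
    }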
@@ -12,11 +12,9 @@ namespace base {
 namespace internal {
 #if DCHECK_IS_ON()
-// These two byte values match tcmalloc.
-static const unsigned char kUninitializedByte = 0xAB;
-static const unsigned char kFreedByte = 0xCD;
-static const size_t kCookieSize =
-    16;  // Handles alignment up to XMM instructions on Intel.
+// Handles alignment up to XMM instructions on Intel.
+static const size_t kCookieSize = 16;
 static const unsigned char kCookieValue[kCookieSize] = {
     0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
     0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
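
For context, the cookie machinery used in |PartitionPage::Free| below compares the
kCookieSize guard bytes on each side of a slot against kCookieValue. A simplified
sketch of the check, in the spirit of the real helper that lives alongside these
constants:

    // Simplified sketch: verify that the guard bytes written at allocation
    // time still match the expected pattern. A mismatch means something
    // scribbled past a slot boundary (buffer overflow or underflow).
    ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
      unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
      for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
        DCHECK(*cookie_ptr == kCookieValue[i]);
    }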
@@ -198,17 +198,25 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
 }
 
 ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
-  // If these asserts fire, you probably corrupted memory.
-#if DCHECK_IS_ON()
   size_t slot_size = this->bucket->slot_size;
-  size_t raw_size = get_raw_size();
-  if (raw_size)
+  const size_t raw_size = get_raw_size();
+  if (raw_size) {
     slot_size = raw_size;
+  }
+
+#if DCHECK_IS_ON()
+  // If these asserts fire, you probably corrupted memory.
   PartitionCookieCheckValue(ptr);
   PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
                             kCookieSize);
-  memset(ptr, kFreedByte, slot_size);
 #endif
+
+  // Perhaps surprisingly, this does not measurably regress performance. See
+  // https://crbug.com/680657 for history. We formerly did this in
+  // |PartitionFree|, and in `DCHECK_IS_ON()` builds we redundantly did it
+  // here, too. Now we only do it here, unconditionally.
+  memset(ptr, kFreedByte, slot_size);
+
   DCHECK(this->num_allocated_slots);
   // TODO(palmer): See if we can afford to make this a CHECK.
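
One consequence of poisoning unconditionally is that stale pointers now reliably
observe the 0xCD pattern, which is what the updated GenericAllocSizes expectation
above asserts. An illustrative, test-only sketch (deliberately reading freed
memory, as the unit test does):

    // After Free(), every byte of the slot holds kFreedByte (0xCD), so a
    // use-after-free read observes poison rather than stale object contents.
    void* p = generic_allocator.root()->Alloc(64, "example");
    generic_allocator.root()->Free(p);
    EXPECT_EQ(kFreedByte, *reinterpret_cast<unsigned char*>(p));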