Commit b60e5cc0 authored by Chris Palmer's avatar Chris Palmer Committed by Commit Bot

[PartitionAlloc] Add a flag to allow callers to zero-fill allocations.

This enables callers to avoid doing it themselves (centralizing the `memset` in
1 place, rather than duplicating it at every call site that wants it), and to
allow the PartitionAlloc implementation to avoid doing it itself if the
operating system has already zeroed the new page(s).

Bug: 864462,680657
Change-Id: Ibeae0bf7b9f20238334bf9bc022d0f243a77ef11
Reviewed-on: https://chromium-review.googlesource.com/1162825
Commit-Queue: Chris Palmer <palmer@chromium.org>
Reviewed-by: default avatarKentaro Hara <haraken@chromium.org>
Reviewed-by: default avatarAlbert J. Wong <ajwong@chromium.org>
Cr-Commit-Position: refs/heads/master@{#589389}
parent 18399828
...@@ -235,8 +235,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root, ...@@ -235,8 +235,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
root->RecommitSystemPages(char_ptr + current_size, recommit_size); root->RecommitSystemPages(char_ptr + current_size, recommit_size);
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
memset(char_ptr + current_size, internal::kUninitializedByte, memset(char_ptr + current_size, kUninitializedByte, recommit_size);
recommit_size);
#endif #endif
} else { } else {
// We can't perform the realloc in-place. // We can't perform the realloc in-place.
......
...@@ -313,12 +313,6 @@ ALWAYS_INLINE void PartitionFree(void* ptr) { ...@@ -313,12 +313,6 @@ ALWAYS_INLINE void PartitionFree(void* ptr) {
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr); internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK. // TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionRootBase::IsValidPage(page)); DCHECK(internal::PartitionRootBase::IsValidPage(page));
// This is somewhat redundant with |PartitionPage::Free|.
// TODO(crbug.com/680657): Doing this here might? make it OK to not do it
// there.
memset(original_ptr, 0xCD, PartitionAllocGetSize(original_ptr));
page->Free(ptr); page->Free(ptr);
#endif #endif
} }
...@@ -345,9 +339,10 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, ...@@ -345,9 +339,10 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
size_t size, size_t size,
const char* type_name) { const char* type_name) {
DCHECK_LT(flags, PartitionAllocLastFlag << 1); DCHECK_LT(flags, PartitionAllocLastFlag << 1);
const bool zero_fill = flags & PartitionAllocZeroFill;
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* result = malloc(size); void* result = zero_fill ? calloc(1, size) : malloc(size);
CHECK(result || flags & PartitionAllocReturnNull); CHECK(result || flags & PartitionAllocReturnNull);
return result; return result;
#else #else
...@@ -361,6 +356,14 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, ...@@ -361,6 +356,14 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
ret = root->AllocFromBucket(bucket, flags, size); ret = root->AllocFromBucket(bucket, flags, size);
} }
PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name); PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
// TODO(crbug.com/864462): This is suboptimal. Change `AllocFromBucket` such
// that it tells callers if the allocation was satisfied with a fresh mapping
// from the OS, so that we can skip this step and save some time.
if (zero_fill) {
memset(ret, 0, requested_size);
}
return ret; return ret;
#endif #endif
} }
......
...@@ -151,11 +151,16 @@ static const size_t kMaxFreeableSpans = 16; ...@@ -151,11 +151,16 @@ static const size_t kMaxFreeableSpans = 16;
// "out of physical memory" in crash reports. // "out of physical memory" in crash reports.
static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GB static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GB
// These two byte values match tcmalloc.
static const unsigned char kUninitializedByte = 0xAB;
static const unsigned char kFreedByte = 0xCD;
// Flags for PartitionAllocGenericFlags. // Flags for PartitionAllocGenericFlags.
enum PartitionAllocFlags { enum PartitionAllocFlags {
PartitionAllocReturnNull = 1 << 0, PartitionAllocReturnNull = 1 << 0,
PartitionAllocZeroFill = 1 << 1,
PartitionAllocLastFlag = PartitionAllocReturnNull PartitionAllocLastFlag = PartitionAllocZeroFill
}; };
} // namespace base } // namespace base
......
...@@ -2158,6 +2158,36 @@ TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) { ...@@ -2158,6 +2158,36 @@ TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
generic_allocator.root()->Free(ptr); generic_allocator.root()->Free(ptr);
} }
// Verifies that PartitionAllocGenericFlags with PartitionAllocZeroFill
// returns memory whose every byte is zero, across a spread of sizes:
// small buckets, exactly one system page, a page-straddling size, a
// direct-mapped size, and large power-of-two sizes.
TEST_F(PartitionAllocTest, ZeroFill) {
  constexpr static size_t kAllZerosSentinel =
      std::numeric_limits<size_t>::max();
  const size_t test_sizes[] = {
      1,
      17,
      100,
      kSystemPageSize,
      kSystemPageSize + 1,
      internal::PartitionBucket::get_direct_map_size(100),
      1 << 20,
      1 << 21,
  };
  for (size_t size : test_sizes) {
    char* data = static_cast<char*>(PartitionAllocGenericFlags(
        generic_allocator.root(), PartitionAllocZeroFill, size, nullptr));
    // Scan for the first non-zero byte; the sentinel means "none found".
    size_t first_non_zero = kAllZerosSentinel;
    size_t i = 0;
    while (i < size && first_non_zero == kAllZerosSentinel) {
      if (data[i] != 0)
        first_non_zero = i;
      ++i;
    }
    EXPECT_EQ(kAllZerosSentinel, first_non_zero)
        << "test allocation size: " << size;
    PartitionFree(data);
  }
}
} // namespace internal } // namespace internal
} // namespace base } // namespace base
......
...@@ -37,8 +37,6 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root, ...@@ -37,8 +37,6 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
map_size += kPageAllocationGranularityOffsetMask; map_size += kPageAllocationGranularityOffsetMask;
map_size &= kPageAllocationGranularityBaseMask; map_size &= kPageAllocationGranularityBaseMask;
// TODO: these pages will be zero-filled. Consider internalizing an
// AllocZeroed() API so we can avoid a memset() entirely in this case.
char* ptr = reinterpret_cast<char*>( char* ptr = reinterpret_cast<char*>(
AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite)); AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
if (UNLIKELY(!ptr)) if (UNLIKELY(!ptr))
......
...@@ -12,11 +12,9 @@ namespace base { ...@@ -12,11 +12,9 @@ namespace base {
namespace internal { namespace internal {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
// These two byte values match tcmalloc. // Handles alignment up to XMM instructions on Intel.
static const unsigned char kUninitializedByte = 0xAB; static const size_t kCookieSize = 16;
static const unsigned char kFreedByte = 0xCD;
static const size_t kCookieSize =
16; // Handles alignment up to XMM instructions on Intel.
static const unsigned char kCookieValue[kCookieSize] = { static const unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
......
...@@ -198,17 +198,25 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const { ...@@ -198,17 +198,25 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
} }
ALWAYS_INLINE void PartitionPage::Free(void* ptr) { ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
// If these asserts fire, you probably corrupted memory.
#if DCHECK_IS_ON()
size_t slot_size = this->bucket->slot_size; size_t slot_size = this->bucket->slot_size;
size_t raw_size = get_raw_size(); const size_t raw_size = get_raw_size();
if (raw_size) if (raw_size) {
slot_size = raw_size; slot_size = raw_size;
}
#if DCHECK_IS_ON()
// If these asserts fire, you probably corrupted memory.
PartitionCookieCheckValue(ptr); PartitionCookieCheckValue(ptr);
PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size - PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
kCookieSize); kCookieSize);
memset(ptr, kFreedByte, slot_size);
#endif #endif
// Perhaps surprisingly, this does not measurably regress performance. See
// https://crbug.com/680657 for history. We formerly did this in
// |PartitionFree|, and in `DCHECK_IS_ON()` builds we redundantly did it
// here, too. Now we only do it here, unconditionally.
memset(ptr, kFreedByte, slot_size);
DCHECK(this->num_allocated_slots); DCHECK(this->num_allocated_slots);
// TODO(palmer): See if we can afford to make this a CHECK. // TODO(palmer): See if we can afford to make this a CHECK.
// FIX FIX FIX // FIX FIX FIX
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment