Commit e56ee1dd authored by Chris Palmer, committed by Commit Bot

[PartitionAlloc] Fix some old Blink-style names.

Bug: None
Change-Id: I1d6d5f22162f7ea9bb54fb942a2c72183ad62071
Reviewed-on: https://chromium-review.googlesource.com/1148555
Commit-Queue: Chris Palmer <palmer@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#577600}
parent b4c95876
@@ -22,7 +22,7 @@ namespace {
 // This is the same PRNG as used by tcmalloc for mapping address randomness;
 // see http://burtleburtle.net/bob/rand/smallprng.html
-struct ranctx {
+struct RandomContext {
   subtle::SpinLock lock;
   bool initialized;
   uint32_t a;
@@ -31,11 +31,12 @@ struct ranctx {
   uint32_t d;
 };
 
-static LazyInstance<ranctx>::Leaky s_ranctx = LAZY_INSTANCE_INITIALIZER;
+static LazyInstance<RandomContext>::Leaky s_RandomContext =
+    LAZY_INSTANCE_INITIALIZER;
 
 #define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
 
-uint32_t ranvalInternal(ranctx* x) {
+uint32_t RandomValueInternal(RandomContext* x) {
   uint32_t e = x->a - rot(x->b, 27);
   x->a = x->b ^ rot(x->c, 17);
   x->b = x->c + x->d;
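The hunk above truncates the round function. The remaining state updates follow Bob Jenkins' published small noncryptographic PRNG linked in the comment; a sketch of the complete function in the post-rename naming, using the rot macro defined above, looks like this:

  uint32_t RandomValueInternal(RandomContext* x) {
    // One round of Jenkins' small PRNG: mix the four 32-bit state words
    // with a rotate/XOR/add schedule.
    uint32_t e = x->a - rot(x->b, 27);
    x->a = x->b ^ rot(x->c, 17);
    x->b = x->c + x->d;
    x->c = x->d + e;
    x->d = e + x->a;
    return x->d;
  }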
@@ -46,7 +47,7 @@ uint32_t ranvalInternal(ranctx* x) {
 #undef rot
 
-uint32_t ranval(ranctx* x) {
+uint32_t RandomValue(RandomContext* x) {
   subtle::SpinLock::Guard guard(x->lock);
   if (UNLIKELY(!x->initialized)) {
     const uint64_t r1 = RandUint64();
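The diff elides the rest of the lazy-seeding block here. Presumably the two RandUint64() results are split into 32-bit halves to fill the four state words, along these lines (an assumption, since the lines are not shown):

  // Assumed shape of the elided seeding: two 64-bit random draws supply
  // the four 32-bit words of the PRNG state.
  const uint64_t r2 = RandUint64();
  x->a = static_cast<uint32_t>(r1);
  x->b = static_cast<uint32_t>(r1 >> 32);
  x->c = static_cast<uint32_t>(r2);
  x->d = static_cast<uint32_t>(r2 >> 32);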
@@ -60,13 +61,13 @@ uint32_t ranval(ranctx* x) {
     x->initialized = true;
   }
-  return ranvalInternal(x);
+  return RandomValueInternal(x);
 }
 
 }  // namespace
 
 void SetRandomPageBaseSeed(int64_t seed) {
-  ranctx* x = s_ranctx.Pointer();
+  RandomContext* x = s_RandomContext.Pointer();
   subtle::SpinLock::Guard guard(x->lock);
   // Set RNG to initial state.
   x->initialized = true;
@@ -75,11 +76,12 @@ void SetRandomPageBaseSeed(int64_t seed) {
 }
 
 void* GetRandomPageBase() {
-  uintptr_t random = static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
+  uintptr_t random =
+      static_cast<uintptr_t>(RandomValue(s_RandomContext.Pointer()));
 #if defined(ARCH_CPU_64_BITS)
   random <<= 32ULL;
-  random |= static_cast<uintptr_t>(ranval(s_ranctx.Pointer()));
+  random |= static_cast<uintptr_t>(RandomValue(s_RandomContext.Pointer()));
 
   // The kASLRMask and kASLROffset constants will be suitable for the
   // OS and build configuration.
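The hunk ends just before the raw value is clamped into the ASLR range. A plausible tail for GetRandomPageBase(), assuming the kASLRMask and kASLROffset constants named in the comment behave as their names suggest:

  // Assumed: restrict the 64-bit draw to the usable ASLR range, then
  // shift it into the region the OS allows us to map.
  random &= kASLRMask;
  random += kASLROffset;
  return reinterpret_cast<void*>(random);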
@@ -38,7 +38,7 @@ ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
   map_size &= kPageAllocationGranularityBaseMask;
 
   // TODO: these pages will be zero-filled. Consider internalizing an
-  // allocZeroed() API so we can avoid a memset() entirely in this case.
+  // AllocZeroed() API so we can avoid a memset() entirely in this case.
   char* ptr = reinterpret_cast<char*>(
       AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
   if (UNLIKELY(!ptr))
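The TODO imagines an AllocZeroed() entry point. Fresh anonymous mappings arrive from the kernel already zero-filled, so such an API could skip the explicit zeroing a generic caller would otherwise do. A hypothetical sketch, reusing the AllocPages() call shown above:

  // Hypothetical AllocZeroed(): the mapping returned by AllocPages() is
  // already zeroed by the OS, so no memset(ptr, 0, map_size) is needed.
  char* AllocZeroed(size_t map_size) {
    return reinterpret_cast<char*>(
        AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
  }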
@@ -217,9 +217,9 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
   // address region as much as possible. This is important for not causing
   // page table bloat and not fragmenting address spaces in 32 bit
   // architectures.
-  char* requestedAddress = root->next_super_page;
+  char* requested_address = root->next_super_page;
   char* super_page = reinterpret_cast<char*>(AllocPages(
-      requestedAddress, kSuperPageSize, kSuperPageSize, PageReadWrite));
+      requested_address, kSuperPageSize, kSuperPageSize, PageReadWrite));
   if (UNLIKELY(!super_page))
     return nullptr;
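The next_super_page hint is what keeps super pages contiguous: after a successful mapping, the root presumably records the address one super page past the new mapping, so the next call asks the OS for the adjacent region. A sketch of that assumed bookkeeping:

  // Assumed: point the hint at the address immediately following the new
  // super page, so the next AllocNewSlotSpan() requests the adjacent slot.
  root->next_super_page = super_page + kSuperPageSize;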
@@ -262,7 +262,7 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
   // distributions will allocate the mapping directly before the last
   // successful mapping, which is far from random. So we just get fresh
   // randomness for the next mapping attempt.
-  if (requestedAddress && requestedAddress != super_page)
+  if (requested_address && requested_address != super_page)
     root->next_super_page = nullptr;
 
   // We allocated a new super page so update super page metadata.
@@ -281,8 +281,8 @@ ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
   latest_extent->next = nullptr;
 
   PartitionSuperPageExtentEntry* current_extent = root->current_extent;
-  bool isNewExtent = (super_page != requestedAddress);
-  if (UNLIKELY(isNewExtent)) {
+  bool is_new_extent = (super_page != requested_address);
+  if (UNLIKELY(is_new_extent)) {
     if (UNLIKELY(!current_extent)) {
       DCHECK(!root->first_extent);
       root->first_extent = latest_extent;
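When the new super page lands exactly at requested_address, is_new_extent is false and the current extent can simply grow rather than a new entry being linked in. A hedged sketch of that contiguous branch, with the super_pages_end field name assumed:

  // Assumed contiguous case: extend the current extent by one super page
  // instead of starting a new extent entry.
  current_extent->super_pages_end += kSuperPageSize;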
@@ -512,9 +512,9 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
   } else {
     // Third. If we get here, we need a brand new page.
     uint16_t num_partition_pages = this->get_pages_per_slot_span();
-    void* rawPages = AllocNewSlotSpan(root, flags, num_partition_pages);
-    if (LIKELY(rawPages != nullptr)) {
-      new_page = PartitionPage::FromPointerNoAlignmentCheck(rawPages);
+    void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
+    if (LIKELY(raw_pages != nullptr)) {
+      new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
       InitializeSlotSpan(new_page);
     }
   }