Commit d6941b79 authored by Anton Bikineev, committed by Commit Bot

Oilpan: Add ability to merge freelists in constant time

This is needed for the concurrent sweeper, which manages per-page
freelists.

Bug: 967258
Change-Id: Iae8d6b5baee1e5c4c5410987679bb5668e4294c2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1649784
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#668520}
parent 674e3857
@@ -540,13 +540,12 @@ void NormalPageArena::VerifyMarking() {
 bool NormalPageArena::IsConsistentForGC() {
   // A thread heap is consistent for sweeping if none of the pages to be swept
   // contain a freelist block or the current allocation point.
-  for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
-    for (FreeListEntry* free_list_entry = free_list_.free_lists_[i];
-         free_list_entry; free_list_entry = free_list_entry->Next()) {
-      if (PagesToBeSweptContains(free_list_entry->GetAddress()))
-        return false;
-    }
-  }
+  FreeListEntry* entry = free_list_.FindEntry([this](FreeListEntry* entry) {
+    return PagesToBeSweptContains(entry->GetAddress());
+  });
+  if (entry)
+    return false;
+
   if (HasCurrentAllocationArea()) {
     if (PagesToBeSweptContains(CurrentAllocationPoint()))
       return false;
@@ -752,6 +751,18 @@ bool NormalPageArena::ShrinkObject(HeapObjectHeader* header, size_t new_size) {
   return false;
 }
 
+Address NormalPageArena::AllocateFromFreeList(size_t allocation_size,
+                                              size_t gc_info_index) {
+  FreeListEntry* entry = free_list_.Allocate(allocation_size);
+  if (!entry)
+    return nullptr;
+
+  SetAllocationPoint(entry->GetAddress(), entry->size());
+  DCHECK(HasCurrentAllocationArea());
+  DCHECK_GE(RemainingAllocationSize(), allocation_size);
+  return AllocateObject(allocation_size, gc_info_index);
+}
+
 Address NormalPageArena::LazySweepPages(size_t allocation_size,
                                         size_t gc_info_index) {
   DCHECK(!HasCurrentAllocationArea());
@@ -869,38 +880,6 @@ Address NormalPageArena::OutOfLineAllocateImpl(size_t allocation_size,
   return result;
 }
 
-Address NormalPageArena::AllocateFromFreeList(size_t allocation_size,
-                                              size_t gc_info_index) {
-  // Try reusing a block from the largest bin. The underlying reasoning
-  // being that we want to amortize this slow allocation call by carving
-  // off as a large a free block as possible in one go; a block that will
-  // service this block and let following allocations be serviced quickly
-  // by bump allocation.
-  size_t bucket_size = static_cast<size_t>(1)
-                       << free_list_.biggest_free_list_index_;
-  int index = free_list_.biggest_free_list_index_;
-  for (; index > 0; --index, bucket_size >>= 1) {
-    FreeListEntry* entry = free_list_.free_lists_[index];
-    if (allocation_size > bucket_size) {
-      // Final bucket candidate; check initial entry if it is able
-      // to service this allocation. Do not perform a linear scan,
-      // as it is considered too costly.
-      if (!entry || entry->size() < allocation_size)
-        break;
-    }
-    if (entry) {
-      entry->Unlink(&free_list_.free_lists_[index]);
-      SetAllocationPoint(entry->GetAddress(), entry->size());
-      DCHECK(HasCurrentAllocationArea());
-      DCHECK_GE(RemainingAllocationSize(), allocation_size);
-      free_list_.biggest_free_list_index_ = index;
-      return AllocateObject(allocation_size, gc_info_index);
-    }
-  }
-  free_list_.biggest_free_list_index_ = index;
-  return nullptr;
-}
-
 LargeObjectArena::LargeObjectArena(ThreadState* state, int index)
     : BaseArena(state, index) {}
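The comment removed above (and carried over into the new FreeList::Allocate below) encodes the bucketing scheme: bucket n holds entries of size >= 2^n, so the bucket index is effectively floor(log2(size)). A standalone sketch of that math (our illustration, not necessarily the CL's BucketIndexForSize):

#include <cassert>
#include <cstddef>

// Bucket for a free block: the position of its highest set bit, i.e.
// floor(log2(size)). Every entry stored in bucket n has size >= 2^n.
int BucketIndexForSizeSketch(size_t size) {
  assert(size > 0);
  int index = -1;
  while (size) {
    size >>= 1;
    ++index;
  }
  return index;
}

For example, BucketIndexForSizeSketch(48) == 5, since 2^5 = 32 <= 48 < 64. Any entry in bucket 6 or above is at least 64 bytes and trivially fits a 48-byte request; only the final bucket candidate has to be checked against entry->size(), which is why the scan never walks a list.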
@@ -1014,7 +993,7 @@ Address LargeObjectArena::LazySweepPages(size_t allocation_size,
 
 FreeList::FreeList() : biggest_free_list_index_(0) {}
 
-void FreeList::AddToFreeList(Address address, size_t size) {
+void FreeList::Add(Address address, size_t size) {
   DCHECK_LT(size, BlinkPagePayloadSize());
   // The free list entries are only pointer aligned (but when we allocate
   // from them we are 8 byte aligned due to the header size).
@@ -1073,10 +1052,72 @@ void FreeList::AddToFreeList(Address address, size_t size) {
 #endif
   ASAN_POISON_MEMORY_REGION(address, size);
-  int index = BucketIndexForSize(size);
-  entry->Link(&free_lists_[index]);
-  if (index > biggest_free_list_index_)
-    biggest_free_list_index_ = index;
+  const int index = BucketIndexForSize(size);
+  entry->Link(&free_list_heads_[index]);
+  if (index > biggest_free_list_index_) {
+    biggest_free_list_index_ = index;
+  }
+  if (!entry->Next()) {
+    free_list_tails_[index] = entry;
+  }
+}
+
+void FreeList::MoveFrom(FreeList* other) {
+  const size_t expected_size = FreeListSize() + other->FreeListSize();
+
+  // Newly created entries get added to the head.
+  for (size_t index = 0; index < kBlinkPageSizeLog2; ++index) {
+    FreeListEntry* other_tail = other->free_list_tails_[index];
+    FreeListEntry*& this_head = this->free_list_heads_[index];
+    if (other_tail) {
+      other_tail->Append(this_head);
+      if (!this_head) {
+        this->free_list_tails_[index] = other_tail;
+      }
+      this_head = other->free_list_heads_[index];
+      other->free_list_heads_[index] = nullptr;
+      other->free_list_tails_[index] = nullptr;
+    }
+  }
+
+  biggest_free_list_index_ =
+      std::max(biggest_free_list_index_, other->biggest_free_list_index_);
+  other->biggest_free_list_index_ = 0;
+
+  DCHECK_EQ(expected_size, FreeListSize());
+  DCHECK(other->IsEmpty());
+}
+
+FreeListEntry* FreeList::Allocate(size_t allocation_size) {
+  // Try reusing a block from the largest bin. The underlying reasoning
+  // being that we want to amortize this slow allocation call by carving
+  // off as a large a free block as possible in one go; a block that will
+  // service this block and let following allocations be serviced quickly
+  // by bump allocation.
+  size_t bucket_size = static_cast<size_t>(1) << biggest_free_list_index_;
+  int index = biggest_free_list_index_;
+  for (; index > 0; --index, bucket_size >>= 1) {
+    DCHECK(IsConsistent(index));
+    FreeListEntry* entry = free_list_heads_[index];
+    if (allocation_size > bucket_size) {
+      // Final bucket candidate; check initial entry if it is able
+      // to service this allocation. Do not perform a linear scan,
+      // as it is considered too costly.
+      if (!entry || entry->size() < allocation_size)
+        break;
+    }
+    if (entry) {
+      if (!entry->Next()) {
+        DCHECK_EQ(entry, free_list_tails_[index]);
+        free_list_tails_[index] = nullptr;
+      }
+      entry->Unlink(&free_list_heads_[index]);
+      biggest_free_list_index_ = index;
+      return entry;
+    }
+  }
+  biggest_free_list_index_ = index;
+  return nullptr;
 }
 
 #if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
     defined(MEMORY_SANITIZER)
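FreeList::MoveFrom above is the constant-time merge the commit title promises: because every bucket now tracks a tail as well as a head, a whole source bucket is spliced onto the destination with a few pointer updates, independent of how many entries it holds. A minimal standalone sketch of one such splice (illustrative types, not the CL's):

#include <cstddef>

struct Entry {
  Entry* next = nullptr;
};

struct Bucket {
  Entry* head = nullptr;
  Entry* tail = nullptr;
};

// Moves all entries of |from| to the front of |to| in O(1).
void MergeBucket(Bucket* to, Bucket* from) {
  if (!from->tail)
    return;  // Source bucket is empty; nothing to splice.
  // Hook the destination's current list behind the source's last entry.
  from->tail->next = to->head;
  if (!to->head)
    to->tail = from->tail;  // Destination was empty; adopt the source tail.
  to->head = from->head;    // Source entries now lead the combined list.
  from->head = from->tail = nullptr;  // Leave the source empty.
}

MoveFrom performs one such splice per bucket and then reconciles biggest_free_list_index_, so a merge costs O(kBlinkPageSizeLog2) regardless of how many entries the source freelist contains.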
@@ -1117,7 +1158,7 @@ void NOINLINE FreeList::CheckFreedMemoryIsZapped(Address address, size_t size) {
 size_t FreeList::FreeListSize() const {
   size_t free_size = 0;
   for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) {
-    FreeListEntry* entry = free_lists_[i];
+    FreeListEntry* entry = free_list_heads_[i];
     while (entry) {
       free_size += entry->size();
       entry = entry->Next();
@@ -1127,7 +1168,7 @@ size_t FreeList::FreeListSize() const {
   if (free_size) {
     LOG_HEAP_FREELIST_VERBOSE() << "FreeList(" << this << "): " << free_size;
     for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) {
-      FreeListEntry* entry = free_lists_[i];
+      FreeListEntry* entry = free_list_heads_[i];
       size_t bucket = 0;
       size_t count = 0;
       while (entry) {
@@ -1148,8 +1189,22 @@ size_t FreeList::FreeListSize() const {
 
 void FreeList::Clear() {
   biggest_free_list_index_ = 0;
-  for (size_t i = 0; i < kBlinkPageSizeLog2; ++i)
-    free_lists_[i] = nullptr;
+  for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+    free_list_heads_[i] = nullptr;
+    free_list_tails_[i] = nullptr;
+  }
+}
+
+bool FreeList::IsEmpty() const {
+  if (biggest_free_list_index_)
+    return false;
+  for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+    if (free_list_heads_[i]) {
+      DCHECK(free_list_tails_[i]);
+      return false;
+    }
+  }
+  return true;
 }
 
 int FreeList::BucketIndexForSize(size_t size) {
@@ -1167,7 +1222,8 @@ bool FreeList::TakeSnapshot(const String& dump_base_name) {
   for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
     size_t entry_count = 0;
     size_t free_size = 0;
-    for (FreeListEntry* entry = free_lists_[i]; entry; entry = entry->Next()) {
+    for (FreeListEntry* entry = free_list_heads_[i]; entry;
+         entry = entry->Next()) {
       ++entry_count;
       free_size += entry->size();
     }
...
@@ -303,6 +303,62 @@ class FreeListEntry final : public HeapObjectHeader {
  private:
   FreeListEntry* next_;
 
+  friend class FreeList;
 };
 
+class FreeList {
+  DISALLOW_NEW();
+
+ public:
+  // Returns a bucket number for inserting a |FreeListEntry| of a given size.
+  // All entries in the given bucket, n, have size >= 2^n.
+  static int BucketIndexForSize(size_t);
+
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+    defined(MEMORY_SANITIZER)
+  static void GetAllowedAndForbiddenCounts(Address, size_t, size_t&, size_t&);
+  static void ZapFreedMemory(Address, size_t);
+  static void CheckFreedMemoryIsZapped(Address, size_t);
+#endif
+
+  FreeList();
+
+  FreeListEntry* Allocate(size_t);
+  void Add(Address, size_t);
+  void MoveFrom(FreeList*);
+  void Clear();
+
+  bool IsEmpty() const;
+  size_t FreeListSize() const;
+
+  // Returns true if the freelist snapshot is captured.
+  bool TakeSnapshot(const String& dump_base_name);
+
+  template <typename Predicate>
+  FreeListEntry* FindEntry(Predicate pred) {
+    for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) {
+      for (FreeListEntry* entry = free_list_heads_[i]; entry;
+           entry = entry->Next()) {
+        if (pred(entry)) {
+          return entry;
+        }
+      }
+    }
+    return nullptr;
+  }
+
+ private:
+  bool IsConsistent(size_t index) const {
+    return (!free_list_heads_[index] && !free_list_tails_[index]) ||
+           (free_list_heads_[index] && free_list_tails_[index] &&
+            !free_list_tails_[index]->Next());
+  }
+
+  // All |FreeListEntry|s in the nth list have size >= 2^n.
+  FreeListEntry* free_list_heads_[kBlinkPageSizeLog2];
+  FreeListEntry* free_list_tails_[kBlinkPageSizeLog2];
+  int biggest_free_list_index_;
+};
+
 // Blink heap pages are set up with a guard page before and after the payload.
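FindEntry is the generalized walk that IsConsistentForGC now uses in place of its hand-rolled loop: it visits entries bucket by bucket and returns the first one the predicate accepts, or nullptr if none match. A hypothetical caller (ours, not part of the CL, assuming the surrounding Blink heap headers):

// Returns true if some free block is large enough for |allocation_size|,
// bypassing the bucket heuristics that FreeList::Allocate applies.
bool HasEntryOfSize(FreeList& free_list, size_t allocation_size) {
  FreeListEntry* entry =
      free_list.FindEntry([allocation_size](FreeListEntry* entry) {
        return entry->size() >= allocation_size;
      });
  return entry != nullptr;
}

Note that FindEntry is linear in the number of entries; the CL uses it only on the consistency-checking path, never for allocation.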
@@ -694,40 +750,6 @@ class PLATFORM_EXPORT LargeObjectPage final : public BasePage {
 #endif
 };
 
-class FreeList {
-  DISALLOW_NEW();
-
- public:
-  FreeList();
-
-  void AddToFreeList(Address, size_t);
-  void Clear();
-
-  // Returns a bucket number for inserting a |FreeListEntry| of a given size.
-  // All entries in the given bucket, n, have size >= 2^n.
-  static int BucketIndexForSize(size_t);
-
-  // Returns true if the freelist snapshot is captured.
-  bool TakeSnapshot(const String& dump_base_name);
-
-#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
-    defined(MEMORY_SANITIZER)
-  static void GetAllowedAndForbiddenCounts(Address, size_t, size_t&, size_t&);
-  static void ZapFreedMemory(Address, size_t);
-  static void CheckFreedMemoryIsZapped(Address, size_t);
-#endif
-
- private:
-  int biggest_free_list_index_;
-
-  // All |FreeListEntry|s in the nth list have size >= 2^n.
-  FreeListEntry* free_lists_[kBlinkPageSizeLog2];
-
-  size_t FreeListSize() const;
-
-  friend class NormalPageArena;
-};
-
 // Each thread has a number of thread arenas (e.g., Generic arenas, typed arenas
 // for |Node|, arenas for collection backings, etc.) and |BaseArena| represents
 // each thread arena.
...@@ -815,8 +837,9 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena { ...@@ -815,8 +837,9 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
// similar expressions elsewhere)? // similar expressions elsewhere)?
DCHECK(FindPageFromAddress(address + size - 1)); DCHECK(FindPageFromAddress(address + size - 1));
#endif #endif
free_list_.AddToFreeList(address, size); free_list_.Add(address, size);
} }
void AddToFreeList(FreeList* other) { free_list_.MoveFrom(other); }
void ClearFreeLists() override; void ClearFreeLists() override;
void MakeIterable() override; void MakeIterable() override;
......
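The new NormalPageArena::AddToFreeList(FreeList*) overload is the hook the commit message motivates: a concurrent sweeper can collect the free blocks of each swept page into a page-local FreeList and publish them to the arena in one merge. A hypothetical flow (the sweeper itself is not part of this CL; names and signature here are illustrative):

#include <utility>
#include <vector>

// |dead_runs| stands for the coalesced runs of dead objects discovered on a
// single page during sweeping.
void SweepPageSketch(NormalPageArena* arena,
                     const std::vector<std::pair<Address, size_t>>& dead_runs) {
  FreeList page_free_list;
  // Record each run locally; no synchronization with the arena is needed.
  for (const auto& run : dead_runs)
    page_free_list.Add(run.first, run.second);
  // One splice, constant time per bucket, hands everything to the arena.
  arena->AddToFreeList(&page_free_list);
}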