Prepare for incremental sweep by making the FreeList independent of the ThreadHeap

R=haraken@chromium.org
BUG=

Review URL: https://codereview.chromium.org/711053002

git-svn-id: svn://svn.chromium.org/blink/trunk@185099 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 386c33e6
...@@ -654,7 +654,6 @@ ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) ...@@ -654,7 +654,6 @@ ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
, m_firstPageAllocatedDuringSweeping(0) , m_firstPageAllocatedDuringSweeping(0)
, m_lastPageAllocatedDuringSweeping(0) , m_lastPageAllocatedDuringSweeping(0)
, m_mergePoint(0) , m_mergePoint(0)
, m_biggestFreeListIndex(0)
, m_threadState(state) , m_threadState(state)
, m_index(index) , m_index(index)
, m_numberOfNormalPages(0) , m_numberOfNormalPages(0)
...@@ -663,6 +662,12 @@ ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) ...@@ -663,6 +662,12 @@ ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
clearFreeLists(); clearFreeLists();
} }
// A FreeList starts out empty: every bucket is cleared lazily via
// clearFreeLists()/clear(), and the highest populated bucket index is 0.
template<typename Header>
FreeList<Header>::FreeList()
    : m_biggestFreeListIndex(0)
{
}
template<typename Header> template<typename Header>
ThreadHeap<Header>::~ThreadHeap() ThreadHeap<Header>::~ThreadHeap()
{ {
...@@ -711,7 +716,7 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) ...@@ -711,7 +716,7 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
threadState()->setGCRequested(); threadState()->setGCRequested();
} }
if (remainingAllocationSize() > 0) { if (remainingAllocationSize() > 0) {
addToFreeList(currentAllocationPoint(), remainingAllocationSize()); m_freeList.addToFreeList(currentAllocationPoint(), remainingAllocationSize());
setAllocationPoint(0, 0); setAllocationPoint(0, 0);
} }
ensureCurrentAllocation(allocationSize, gcInfo); ensureCurrentAllocation(allocationSize, gcInfo);
...@@ -721,21 +726,21 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) ...@@ -721,21 +726,21 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
template<typename Header> template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize) bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
{ {
size_t bucketSize = 1 << m_biggestFreeListIndex; size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex;
int i = m_biggestFreeListIndex; int i = m_freeList.m_biggestFreeListIndex;
for (; i > 0; i--, bucketSize >>= 1) { for (; i > 0; i--, bucketSize >>= 1) {
if (bucketSize < minSize) if (bucketSize < minSize)
break; break;
FreeListEntry* entry = m_freeLists[i]; FreeListEntry* entry = m_freeList.m_freeLists[i];
if (entry) { if (entry) {
m_biggestFreeListIndex = i; m_freeList.m_biggestFreeListIndex = i;
entry->unlink(&m_freeLists[i]); entry->unlink(&m_freeList.m_freeLists[i]);
setAllocationPoint(entry->address(), entry->size()); setAllocationPoint(entry->address(), entry->size());
ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize); ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
return true; return true;
} }
} }
m_biggestFreeListIndex = i; m_freeList.m_biggestFreeListIndex = i;
return false; return false;
} }
...@@ -819,10 +824,8 @@ void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* ...@@ -819,10 +824,8 @@ void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo*
#endif #endif
template<typename Header> template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size) void FreeList<Header>::addToFreeList(Address address, size_t size)
{ {
ASSERT(heapPageFromAddress(address));
ASSERT(heapPageFromAddress(address + size - 1));
ASSERT(size < blinkPagePayloadSize()); ASSERT(size < blinkPagePayloadSize());
// The free list entries are only pointer aligned (but when we allocate // The free list entries are only pointer aligned (but when we allocate
// from them we are 8 byte aligned due to the header size). // from them we are 8 byte aligned due to the header size).
...@@ -892,7 +895,7 @@ bool ThreadHeap<Header>::coalesce(size_t minSize) ...@@ -892,7 +895,7 @@ bool ThreadHeap<Header>::coalesce(size_t minSize)
// The smallest bucket able to satisfy an allocation request for minSize is // The smallest bucket able to satisfy an allocation request for minSize is
// the bucket where all free-list entries are guarantied to be larger than // the bucket where all free-list entries are guarantied to be larger than
// minSize. That bucket is one larger than the bucket minSize would go into. // minSize. That bucket is one larger than the bucket minSize would go into.
size_t neededBucketIndex = bucketIndexForSize(minSize) + 1; size_t neededBucketIndex = FreeList<Header>::bucketIndexForSize(minSize) + 1;
size_t neededFreeEntrySize = 1 << neededBucketIndex; size_t neededFreeEntrySize = 1 << neededBucketIndex;
size_t neededPromptlyFreedSize = neededFreeEntrySize * 3; size_t neededPromptlyFreedSize = neededFreeEntrySize * 3;
size_t foundFreeEntrySize = 0; size_t foundFreeEntrySize = 0;
...@@ -1436,7 +1439,7 @@ bool ThreadHeap<Header>::isConsistentForSweeping() ...@@ -1436,7 +1439,7 @@ bool ThreadHeap<Header>::isConsistentForSweeping()
// be swept contain a freelist block or the current allocation // be swept contain a freelist block or the current allocation
// point. // point.
for (size_t i = 0; i < blinkPageSizeLog2; i++) { for (size_t i = 0; i < blinkPageSizeLog2; i++) {
for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
if (pagesToBeSweptContains(freeListEntry->address())) { if (pagesToBeSweptContains(freeListEntry->address())) {
return false; return false;
} }
...@@ -1479,13 +1482,21 @@ template<typename Header> ...@@ -1479,13 +1482,21 @@ template<typename Header>
// Resets the heap's free-list state: clears the promptly-freed counter and
// delegates the per-bucket reset to the now-independent FreeList object.
// (De-garbled from the fused old|new diff rendering; this is the
// post-change version.)
template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    m_promptlyFreedCount = 0;
    m_freeList.clear();
}
// Empties the free list: resets the biggest-populated-bucket index and
// nulls the head and tail pointers of every size bucket.
// (De-garbled from the fused old|new diff rendering.)
template<typename Header>
void FreeList<Header>::clear()
{
    m_biggestFreeListIndex = 0;
    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
        m_freeLists[i] = 0;
        m_lastFreeListEntries[i] = 0;
    }
}
int BaseHeap::bucketIndexForSize(size_t size) template<typename Header>
int FreeList<Header>::bucketIndexForSize(size_t size)
{ {
ASSERT(size > 0); ASSERT(size > 0);
int index = -1; int index = -1;
...@@ -2749,11 +2760,11 @@ void ThreadHeap<Header>::merge(PassOwnPtr<BaseHeap> splitOffBase) ...@@ -2749,11 +2760,11 @@ void ThreadHeap<Header>::merge(PassOwnPtr<BaseHeap> splitOffBase)
splitOff->m_firstPage = 0; splitOff->m_firstPage = 0;
// Merge free lists. // Merge free lists.
for (size_t i = 0; i < blinkPageSizeLog2; i++) { for (size_t i = 0; i < blinkPageSizeLog2; i++) {
if (!m_freeLists[i]) { if (!m_freeList.m_freeLists[i]) {
m_freeLists[i] = splitOff->m_freeLists[i]; m_freeList.m_freeLists[i] = splitOff->m_freeList.m_freeLists[i];
} else if (splitOff->m_freeLists[i]) { } else if (splitOff->m_freeList.m_freeLists[i]) {
m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]); m_freeList.m_lastFreeListEntries[i]->append(splitOff->m_freeList.m_freeLists[i]);
m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i]; m_freeList.m_lastFreeListEntries[i] = splitOff->m_freeList.m_lastFreeListEntries[i];
} }
} }
} }
......
...@@ -723,11 +723,29 @@ public: ...@@ -723,11 +723,29 @@ public:
virtual PassOwnPtr<BaseHeap> split(int normalPages) = 0; virtual PassOwnPtr<BaseHeap> split(int normalPages) = 0;
virtual void merge(PassOwnPtr<BaseHeap> other) = 0; virtual void merge(PassOwnPtr<BaseHeap> other) = 0;
};
// Bucketed free list for heap pages, factored out of ThreadHeap so that it
// can be managed independently (preparation for incremental sweeping).
// Entries are segregated by power-of-two size class.
// (De-garbled from the fused old|new diff rendering; the stray trailing
// "};" belonged to the old-side rendering and is dropped.)
template<typename Header>
class FreeList {
public:
    FreeList();

    void addToFreeList(Address, size_t);
    void clear();

private:
    // Returns a bucket number for inserting a FreeListEntry of a
    // given size. All FreeListEntries in the given bucket, n, have
    // size >= 2^n.
    static int bucketIndexForSize(size_t);

    // Index of the largest bucket known to contain entries; buckets above
    // it are empty.
    int m_biggestFreeListIndex;

    // All FreeListEntries in the nth list have size >= 2^n.
    FreeListEntry* m_freeLists[blinkPageSizeLog2];
    FreeListEntry* m_lastFreeListEntries[blinkPageSizeLog2];

    // ThreadHeap still reaches into the buckets directly (e.g. for
    // allocation and merging), hence the friendship.
    friend class ThreadHeap<Header>;
};
// Thread heaps represent a part of the per-thread Blink heap. // Thread heaps represent a part of the per-thread Blink heap.
...@@ -772,8 +790,14 @@ public: ...@@ -772,8 +790,14 @@ public:
ThreadState* threadState() { return m_threadState; } ThreadState* threadState() { return m_threadState; }
HeapStats& stats() { return m_threadState->stats(); } HeapStats& stats() { return m_threadState->stats(); }
// Returns [address, address + size) to the heap's free list. The
// page-membership ASSERTs stay here (they need heapPageFromAddress(),
// which FreeList itself does not have) before delegating the actual
// bucket insertion to the FreeList.
void addToFreeList(Address address, size_t size)
{
    ASSERT(heapPageFromAddress(address));
    ASSERT(heapPageFromAddress(address + size - 1));
    m_freeList.addToFreeList(address, size);
}
inline Address allocate(size_t, const GCInfo*); inline Address allocate(size_t, const GCInfo*);
void addToFreeList(Address, size_t);
inline static size_t roundedAllocationSize(size_t size) inline static size_t roundedAllocationSize(size_t size)
{ {
return allocationSizeFromSize(size) - sizeof(Header); return allocationSizeFromSize(size) - sizeof(Header);
...@@ -834,13 +858,9 @@ private: ...@@ -834,13 +858,9 @@ private:
// Merge point for parallel sweep. // Merge point for parallel sweep.
HeapPage<Header>* m_mergePoint; HeapPage<Header>* m_mergePoint;
int m_biggestFreeListIndex;
ThreadState* m_threadState; ThreadState* m_threadState;
// All FreeListEntries in the nth list have size >= 2^n. FreeList<Header> m_freeList;
FreeListEntry* m_freeLists[blinkPageSizeLog2];
FreeListEntry* m_lastFreeListEntries[blinkPageSizeLog2];
// Index into the page pools. This is used to ensure that the pages of the // Index into the page pools. This is used to ensure that the pages of the
// same type go into the correct page pool and thus avoid type confusion. // same type go into the correct page pool and thus avoid type confusion.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment