Commit 34393519 authored by haraken's avatar haraken Committed by Commit bot

Oilpan: Decommit backing storage of CallbackStacks

CallbackStacks are used only while Oilpan's GC is doing marking & weak processing.
However, currently each CallbackStack continues retaining one Block forever.

This wastes a lot of memory: Oilpan has 4 global CallbackStacks and 1 CallbackStack per thread.
Each Block consumes 8192 * sizeof(Item) = 128 KB. This means that Oilpan wastes 128 KB * (4 + # of threads).
When I start Chrome's new tab page, it creates 18 CallbackStacks,
meaning that it wastes 2.3 MB of memory.

This CL removes the waste by discarding system pages of the Block after finishing every GC phase.

BUG=

Review URL: https://codereview.chromium.org/1686943002

Cr-Commit-Position: refs/heads/master@{#374674}
parent d6855a21
...@@ -3,14 +3,46 @@ ...@@ -3,14 +3,46 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "platform/heap/CallbackStack.h" #include "platform/heap/CallbackStack.h"
#include "wtf/PageAllocator.h"
namespace blink { namespace blink {
void CallbackStack::Block::clear() CallbackStack::Block::Block(Block* next)
{ {
static_assert((blockSize * sizeof(Item)) % WTF::kPageAllocationGranularity == 0, "CallbackStack::blockSize * sizeof(Item) must be a multiple of WTF::kPageAllocationGranularity");
m_buffer = static_cast<Item*>(WTF::allocPages(nullptr, blockSize * sizeof(Item), WTF::kPageAllocationGranularity, WTF::PageAccessible));
RELEASE_ASSERT(m_buffer);
#if ENABLE(ASSERT)
for (size_t i = 0; i < blockSize; i++)
m_buffer[i] = Item(0, 0);
#endif
m_limit = &(m_buffer[blockSize]);
m_current = &(m_buffer[0]);
m_next = next;
}
// Returns the page-allocated buffer to the system. The buffer was obtained
// via WTF::allocPages in the constructor, so it must be released with
// WTF::freePages using the same size (blockSize * sizeof(Item)).
CallbackStack::Block::~Block()
{
WTF::freePages(m_buffer, blockSize * sizeof(Item));
// Null out all members so a use-after-destroy is more likely to fault
// immediately instead of reading stale pointers.
m_buffer = nullptr;
m_limit = nullptr;
m_current = nullptr;
m_next = nullptr;
}
void CallbackStack::Block::decommit()
{
#if ENABLE(ASSERT)
for (size_t i = 0; i < blockSize; i++)
m_buffer[i] = Item(0, 0);
#endif
WTF::discardSystemPages(m_buffer, blockSize * sizeof(Item));
m_current = &m_buffer[0]; m_current = &m_buffer[0];
m_next = nullptr; m_next = nullptr;
clearUnused();
} }
void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor) void CallbackStack::Block::invokeEphemeronCallbacks(Visitor* visitor)
...@@ -35,34 +67,28 @@ bool CallbackStack::Block::hasCallbackForObject(const void* object) ...@@ -35,34 +67,28 @@ bool CallbackStack::Block::hasCallbackForObject(const void* object)
} }
#endif #endif
void CallbackStack::Block::clearUnused() CallbackStack::CallbackStack()
{ : m_first(new Block(0))
#if ENABLE(ASSERT) , m_last(m_first)
for (size_t i = 0; i < blockSize; i++)
m_buffer[i] = Item(0, 0);
#endif
}
CallbackStack::CallbackStack() : m_first(new Block(0)), m_last(m_first)
{ {
} }
CallbackStack::~CallbackStack() CallbackStack::~CallbackStack()
{ {
clear(); RELEASE_ASSERT(isEmpty());
delete m_first; delete m_first;
m_first = nullptr; m_first = nullptr;
m_last = nullptr; m_last = nullptr;
} }
void CallbackStack::clear() void CallbackStack::decommit()
{ {
Block* next; Block* next;
for (Block* current = m_first->next(); current; current = next) { for (Block* current = m_first->next(); current; current = next) {
next = current->next(); next = current->next();
delete current; delete current;
} }
m_first->clear(); m_first->decommit();
m_last = m_first; m_last = m_first;
} }
...@@ -86,7 +112,7 @@ CallbackStack::Item* CallbackStack::popSlow() ...@@ -86,7 +112,7 @@ CallbackStack::Item* CallbackStack::popSlow()
Block* next = m_first->next(); Block* next = m_first->next();
if (!next) { if (!next) {
#if ENABLE(ASSERT) #if ENABLE(ASSERT)
m_first->clear(); m_first->decommit();
#endif #endif
return nullptr; return nullptr;
} }
......
...@@ -41,6 +41,7 @@ public: ...@@ -41,6 +41,7 @@ public:
~CallbackStack(); ~CallbackStack();
void clear(); void clear();
void decommit();
Item* allocateEntry(); Item* allocateEntry();
Item* pop(); Item* pop();
...@@ -54,25 +55,15 @@ public: ...@@ -54,25 +55,15 @@ public:
#endif #endif
private: private:
static const size_t blockSize = 8192; static const size_t blockSize = (1 << 13);
class Block { class Block {
USING_FAST_MALLOC(Block); USING_FAST_MALLOC(Block);
public: public:
explicit Block(Block* next) explicit Block(Block* next);
: m_limit(&(m_buffer[blockSize])) ~Block();
, m_current(&(m_buffer[0]))
, m_next(next)
{
clearUnused();
}
~Block()
{
clearUnused();
}
void clear(); void decommit();
Block* next() const { return m_next; } Block* next() const { return m_next; }
void setNext(Block* next) { m_next = next; } void setNext(Block* next) { m_next = next; }
...@@ -82,11 +73,6 @@ private: ...@@ -82,11 +73,6 @@ private:
return m_current == &(m_buffer[0]); return m_current == &(m_buffer[0]);
} }
size_t size() const
{
return blockSize - (m_limit - m_current);
}
Item* allocateEntry() Item* allocateEntry()
{ {
if (LIKELY(m_current < m_limit)) if (LIKELY(m_current < m_limit))
...@@ -107,9 +93,7 @@ private: ...@@ -107,9 +93,7 @@ private:
#endif #endif
private: private:
void clearUnused(); Item* m_buffer;
Item m_buffer[blockSize];
Item* m_limit; Item* m_limit;
Item* m_current; Item* m_current;
Block* m_next; Block* m_next;
......
...@@ -355,6 +355,14 @@ bool Heap::weakTableRegistered(const void* table) ...@@ -355,6 +355,14 @@ bool Heap::weakTableRegistered(const void* table)
} }
#endif #endif
// Releases the backing storage of all four global CallbackStacks once a GC
// phase has finished, so their buffers do not keep committed memory between
// GCs (each Block's buffer is blockSize * sizeof(Item) bytes).
// NOTE(review): assumes decommit() leaves each stack in a valid, reusable
// (empty) state so the next GC can push onto it again — confirm against
// CallbackStack::decommit.
void Heap::decommitCallbackStacks()
{
s_markingStack->decommit();
s_postMarkingCallbackStack->decommit();
s_globalWeakCallbackStack->decommit();
s_ephemeronStack->decommit();
}
void Heap::preGC() void Heap::preGC()
{ {
ASSERT(!ThreadState::current()->isInGC()); ASSERT(!ThreadState::current()->isInGC());
...@@ -469,6 +477,7 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType ...@@ -469,6 +477,7 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType
WTF::Partitions::reportMemoryUsageHistogram(); WTF::Partitions::reportMemoryUsageHistogram();
postGC(gcType); postGC(gcType);
Heap::decommitCallbackStacks();
#if ENABLE(ASSERT) #if ENABLE(ASSERT)
// 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneration. // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneration.
...@@ -511,6 +520,7 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state) ...@@ -511,6 +520,7 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state)
globalWeakProcessing(gcScope.visitor()); globalWeakProcessing(gcScope.visitor());
state->postGC(BlinkGC::GCWithSweep); state->postGC(BlinkGC::GCWithSweep);
Heap::decommitCallbackStacks();
} }
state->preSweep(); state->preSweep();
} }
...@@ -547,8 +557,6 @@ void Heap::postMarkingProcessing(Visitor* visitor) ...@@ -547,8 +557,6 @@ void Heap::postMarkingProcessing(Visitor* visitor)
// if they are only reachable from their front objects. // if they are only reachable from their front objects.
while (popAndInvokePostMarkingCallback(visitor)) { } while (popAndInvokePostMarkingCallback(visitor)) { }
s_ephemeronStack->clear();
// Post-marking callbacks should not trace any objects and // Post-marking callbacks should not trace any objects and
// therefore the marking stack should be empty after the // therefore the marking stack should be empty after the
// post-marking callbacks. // post-marking callbacks.
......
...@@ -279,6 +279,8 @@ private: ...@@ -279,6 +279,8 @@ private:
static int heapIndexForObjectSize(size_t); static int heapIndexForObjectSize(size_t);
static bool isNormalHeapIndex(int); static bool isNormalHeapIndex(int);
static void decommitCallbackStacks();
static CallbackStack* s_markingStack; static CallbackStack* s_markingStack;
static CallbackStack* s_postMarkingCallbackStack; static CallbackStack* s_postMarkingCallbackStack;
static CallbackStack* s_globalWeakCallbackStack; static CallbackStack* s_globalWeakCallbackStack;
......
...@@ -484,6 +484,8 @@ void ThreadState::threadLocalWeakProcessing() ...@@ -484,6 +484,8 @@ void ThreadState::threadLocalWeakProcessing()
// Perform thread-specific weak processing. // Perform thread-specific weak processing.
while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { } while (popAndInvokeThreadLocalWeakCallback(&weakProcessingVisitor)) { }
m_threadLocalWeakCallbackStack->decommit();
if (isMainThread()) { if (isMainThread()) {
double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTime; double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTime;
DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkGC.timeForThreadLocalWeakProcessing", 1, 10 * 1000, 50)); DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkGC.timeForThreadLocalWeakProcessing", 1, 10 * 1000, 50));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment