Commit ea06924a authored by kouhei@chromium.org

Report DOM partitionAlloc size to V8 GC.

Background: Currently the V8 GC has no idea of the DOM memory retained by wrappers. A small set of unreachable wrappers may retain a huge DOM, but a V8 GC may not be triggered because V8 is not aware of that DOM memory usage.

This CL introduces |V8GCController::reportDOMMemoryUsageToV8|:
- DOM memory usage is tracked in partition{Alloc,Free}.
- After each task run, |reportDOMMemoryUsageToV8| is called.
- |reportDOMMemoryUsageToV8| notifies the V8 GC of the partition memory size currently consumed by the DOM (see the sketch below).
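
For context, a condensed sketch of the flow this CL wires up, using the class and function names from the diff below. V8's AdjustAmountOfExternalAllocatedMemory() takes a signed delta in bytes and factors the resulting total into its GC heuristics:

    // After every main-thread task, the embedder's task observer reports DOM
    // memory to V8 (see the WebKit initialization and V8GCController diffs below).
    void didProcessTask()
    {
        WebCore::Microtask::performCheckpoint();
        WebCore::V8GCController::reportDOMMemoryUsageToV8(mainThreadIsolate());
    }

    void V8GCController::reportDOMMemoryUsageToV8(v8::Isolate* isolate)
    {
        // Report only the change since the last report, so V8's running total of
        // external memory tracks Partitions::currentDOMMemoryUsage().
        static size_t lastUsageReportedToV8 = 0;
        size_t currentUsage = Partitions::currentDOMMemoryUsage();
        isolate->AdjustAmountOfExternalAllocatedMemory(
            static_cast<int64_t>(currentUsage) - static_cast<int64_t>(lastUsageReportedToV8));
        lastUsageReportedToV8 = currentUsage;
    }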

After this CL, http://jsbin.com/nitobiru/18 will not crash.

BUG=365018, 368406

Review URL: https://codereview.chromium.org/301743006

git-svn-id: svn://svn.chromium.org/blink/trunk@176006 bbb929c8-8fbe-4397-9dbb-9b2b20218538
parent 5eb17ee2
@@ -48,6 +48,7 @@
#include "core/html/imports/HTMLImportsController.h"
#include "core/inspector/InspectorTraceEvents.h"
#include "core/svg/SVGElement.h"
#include "platform/Partitions.h"
#include "platform/TraceEvent.h"
#include <algorithm>
@@ -437,4 +438,18 @@ void V8GCController::collectGarbage(v8::Isolate* isolate)
scriptState->disposePerContextData();
}
void V8GCController::reportDOMMemoryUsageToV8(v8::Isolate* isolate)
{
if (!isMainThread())
return;
static size_t lastUsageReportedToV8 = 0;
size_t currentUsage = Partitions::currentDOMMemoryUsage();
int64_t diff = static_cast<int64_t>(currentUsage) - static_cast<int64_t>(lastUsageReportedToV8);
isolate->AdjustAmountOfExternalAllocatedMemory(diff);
lastUsageReportedToV8 = currentUsage;
}
} // namespace WebCore
@@ -50,6 +50,8 @@ public:
static void collectGarbage(v8::Isolate*);
static Node* opaqueRootForGC(Node*, v8::Isolate*);
static void reportDOMMemoryUsageToV8(v8::Isolate*);
};
}
...
@@ -44,6 +44,11 @@ public:
ALWAYS_INLINE static PartitionRoot* getObjectModelPartition() { return m_objectModelAllocator.root(); }
ALWAYS_INLINE static PartitionRoot* getRenderingPartition() { return m_renderingAllocator.root(); }
static size_t currentDOMMemoryUsage()
{
return m_objectModelAllocator.root()->totalSizeOfCommittedPages;
}
private:
static SizeSpecificPartitionAllocator<3072> m_objectModelAllocator;
static SizeSpecificPartitionAllocator<1024> m_renderingAllocator;
...
@@ -33,6 +33,7 @@
#include "RuntimeEnabledFeatures.h"
#include "bindings/v8/V8Binding.h"
#include "bindings/v8/V8GCController.h"
#include "bindings/v8/V8Initializer.h" #include "bindings/v8/V8Initializer.h"
#include "core/Init.h" #include "core/Init.h"
#include "core/animation/AnimationClock.h" #include "core/animation/AnimationClock.h"
...@@ -75,6 +76,7 @@ public: ...@@ -75,6 +76,7 @@ public:
virtual void didProcessTask() OVERRIDE virtual void didProcessTask() OVERRIDE
{ {
WebCore::Microtask::performCheckpoint(); WebCore::Microtask::performCheckpoint();
WebCore::V8GCController::reportDOMMemoryUsageToV8(mainThreadIsolate());
}
};
...
@@ -112,6 +112,7 @@ static void parititonAllocBaseInit(PartitionRootBase* root)
spinLockUnlock(&PartitionRootBase::gInitializedLock);
root->initialized = true;
root->totalSizeOfCommittedPages = 0;
root->totalSizeOfSuperPages = 0;
root->nextSuperPage = 0;
root->nextPartitionPage = 0;
@@ -308,12 +309,26 @@ static NEVER_INLINE void partitionFull()
IMMEDIATE_CRASH();
}
static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
{
decommitSystemPages(addr, len);
ASSERT(root->totalSizeOfCommittedPages > len);
root->totalSizeOfCommittedPages -= len;
}
static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
{
recommitSystemPages(addr, len);
root->totalSizeOfCommittedPages += len;
}
static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, size_t numPartitionPages)
{
ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize));
ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize));
RELEASE_ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage);
size_t totalSize = kPartitionPageSize * numPartitionPages;
root->totalSizeOfCommittedPages += totalSize;
size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift;
if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) {
// In this case, we can still hand out pages from the current super page
@@ -382,11 +397,11 @@ static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root,
return ret;
}
-static ALWAYS_INLINE void partitionUnusePage(PartitionPage* page)
+static ALWAYS_INLINE void partitionUnusePage(PartitionRootBase* root, PartitionPage* page)
{
ASSERT(page->bucket->numSystemPagesPerSlotSpan);
void* addr = partitionPageToPointer(page);
-decommitSystemPages(addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
+partitionDecommitSystemPages(root, addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
}
static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket)
@@ -675,7 +690,7 @@ void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, Pa
ASSERT(newPage->freeCacheIndex == -1);
bucket->freePagesHead = newPage->nextPage;
void* addr = partitionPageToPointer(newPage);
-recommitSystemPages(addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
+partitionRecommitSystemPages(root, addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize);
} else {
// Third. If we get here, we need a brand new page.
size_t numPartitionPages = partitionBucketPartitionPages(bucket);
@@ -693,11 +708,11 @@ void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, Pa
return partitionPageAllocAndFillFreelist(newPage);
}
-static ALWAYS_INLINE void partitionFreePage(PartitionPage* page)
+static ALWAYS_INLINE void partitionFreePage(PartitionRootBase* root, PartitionPage* page)
{
ASSERT(page->freelistHead);
ASSERT(!page->numAllocatedSlots);
-partitionUnusePage(page);
+partitionUnusePage(root, page);
// We actually leave the freed page in the active list. We'll sweep it on
// to the free page list when we next walk the active page list. Pulling
// this trick enables us to use a singly-linked page list for all cases,
@@ -710,6 +725,7 @@ static ALWAYS_INLINE void partitionFreePage(PartitionPage* page)
static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
{
PartitionRootBase* root = partitionPageToRoot(page);
// If the page is already registered as empty, give it another life.
if (page->freeCacheIndex != -1) {
ASSERT(page->freeCacheIndex >= 0);
@@ -729,7 +745,7 @@ static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
ASSERT(pageToFree == root->globalEmptyPageRing[pageToFree->freeCacheIndex]);
if (!pageToFree->numAllocatedSlots && pageToFree->freelistHead) {
// The page is still empty, and not freed, so _really_ free it.
-partitionFreePage(pageToFree);
+partitionFreePage(root, pageToFree);
}
pageToFree->freeCacheIndex = -1;
}
@@ -757,7 +773,7 @@ void partitionFreeSlowPath(PartitionPage* page)
partitionDirectUnmap(page);
return;
}
-// If it's the current page, attempt to change it. We'd prefer to leave
+// If it's the current active page, attempt to change it. We'd prefer to leave
// the page empty as a gentle force towards defragmentation.
if (LIKELY(page == bucket->activePagesHead) && page->nextPage) {
if (partitionSetNewActivePage(page->nextPage)) {
@@ -827,14 +843,14 @@ bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPa
// Shrink by decommitting unneeded pages and making them inaccessible.
size_t decommitSize = currentSize - newSize;
-decommitSystemPages(charPtr + newSize, decommitSize);
+partitionDecommitSystemPages(root, charPtr + newSize, decommitSize);
setSystemPagesInaccessible(charPtr + newSize, decommitSize);
} else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommitSize = newSize - currentSize;
setSystemPagesAccessible(charPtr + currentSize, recommitSize);
-recommitSystemPages(charPtr + currentSize, recommitSize);
+partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize);
#ifndef NDEBUG
memset(charPtr + currentSize, kUninitializedByte, recommitSize);
...
@@ -254,6 +254,7 @@ struct PartitionSuperPageExtentEntry {
};
struct WTF_EXPORT PartitionRootBase {
size_t totalSizeOfCommittedPages;
size_t totalSizeOfSuperPages;
unsigned numBuckets;
unsigned maxAllocation;
...
@@ -990,6 +990,8 @@ TEST(PartitionAllocTest, FreeCache)
{
TestSetup();
EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);
size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
size_t bucketIdx = (bigSize + kExtraAllocSize) >> WTF::kBucketShift;
WTF::PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
@@ -999,6 +1001,7 @@ TEST(PartitionAllocTest, FreeCache)
WTF::PartitionPage* page = WTF::partitionPointerToPage(WTF::partitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(0, bucket->freePagesHead);
EXPECT_EQ(1, page->numAllocatedSlots);
EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
partitionFree(ptr);
EXPECT_EQ(0, page->numAllocatedSlots);
EXPECT_NE(-1, page->freeCacheIndex);
@@ -1010,6 +1013,8 @@ TEST(PartitionAllocTest, FreeCache)
EXPECT_FALSE(page->freelistHead);
EXPECT_EQ(-1, page->freeCacheIndex);
EXPECT_EQ(0, page->numAllocatedSlots);
WTF::PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * WTF::kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);
// Check that an allocation works ok whilst in this state (a free'd page
// as the active pages head).
@@ -1025,7 +1030,7 @@ TEST(PartitionAllocTest, FreeCache)
partitionFree(ptr);
EXPECT_TRUE(page->freelistHead);
}
EXPECT_EQ(WTF::kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
TestShutdown();
}
...