Commit 2c7d13f7 authored by sigbjornf's avatar sigbjornf Committed by Commit bot

Simple BlinkGC heap compaction.

This implements heap compaction for the Blink GC infrastructure
(Oilpan), compacting the arenas of the BlinkGC heap that are most
susceptible to becoming fragmented during actual use.

Fragmentation is a real and growing problem when browsing anything
but static pages: the amount of allocated but unused memory drifts
higher over time.

To avoid leaving an increasing number of unused holes in our heaps,
heap compaction periodically squeezes out the unused portions,
packing the live objects together. The heap pages that are left
empty as a result are then released and returned to the OS.

Due to a fortunate property of Blink heap collection types, providing
such compaction is within relatively easy reach. Experiments show that
the arenas which hold such collection objects ("backing stores") are
the ones that develop the most, and the most persistent, fragmentation.
While this is not a complete compactor of all Blink GC arenas, it
addresses the fragmentation problem where it is most pressing. More can
be done later.

Explainer / design document:

 https://docs.google.com/document/d/1k-vivOinomDXnScw8Ew5zpsYCXiYqj76OCOYZSvHkaU

R=haraken
BUG=672030

Review-Url: https://codereview.chromium.org/2531973002
Cr-Commit-Position: refs/heads/master@{#438125}
parent bb593915
......@@ -17,6 +17,7 @@
#include "core/dom/StyleEngine.h"
#include "core/dom/shadow/ElementShadow.h"
#include "core/html/imports/HTMLImportsController.h"
#include "platform/heap/HeapCompact.h"
#include "platform/heap/HeapPage.h"
#include "public/platform/Platform.h"
#include "public/platform/WebScheduler.h"
......@@ -203,6 +204,10 @@ bool ScriptWrappableVisitor::markWrapperHeader(HeapObjectHeader* header) const {
if (header->isWrapperHeaderMarked())
return false;
// Verify that no compactable & movable objects are slated for
// lazy unmarking.
DCHECK(!HeapCompact::isCompactableArena(
pageFromObject(header)->arena()->arenaIndex()));
header->markWrapperHeader();
m_headersToUnmark.push_back(header);
return true;
......
......@@ -116,6 +116,7 @@ GamepadExtensions origin_trial_feature_name=WebVR
GeometryInterfaces status=experimental, implied_by=CompositorWorker
GetUserMedia status=stable
GlobalCacheStorage status=stable
HeapCompaction status=experimental
IDBObserver status=experimental
IdleTimeSpellChecking
ImageCapture status=experimental, origin_trial_feature_name=ImageCapture
......
......@@ -26,6 +26,8 @@ source_set("heap") {
"Heap.h",
"HeapAllocator.cpp",
"HeapAllocator.h",
"HeapCompact.cpp",
"HeapCompact.h",
"HeapPage.cpp",
"HeapPage.h",
"InlinedGlobalMarkingVisitor.h",
......@@ -42,6 +44,8 @@ source_set("heap") {
"SafePoint.cpp",
"SafePoint.h",
"SelfKeepAlive.h",
"SparseHeapBitmap.cpp",
"SparseHeapBitmap.h",
"StackFrameDepth.cpp",
"StackFrameDepth.h",
"ThreadState.cpp",
......@@ -76,6 +80,7 @@ source_set("heap") {
test("blink_heap_unittests") {
sources = [
"BlinkGCMemoryDumpProviderTest.cpp",
"HeapCompactTest.cpp",
"HeapTest.cpp",
"PersistentTest.cpp",
"RunAllTests.cpp",
......
......@@ -25,6 +25,22 @@ using WeakCallback = VisitorCallback;
using EphemeronCallback = VisitorCallback;
using PreFinalizerCallback = bool (*)(void*);
// Simple alias to avoid heap compaction type signatures turning into
// a sea of generic |void*|s.
using MovableReference = void*;
// Heap compaction supports registering callbacks that are to be invoked
// when an object is moved during compaction. This is to support internal
// location fixups that need to happen as a result.
//
// i.e., when the object residing at |from| is moved to |to| by the compaction
// pass, invoke the callback to adjust any internal references that now need
// to be |to|-relative.
using MovingObjectCallback = void (*)(void* callbackData,
MovableReference from,
MovableReference to,
size_t);
// List of typed arenas. The list is used to generate the implementation
// of typed arena related methods.
//
......@@ -53,6 +69,10 @@ class PLATFORM_EXPORT BlinkGC final {
// Only the marking task runs in ThreadHeap::collectGarbage().
// The sweeping task is split into chunks and scheduled lazily.
GCWithoutSweep,
// After the marking task has run in ThreadHeap::collectGarbage(),
// sweep compaction of some heap arenas is performed. The sweeping
// of the remaining arenas is split into chunks and scheduled lazily.
GCWithSweepCompaction,
// Only the marking task runs just to take a heap snapshot.
// The sweeping task doesn't run. The marks added in the marking task
// are just cleared.
......@@ -77,7 +97,7 @@ class PLATFORM_EXPORT BlinkGC final {
NumberOfGCReason,
};
enum HeapIndices {
enum ArenaIndices {
EagerSweepArenaIndex = 0,
NormalPage1ArenaIndex,
NormalPage2ArenaIndex,
......
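For illustration, the MovingObjectCallback typedef above is the hook that heap compaction invokes after moving an object. A minimal sketch of such a callback (the names are invented for this example and are not part of the change) rebases an interior pointer that pointed into the old range:

#include <cstddef>
#include <cstdint>

using MovableReference = void*;  // As in BlinkGC.h.

// Hypothetical callback: |callbackData| is assumed to point at a single
// slot that may hold an interior pointer into the moved range
// [from, from + size).
static void rebaseInteriorPointer(void* callbackData,
                                  MovableReference from,
                                  MovableReference to,
                                  size_t size) {
  void** slot = reinterpret_cast<void**>(callbackData);
  uintptr_t oldBase = reinterpret_cast<uintptr_t>(from);
  uintptr_t pointer = reinterpret_cast<uintptr_t>(*slot);
  if (pointer >= oldBase && pointer < oldBase + size) {
    // Preserve the offset, now relative to the object's new location.
    *slot = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(to) +
                                    (pointer - oldBase));
  }
}

The per-container fixup registered by this change (see LinkedHashSet further down) follows the same pattern, but scans all nodes of the moved backing store.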
......@@ -35,6 +35,7 @@
#include "platform/ScriptForbiddenScope.h"
#include "platform/heap/BlinkGCMemoryDumpProvider.h"
#include "platform/heap/CallbackStack.h"
#include "platform/heap/HeapCompact.h"
#include "platform/heap/MarkingVisitor.h"
#include "platform/heap/PageMemory.h"
#include "platform/heap/PagePool.h"
......@@ -412,6 +413,25 @@ void ThreadHeap::commitCallbackStacks() {
m_ephemeronStack->commit();
}
HeapCompact* ThreadHeap::compaction() {
if (!m_compaction)
m_compaction = HeapCompact::create();
return m_compaction.get();
}
void ThreadHeap::registerMovingObjectReference(MovableReference* slot) {
DCHECK(slot);
DCHECK(*slot);
compaction()->registerMovingObjectReference(slot);
}
void ThreadHeap::registerMovingObjectCallback(MovableReference reference,
MovingObjectCallback callback,
void* callbackData) {
DCHECK(reference);
compaction()->registerMovingObjectCallback(reference, callback, callbackData);
}
void ThreadHeap::decommitCallbackStacks() {
m_markingStack->decommit();
m_postMarkingCallbackStack->decommit();
......
......@@ -81,6 +81,7 @@ class PLATFORM_EXPORT HeapAllocHooks {
};
class CrossThreadPersistentRegion;
class HeapCompact;
template <typename T>
class Member;
template <typename T>
......@@ -381,6 +382,27 @@ class PLATFORM_EXPORT ThreadHeap {
bool weakTableRegistered(const void*);
#endif
// Heap compaction registration methods:
// Register |slot| as containing a reference to a movable heap object.
//
// When compaction moves the object pointed to by |*slot| to |newAddress|,
// |*slot| must be updated to hold |newAddress| instead.
void registerMovingObjectReference(MovableReference*);
// Register a callback to be invoked upon moving the object starting at
// |reference|; see |MovingObjectCallback| documentation for details.
//
// This callback mechanism is needed to account for backing store objects
// containing intra-object pointers, all of which must be relocated/rebased
// with respect to the moved-to location.
//
// For Blink, |HeapLinkedHashSet<>| is currently the only abstraction which
// relies on this feature.
void registerMovingObjectCallback(MovableReference,
MovingObjectCallback,
void* callbackData);
BlinkGC::GCReason lastGCReason() { return m_lastGCReason; }
RegionTree* getRegionTree() { return m_regionTree.get(); }
......@@ -438,6 +460,8 @@ class PLATFORM_EXPORT ThreadHeap {
static void reportMemoryUsageHistogram();
static void reportMemoryUsageForTracing();
HeapCompact* compaction();
private:
// Reset counters that track live and allocated-since-last-GC sizes.
void resetHeapCounters();
......@@ -463,6 +487,8 @@ class PLATFORM_EXPORT ThreadHeap {
BlinkGC::GCReason m_lastGCReason;
StackFrameDepth m_stackFrameDepth;
std::unique_ptr<HeapCompact> m_compaction;
static ThreadHeap* s_mainThreadHeap;
friend class ThreadState;
......
......@@ -188,6 +188,20 @@ class PLATFORM_EXPORT HeapAllocator {
}
#endif
template <typename T, typename VisitorDispatcher>
static void registerBackingStoreReference(VisitorDispatcher visitor,
T** slot) {
visitor->registerBackingStoreReference(slot);
}
template <typename T, typename VisitorDispatcher>
static void registerBackingStoreCallback(VisitorDispatcher visitor,
T* backingStore,
MovingObjectCallback callback,
void* callbackData) {
visitor->registerBackingStoreCallback(backingStore, callback, callbackData);
}
static void enterGCForbiddenScope() {
ThreadState::current()->enterGCForbiddenScope();
}
......
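To illustrate how these HeapAllocator hooks are meant to be used from a collection's trace() method, here is a hypothetical container (MyHeapBuffer and m_buffer are invented names; the real users wired up in this change are Vector, Deque, HashTable and LinkedHashSet, further down):

#include "platform/heap/HeapAllocator.h"

namespace blink {

// Hypothetical container with an out-of-line, heap-allocated backing store.
class MyHeapBuffer {
 public:
  template <typename VisitorDispatcher>
  void trace(VisitorDispatcher visitor) {
    if (!m_buffer)
      return;
    // Keep the backing store alive; its contents are traced elsewhere.
    HeapAllocator::markNoTracing(visitor, m_buffer);
    // Ask compaction to rewrite |m_buffer| if the backing store is moved.
    HeapAllocator::registerBackingStoreReference(visitor, &m_buffer);
  }

 private:
  int* m_buffer = nullptr;
};

}  // namespace blink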
This diff is collapsed.
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef HeapCompact_h
#define HeapCompact_h
#include "platform/PlatformExport.h"
#include "platform/heap/BlinkGC.h"
#include "wtf/DataLog.h"
#include "wtf/PtrUtil.h"
#include "wtf/ThreadingPrimitives.h"
#include "wtf/Vector.h"
#include <bitset>
#include <utility>
// Compaction-specific debug switches:
// Set to 0 to prevent compaction GCs, disabling the heap compaction feature.
#define ENABLE_HEAP_COMPACTION 1
// Emit debug info during compaction.
#define DEBUG_HEAP_COMPACTION 0
// Emit stats on freelist occupancy.
// 0 - disabled, 1 - minimal, 2 - verbose.
#define DEBUG_HEAP_FREELIST 0
// Log the amount of time spent compacting.
#define DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME 0
// Compact during all idle + precise GCs; for debugging.
#define STRESS_TEST_HEAP_COMPACTION 0
namespace blink {
class NormalPageArena;
class BasePage;
class ThreadState;
class PLATFORM_EXPORT HeapCompact final {
public:
static std::unique_ptr<HeapCompact> create() {
return WTF::wrapUnique(new HeapCompact);
}
~HeapCompact();
// Determine if a GC for the given type and reason should also perform
// additional heap compaction.
//
bool shouldCompact(ThreadState*, BlinkGC::GCType, BlinkGC::GCReason);
// Called when compaction is to be performed as part of the ongoing GC;
// initializes the heap compaction pass. Returns the appropriate visitor
// type to use when running the marking phase.
BlinkGC::GCType initialize(ThreadState*);
// Returns true if the ongoing GC will perform compaction.
bool isCompacting() const { return m_doCompact; }
// Returns true if the ongoing GC will perform compaction for the given
// heap arena.
bool isCompactingArena(int arenaIndex) const {
return m_doCompact && (m_compactableArenas & (0x1u << arenaIndex));
}
// Returns |true| if the ongoing GC may compact the given arena/sub-heap.
static bool isCompactableArena(int arenaIndex) {
return arenaIndex >= BlinkGC::Vector1ArenaIndex &&
arenaIndex <= BlinkGC::HashTableArenaIndex;
}
// See |Heap::registerMovingObjectReference()| documentation.
void registerMovingObjectReference(MovableReference* slot);
// See |Heap::registerMovingObjectCallback()| documentation.
void registerMovingObjectCallback(MovableReference,
MovingObjectCallback,
void* callbackData);
// Used by a thread to signal that its compaction pass is starting or has
// completed.
//
// A thread participating in a heap GC will wait on the completion
// of compaction across all threads. No thread can be allowed to
// potentially access another thread's heap arenas while they're
// still being compacted.
void startThreadCompaction();
void finishThreadCompaction();
// Perform any relocation post-processing after compaction of the given
// arena has completed. The number of pages freed and the total size (in
// bytes) of freed heap storage are passed in as arguments.
void finishedArenaCompaction(NormalPageArena*,
size_t freedPages,
size_t freedSize);
// Register the heap page as containing live objects that will all be
// compacted. Registration happens as part of making the arenas ready
// for a GC.
void addCompactingPage(BasePage*);
// Notify heap compaction that the object at |from| has been relocated to |to|.
// (Called by the sweep compaction pass.)
void relocate(Address from, Address to);
// For unit testing only: arrange for a compaction GC to be triggered
// next time a non-conservative GC is run. Sets the compact-next flag
// to the new value, returning the old one.
static bool scheduleCompactionGCForTesting(bool);
private:
class MovableObjectFixups;
HeapCompact();
// Sample the amount of fragmentation and heap memory currently residing
// on the freelists of the arenas we're able to compact. The computed
// numbers will be subsequently used to determine if a heap compaction
// is in order (see shouldCompact()).
void updateHeapResidency(ThreadState*);
// Parameters controlling when compaction should be done:
// Number of GCs that must have passed since last compaction GC.
static const int kGCCountSinceLastCompactionThreshold = 10;
// Freelist size threshold that must be exceeded before compaction
// should be considered.
static const size_t kFreeListSizeThreshold = 512 * 1024;
MovableObjectFixups& fixups();
std::unique_ptr<MovableObjectFixups> m_fixups;
// Set to |true| when a compacting sweep will go ahead.
bool m_doCompact;
size_t m_gcCountSinceLastCompaction;
// Lock protecting finishedThreadCompaction() signalling.
Mutex m_mutex;
// All threads performing a GC must synchronize on completion
// of all heap compactions. Not doing so risks one thread resuming
// the mutator, which could perform cross-thread access to a heap
// that's still in the process of being compacted.
ThreadCondition m_finished;
// Number of heap threads participating in the compaction.
int m_threadCount;
// Last reported freelist size, across all compactable arenas.
size_t m_freeListSize;
// If compacting, the i'th heap arena will be compacted if the
// corresponding bit is set. Indexes are in the range of
// BlinkGC::ArenaIndices.
unsigned m_compactableArenas;
// Stats: the number of (complete) pages freed/decommitted, plus the
// number of bytes freed (which will include partial pages).
size_t m_freedPages;
size_t m_freedSize;
#if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME
double m_startCompactionTimeMS;
#endif
static bool s_forceCompactionGC;
};
} // namespace blink
// Logging macros activated by debug switches.
#define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) dataLogF(msg, ##__VA_ARGS__)
#if DEBUG_HEAP_COMPACTION
#define LOG_HEAP_COMPACTION(msg, ...) \
LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
#else
#define LOG_HEAP_COMPACTION(msg, ...) \
do { \
} while (0)
#endif
#if DEBUG_HEAP_FREELIST
#define LOG_HEAP_FREELIST(msg, ...) \
LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
#else
#define LOG_HEAP_FREELIST(msg, ...) \
do { \
} while (0)
#endif
#if DEBUG_HEAP_FREELIST == 2
#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
#else
#define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \
do { \
} while (0)
#endif
#endif // HeapCompact_h
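The HeapCompact.cpp implementation is not shown in this view, so the following is only a sketch of the kind of decision the two thresholds declared above (kGCCountSinceLastCompactionThreshold, kFreeListSizeThreshold) suggest; the real shouldCompact() also takes the GC type and reason into account:

#include <cstddef>

// Sketch only, not the actual implementation: compact when enough GCs have
// passed since the last compaction and the compactable arenas carry enough
// free-list memory to make compaction worthwhile.
static bool shouldCompactSketch(size_t gcCountSinceLastCompaction,
                                size_t freeListSize) {
  const size_t kGCCountSinceLastCompactionThreshold = 10;
  const size_t kFreeListSizeThreshold = 512 * 1024;
  return gcCountSinceLastCompaction > kGCCountSinceLastCompactionThreshold &&
         freeListSize > kFreeListSizeThreshold;
}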
This diff is collapsed.
......@@ -512,6 +512,26 @@ class NormalPage final : public BasePage {
inline NormalPageArena* arenaForNormalPage() const;
// Context object holding the state of the arena page compaction pass,
// passed in when compacting individual pages.
class CompactionContext {
STACK_ALLOCATED();
public:
// The page currently being compacted into.
NormalPage* m_currentPage = nullptr;
// Offset into |m_currentPage| to the next free address.
size_t m_allocationPoint = 0;
// Chain of available pages to use for compaction. Page compaction
// picks the next one when the current one is exhausted.
BasePage* m_availablePages = nullptr;
// Chain of pages that have been compacted. Page compaction will
// add compacted pages once the current one becomes exhausted.
BasePage** m_compactedPages = nullptr;
};
void sweepAndCompact(CompactionContext&);
private:
HeapObjectHeader* findHeaderFromAddress(Address);
void populateObjectStartBitMap();
......@@ -665,6 +685,8 @@ class FreeList {
// All FreeListEntries in the nth list have size >= 2^n.
FreeListEntry* m_freeLists[blinkPageSizeLog2];
size_t freeListSize() const;
friend class NormalPageArena;
};
......@@ -761,8 +783,14 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
bool isLazySweeping() const { return m_isLazySweeping; }
void setIsLazySweeping(bool flag) { m_isLazySweeping = flag; }
size_t arenaSize();
size_t freeListSize();
void sweepAndCompact();
private:
void allocatePage();
Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex);
Address allocateFromFreeList(size_t, size_t gcInfoIndex);
......
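NormalPage::sweepAndCompact() itself (HeapPage.cpp) is not shown in this view; purely as a sketch of how the CompactionContext fields above are intended to be used, relocating a single live object amounts to sliding it down to the current allocation point and reporting the move so that registered slots and callbacks can be fixed up. The real implementation additionally finalizes dead objects, switches pages when the current one is exhausted, and handles ASAN poisoning.

#include <string.h>

#include "platform/heap/HeapCompact.h"
#include "platform/heap/HeapPage.h"

namespace blink {

// Sketch only; not the actual NormalPage::sweepAndCompact() logic.
static void compactOneObjectSketch(NormalPage::CompactionContext& context,
                                   HeapObjectHeader* header,
                                   HeapCompact* compaction) {
  size_t size = header->size();
  Address from = reinterpret_cast<Address>(header);
  Address to = context.m_currentPage->payload() + context.m_allocationPoint;
  // Source and destination may overlap within a page, hence memmove.
  if (from != to)
    memmove(to, from, size);
  // Report the payload move so that registered references are updated.
  compaction->relocate(from + sizeof(HeapObjectHeader),
                       to + sizeof(HeapObjectHeader));
  context.m_allocationPoint += size;
}

}  // namespace blink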
......@@ -349,6 +349,12 @@ class CountingVisitor : public Visitor {
return true;
}
void registerMovingObjectReference(MovableReference*) override {}
void registerMovingObjectCallback(MovableReference,
MovingObjectCallback,
void*) override {}
size_t count() { return m_count; }
void reset() { m_count = 0; }
......@@ -3759,6 +3765,7 @@ TEST(HeapTest, RefCountedGarbageCollectedWithStackPointers) {
pointer1 = object1.get();
pointer2 = object2.get();
void* objects[2] = {object1.get(), object2.get()};
ThreadState::GCForbiddenScope gcScope(ThreadState::current());
RefCountedGarbageCollectedVisitor visitor(ThreadState::current(), 2,
objects);
ThreadState::current()->visitPersistents(&visitor);
......@@ -3776,6 +3783,7 @@ TEST(HeapTest, RefCountedGarbageCollectedWithStackPointers) {
// At this point, the reference counts of object1 and object2 are 0.
// Only pointer1 and pointer2 keep references to object1 and object2.
void* objects[] = {0};
ThreadState::GCForbiddenScope gcScope(ThreadState::current());
RefCountedGarbageCollectedVisitor visitor(ThreadState::current(), 0,
objects);
ThreadState::current()->visitPersistents(&visitor);
......@@ -3786,6 +3794,7 @@ TEST(HeapTest, RefCountedGarbageCollectedWithStackPointers) {
Persistent<RefCountedAndGarbageCollected> object1(pointer1);
Persistent<RefCountedAndGarbageCollected2> object2(pointer2);
void* objects[2] = {object1.get(), object2.get()};
ThreadState::GCForbiddenScope gcScope(ThreadState::current());
RefCountedGarbageCollectedVisitor visitor(ThreadState::current(), 2,
objects);
ThreadState::current()->visitPersistents(&visitor);
......@@ -3935,6 +3944,7 @@ TEST(HeapTest, CheckAndMarkPointer) {
// checkAndMarkPointer tests.
{
TestGCScope scope(BlinkGC::HeapPointersOnStack);
ThreadState::GCForbiddenScope gcScope(ThreadState::current());
CountingVisitor visitor(ThreadState::current());
EXPECT_TRUE(scope.allThreadsParked()); // Fail the test if we could not
// park all threads.
......@@ -3956,6 +3966,7 @@ TEST(HeapTest, CheckAndMarkPointer) {
clearOutOldGarbage();
{
TestGCScope scope(BlinkGC::HeapPointersOnStack);
ThreadState::GCForbiddenScope gcScope(ThreadState::current());
CountingVisitor visitor(ThreadState::current());
EXPECT_TRUE(scope.allThreadsParked());
heap.flushHeapDoesNotContainCache();
......@@ -5854,6 +5865,7 @@ class PartObject {
};
TEST(HeapTest, TraceIfNeeded) {
ThreadState::GCForbiddenScope scope(ThreadState::current());
CountingVisitor visitor(ThreadState::current());
{
......
......@@ -18,8 +18,9 @@ class InlinedGlobalMarkingVisitor final
friend class MarkingVisitorImpl<InlinedGlobalMarkingVisitor>;
using Impl = MarkingVisitorImpl<InlinedGlobalMarkingVisitor>;
explicit InlinedGlobalMarkingVisitor(ThreadState* state)
: VisitorHelper(state) {}
InlinedGlobalMarkingVisitor(ThreadState* state,
Visitor::MarkingMode markingMode)
: VisitorHelper(state), m_markingMode(markingMode) {}
// Hack to unify interface to visitor->trace().
// Without this hack, we need to use visitor.trace() for
......@@ -54,14 +55,15 @@ class InlinedGlobalMarkingVisitor final
return true;
}
inline Visitor::MarkingMode getMarkingMode() const {
return Visitor::GlobalMarking;
}
inline Visitor::MarkingMode getMarkingMode() const { return m_markingMode; }
private:
static InlinedGlobalMarkingVisitor fromHelper(Helper* helper) {
return *static_cast<InlinedGlobalMarkingVisitor*>(helper);
}
// TODO(sof): attempt to unify this field with Visitor::m_markingMode.
const Visitor::MarkingMode m_markingMode;
};
inline void GarbageCollectedMixin::trace(InlinedGlobalMarkingVisitor) {}
......
......@@ -48,6 +48,16 @@ class MarkingVisitor final : public Visitor,
}
#endif
void registerMovingObjectReference(MovableReference* slot) override {
Impl::registerMovingObjectReference(slot);
}
void registerMovingObjectCallback(MovableReference backingStore,
MovingObjectCallback callback,
void* callbackData) override {
Impl::registerMovingObjectCallback(backingStore, callback, callbackData);
}
bool ensureMarked(const void* objectPointer) override {
return Impl::ensureMarked(objectPointer);
}
......
......@@ -88,6 +88,21 @@ class MarkingVisitorImpl {
}
#endif
inline void registerMovingObjectReference(MovableReference* slot) {
if (toDerived()->getMarkingMode() != Visitor::GlobalMarkingWithCompaction)
return;
toDerived()->heap().registerMovingObjectReference(slot);
}
inline void registerMovingObjectCallback(MovableReference reference,
MovingObjectCallback callback,
void* callbackData) {
if (toDerived()->getMarkingMode() != Visitor::GlobalMarkingWithCompaction)
return;
toDerived()->heap().registerMovingObjectCallback(reference, callback,
callbackData);
}
inline bool ensureMarked(const void* objectPointer) {
if (!objectPointer)
return false;
......
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "platform/heap/SparseHeapBitmap.h"
#include "wtf/PtrUtil.h"
namespace blink {
// Return the subtree/bitmap that covers the
// [address, address + size) range. Null if there is none.
SparseHeapBitmap* SparseHeapBitmap::hasRange(Address address, size_t size) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & s_pointerAlignmentMask));
SparseHeapBitmap* bitmap = this;
while (bitmap) {
// The interval starts after this node's range; |m_right| handles it.
if (address > bitmap->end()) {
bitmap = bitmap->m_right.get();
continue;
}
// The interval starts within this node's range; |bitmap| is included in
// the result.
if (address >= bitmap->base())
break;
Address right = address + size - 1;
// Interval starts before, but intersects with |bitmap|'s range.
if (right >= bitmap->base())
break;
// The interval is entirely before this node's range; |m_left| handles it.
bitmap = bitmap->m_left.get();
}
return bitmap;
}
bool SparseHeapBitmap::isSet(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & s_pointerAlignmentMask));
SparseHeapBitmap* bitmap = this;
while (bitmap) {
if (address > bitmap->end()) {
bitmap = bitmap->m_right.get();
continue;
}
if (address >= bitmap->base()) {
if (bitmap->m_bitmap) {
return bitmap->m_bitmap->test((address - bitmap->base()) >>
s_pointerAlignmentInBits);
}
DCHECK(address == bitmap->base());
DCHECK_EQ(bitmap->size(), 1u);
return true;
}
bitmap = bitmap->m_left.get();
}
return false;
}
void SparseHeapBitmap::add(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & s_pointerAlignmentMask));
// |address| is beyond the maximum that this SparseHeapBitmap node can
// encompass.
if (address >= maxEnd()) {
if (!m_right) {
m_right = SparseHeapBitmap::create(address);
return;
}
m_right->add(address);
return;
}
// Same on the other side.
if (address < minStart()) {
if (!m_left) {
m_left = SparseHeapBitmap::create(address);
return;
}
m_left->add(address);
return;
}
if (address == base())
return;
// |address| can be encompassed by |this| by expanding its size.
if (address > base()) {
if (!m_bitmap)
createBitmap();
m_bitmap->set((address - base()) >> s_pointerAlignmentInBits);
return;
}
// Use |address| as the new base for this interval.
Address oldBase = swapBase(address);
createBitmap();
m_bitmap->set((oldBase - address) >> s_pointerAlignmentInBits);
}
void SparseHeapBitmap::createBitmap() {
DCHECK(!m_bitmap && size() == 1);
m_bitmap = WTF::makeUnique<std::bitset<s_bitmapChunkSize>>();
m_size = s_bitmapChunkRange;
m_bitmap->set(0);
}
size_t SparseHeapBitmap::intervalCount() const {
size_t count = 1;
if (m_left)
count += m_left->intervalCount();
if (m_right)
count += m_right->intervalCount();
return count;
}
} // namespace blink
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SparseHeapBitmap_h
#define SparseHeapBitmap_h
#include "platform/heap/BlinkGC.h"
#include "platform/heap/HeapPage.h"
#include "wtf/Alignment.h"
#include "wtf/PtrUtil.h"
#include <bitset>
#include <memory>
namespace blink {
// A sparse bitmap of heap addresses where the (very few) addresses that are
// set are likely to be in small clusters. The abstraction is tailored to
// support heap compaction, assuming the following:
//
// - Addresses will be bitmap-marked from lower to higher addresses.
// - Bitmap lookups are performed for each object that is compacted
// and moved to some new location, supplying the (base, size)
// pair of the object's heap allocation.
// - If the sparse bitmap has any marked addresses in that range, it
// returns a sub-bitmap that can be quickly iterated over to check which
// addresses within the range are actually set.
// - The bitmap is needed to support something that is very rarely done
// by the current Blink codebase, which is to have nested collection
// part objects. Consequently, it is safe to assume sparseness.
//
// Support the above by having a sparse bitmap organized as a binary
// tree with nodes covering fixed size ranges via a simple bitmap/set.
// That is, each SparseHeapBitmap node will contain a bitmap/set for
// some fixed size range, along with pointers to SparseHeapBitmaps
// for addresses on each side of its range.
//
// This bitmap tree isn't kept balanced across the Address additions
// made.
//
class PLATFORM_EXPORT SparseHeapBitmap {
public:
static std::unique_ptr<SparseHeapBitmap> create(Address base) {
return WTF::wrapUnique(new SparseHeapBitmap(base));
}
~SparseHeapBitmap() {}
// Return the sparse bitmap subtree that at least covers the
// [address, address + size) range, or nullptr if none.
//
// The returned SparseHeapBitmap can be used to quickly look up which
// addresses in that range are set or not; see |isSet()|. Its
// |isSet()| behavior outside that range is not defined.
SparseHeapBitmap* hasRange(Address, size_t);
// True iff |address| is set for this SparseHeapBitmap tree.
bool isSet(Address);
// Mark |address| as present/set.
void add(Address);
// The assumed minimum alignment of the pointers being added. Cannot
// exceed |log2(allocationGranularity)|; having it be equal to
// the platform pointer alignment is what's wanted.
static const int s_pointerAlignmentInBits = WTF_ALIGN_OF(void*) == 8 ? 3 : 2;
static const size_t s_pointerAlignmentMask =
(0x1u << s_pointerAlignmentInBits) - 1;
// Represent ranges in 0x100 bitset chunks; bit I is set iff Address
// |m_base + I * (0x1 << s_pointerAlignmentInBits)| has been added to the
// |SparseHeapBitmap|.
static const size_t s_bitmapChunkSize = 0x100;
// A SparseHeapBitmap either contains a single Address or a bitmap
// recording the mapping for [m_base, m_base + s_bitmapChunkRange)
static const size_t s_bitmapChunkRange = s_bitmapChunkSize
<< s_pointerAlignmentInBits;
// Return the number of nodes; for debug stats.
size_t intervalCount() const;
private:
explicit SparseHeapBitmap(Address base) : m_base(base), m_size(1) {
DCHECK(!(reinterpret_cast<uintptr_t>(m_base) & s_pointerAlignmentMask));
static_assert(s_pointerAlignmentMask <= allocationMask,
"address shift exceeds heap pointer alignment");
// For now, only recognize 8 and 4.
static_assert(WTF_ALIGN_OF(void*) == 8 || WTF_ALIGN_OF(void*) == 4,
"unsupported pointer alignment");
}
Address base() const { return m_base; }
size_t size() const { return m_size; }
Address end() const { return base() + (m_size - 1); }
Address maxEnd() const { return base() + s_bitmapChunkRange; }
Address minStart() const {
// If this bitmap node represents the sparse
// [m_base, m_base + s_bitmapChunkRange) range, do not allow it to be
// "left extended" as that would entail having to shift down the
// contents of the std::bitset somehow.
//
// This shouldn't be a real problem as any clusters of set addresses
// will be marked while iterating from lower to higher addresses, hence
// "left extensions" are unlikely to be common.
if (m_bitmap)
return base();
return (m_base > reinterpret_cast<Address>(s_bitmapChunkRange))
? (base() - s_bitmapChunkRange + 1)
: nullptr;
}
Address swapBase(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & s_pointerAlignmentMask));
Address oldBase = m_base;
m_base = address;
return oldBase;
}
void createBitmap();
Address m_base;
// Either 1 or |s_bitmapChunkRange|.
size_t m_size;
// If non-null, contains a bitmap for addresses within
// [m_base, m_base + m_size).
std::unique_ptr<std::bitset<s_bitmapChunkSize>> m_bitmap;
std::unique_ptr<SparseHeapBitmap> m_left;
std::unique_ptr<SparseHeapBitmap> m_right;
};
} // namespace blink
#endif // SparseHeapBitmap_h
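A usage illustration of the SparseHeapBitmap API declared above (the offsets are made up; |objectBase| is assumed to be pointer aligned, as the implementation DCHECKs):

#include <cstddef>

#include "platform/heap/SparseHeapBitmap.h"

namespace blink {

// Illustration only: record a couple of interior slot addresses, then query
// them for a (base, size) range the way the compaction pass would.
static void sparseHeapBitmapExample(Address objectBase) {
  std::unique_ptr<SparseHeapBitmap> bitmap =
      SparseHeapBitmap::create(objectBase);
  bitmap->add(objectBase + sizeof(void*));
  bitmap->add(objectBase + 3 * sizeof(void*));

  if (SparseHeapBitmap* range = bitmap->hasRange(objectBase, 64)) {
    for (size_t offset = 0; offset < 64; offset += sizeof(void*)) {
      if (range->isSet(objectBase + offset)) {
        // The slot at |objectBase + offset| holds an interior pointer that
        // must be rebased if its object is moved.
      }
    }
  }
}

}  // namespace blink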
......@@ -38,6 +38,7 @@
#include "platform/heap/CallbackStack.h"
#include "platform/heap/Handle.h"
#include "platform/heap/Heap.h"
#include "platform/heap/HeapCompact.h"
#include "platform/heap/PagePool.h"
#include "platform/heap/SafePoint.h"
#include "platform/heap/Visitor.h"
......@@ -79,7 +80,7 @@ uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024;
const char* gcReasonString(BlinkGC::GCReason reason) {
const char* ThreadState::gcReasonString(BlinkGC::GCReason reason) {
switch (reason) {
case BlinkGC::IdleGC:
return "IdleGC";
......@@ -504,6 +505,7 @@ void ThreadState::threadLocalWeakProcessing() {
// Due to the complexity, we just forbid allocations.
NoAllocationScope noAllocationScope(this);
GCForbiddenScope gcForbiddenScope(this);
std::unique_ptr<Visitor> visitor =
Visitor::create(this, BlinkGC::ThreadLocalWeakProcessing);
......@@ -999,7 +1001,7 @@ void ThreadState::runScheduledGC(BlinkGC::StackState stackState) {
return;
// If a safe point is entered while initiating a GC, we clearly do
// not want to do another as part that -- the safe point is only
// not want to do another as part of that -- the safe point is only
// entered after checking if a scheduled GC ought to run first.
// Prevent that from happening by marking GCs as forbidden while
// one is initiated and later running.
......@@ -1040,6 +1042,34 @@ void ThreadState::makeConsistentForGC() {
m_arenas[i]->makeConsistentForGC();
}
void ThreadState::compact() {
if (!heap().compaction()->isCompacting())
return;
SweepForbiddenScope scope(this);
ScriptForbiddenIfMainThreadScope scriptForbiddenScope;
NoAllocationScope noAllocationScope(this);
// Compaction is done eagerly and before the mutator threads get
// to run again. Doing it lazily is problematic, as the mutator's
// references to live objects could suddenly be invalidated by
// compaction of a page/heap. We do know all the references to
// the relocating objects just after marking, but won't later.
// (e.g., stack references could have been created, new objects
// created which refer to old collection objects, and so on.)
// Compact the hash table backing store arena first; it usually has
// higher fragmentation and is larger.
//
// TODO: implement bail out wrt any overall deadline, not compacting
// the remaining arenas if the time budget has been exceeded.
heap().compaction()->startThreadCompaction();
for (int i = BlinkGC::HashTableArenaIndex; i >= BlinkGC::Vector1ArenaIndex;
--i)
static_cast<NormalPageArena*>(m_arenas[i])->sweepAndCompact();
heap().compaction()->finishThreadCompaction();
}
void ThreadState::makeConsistentForMutator() {
ASSERT(isInGC());
for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
......@@ -1123,9 +1153,19 @@ void ThreadState::preSweep() {
eagerSweep();
// Any sweep compaction must happen after pre-finalizers and eager
// sweeping, as it will finalize dead objects in compactable arenas
// (e.g., backing stores for container objects).
//
// As per the contract for pre-finalizers, those finalizable objects must
// still be accessible when the pre-finalizer runs; hence compaction cannot
// be scheduled until those have run. Similarly for eager sweeping.
compact();
#if defined(ADDRESS_SANITIZER)
poisonAllHeaps();
#endif
if (previousGCState == EagerSweepScheduled) {
// Eager sweeping should happen only in testing.
completeSweep();
......@@ -1674,7 +1714,7 @@ void ThreadState::collectGarbage(BlinkGC::StackState stackState,
RELEASE_ASSERT(!isGCForbidden());
completeSweep();
std::unique_ptr<Visitor> visitor = Visitor::create(this, gcType);
GCForbiddenScope gcForbiddenScope(this);
SafePointScope safePointScope(stackState, this);
......@@ -1685,6 +1725,12 @@ void ThreadState::collectGarbage(BlinkGC::StackState stackState,
if (!parkThreadsScope.parkThreads())
return;
BlinkGC::GCType visitorType = gcType;
if (heap().compaction()->shouldCompact(this, gcType, reason))
visitorType = heap().compaction()->initialize(this);
std::unique_ptr<Visitor> visitor = Visitor::create(this, visitorType);
ScriptForbiddenIfMainThreadScope scriptForbidden;
TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", "lazySweeping",
......@@ -1697,7 +1743,7 @@ void ThreadState::collectGarbage(BlinkGC::StackState stackState,
// Disallow allocation during garbage collection (but not during the
// finalization that happens when the visitorScope is torn down).
ThreadState::NoAllocationScope noAllocationScope(this);
NoAllocationScope noAllocationScope(this);
heap().commitCallbackStacks();
heap().preGC();
......@@ -1785,10 +1831,11 @@ void ThreadState::collectGarbageForTerminatingThread() {
// ahead while it is running, hence the termination GC does not enter a
// safepoint. VisitorScope will also not enter a safepoint scope for
// ThreadTerminationGC.
GCForbiddenScope gcForbiddenScope(this);
std::unique_ptr<Visitor> visitor =
Visitor::create(this, BlinkGC::ThreadTerminationGC);
ThreadState::NoAllocationScope noAllocationScope(this);
NoAllocationScope noAllocationScope(this);
heap().commitCallbackStacks();
preGC();
......
......@@ -282,6 +282,8 @@ class PLATFORM_EXPORT ThreadState {
// the executions of mutators.
void makeConsistentForMutator();
void compact();
// Support for disallowing allocation. Mainly used for sanity
// check asserts.
bool isAllocationAllowed() const {
......@@ -320,6 +322,20 @@ class PLATFORM_EXPORT ThreadState {
ThreadState* const m_threadState;
};
class GCForbiddenScope final {
STACK_ALLOCATED();
public:
explicit GCForbiddenScope(ThreadState* threadState)
: m_threadState(threadState) {
m_threadState->enterGCForbiddenScope();
}
~GCForbiddenScope() { m_threadState->leaveGCForbiddenScope(); }
private:
ThreadState* const m_threadState;
};
void flushHeapDoesNotContainCacheIfNeeded();
// Safepoint related functionality.
......@@ -536,6 +552,8 @@ class PLATFORM_EXPORT ThreadState {
v8::Isolate* isolate() const { return m_isolate; }
BlinkGC::StackState stackState() const { return m_stackState; }
void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason);
void collectGarbageForTerminatingThread();
void collectAllGarbage();
......@@ -560,6 +578,8 @@ class PLATFORM_EXPORT ThreadState {
}
};
static const char* gcReasonString(BlinkGC::GCReason);
private:
template <typename T>
friend class PrefinalizerRegistration;
......
......@@ -231,9 +231,10 @@ class TraceTrait<const T> : public TraceTrait<T> {};
template <typename T>
void TraceTrait<T>::trace(Visitor* visitor, void* self) {
static_assert(WTF::IsTraceable<T>::value, "T should not be traced");
if (visitor->getMarkingMode() == Visitor::GlobalMarking) {
if (visitor->isGlobalMarking()) {
// Switch to inlined global marking dispatch.
static_cast<T*>(self)->trace(InlinedGlobalMarkingVisitor(visitor->state()));
static_cast<T*>(self)->trace(InlinedGlobalMarkingVisitor(
visitor->state(), visitor->getMarkingMode()));
} else {
static_cast<T*>(self)->trace(visitor);
}
......
......@@ -18,6 +18,9 @@ std::unique_ptr<Visitor> Visitor::create(ThreadState* state,
case BlinkGC::GCWithSweep:
case BlinkGC::GCWithoutSweep:
return WTF::makeUnique<MarkingVisitor<Visitor::GlobalMarking>>(state);
case BlinkGC::GCWithSweepCompaction:
return WTF::makeUnique<
MarkingVisitor<Visitor::GlobalMarkingWithCompaction>>(state);
case BlinkGC::TakeSnapshot:
return WTF::makeUnique<MarkingVisitor<Visitor::SnapshotMarking>>(state);
case BlinkGC::ThreadTerminationGC:
......@@ -35,13 +38,12 @@ Visitor::Visitor(ThreadState* state, MarkingMode markingMode)
: VisitorHelper(state), m_markingMode(markingMode) {
// See ThreadState::runScheduledGC() why we need to already be in a
// GCForbiddenScope before any safe point is entered.
state->enterGCForbiddenScope();
ASSERT(state->checkThread());
DCHECK(state->isGCForbidden());
#if ENABLE(ASSERT)
DCHECK(state->checkThread());
#endif
}
Visitor::~Visitor() {
state()->leaveGCForbiddenScope();
}
Visitor::~Visitor() {}
} // namespace blink
......@@ -243,10 +243,26 @@ class VisitorHelper {
void registerWeakMembers(const T* obj) {
registerWeakMembers(obj, &TraceMethodDelegate<T, method>::trampoline);
}
void registerWeakMembers(const void* object, WeakCallback callback) {
Derived::fromHelper(this)->registerWeakMembers(object, object, callback);
}
template <typename T>
void registerBackingStoreReference(T** slot) {
Derived::fromHelper(this)->registerMovingObjectReference(
reinterpret_cast<MovableReference*>(slot));
}
template <typename T>
void registerBackingStoreCallback(T* backingStore,
MovingObjectCallback callback,
void* callbackData) {
Derived::fromHelper(this)->registerMovingObjectCallback(
reinterpret_cast<MovableReference>(backingStore), callback,
callbackData);
}
inline ThreadState* state() const { return m_state; }
inline ThreadHeap& heap() const { return state()->heap(); }
......@@ -254,7 +270,7 @@ class VisitorHelper {
template <typename T>
static void handleWeakCell(Visitor* self, void* object);
ThreadState* m_state;
ThreadState* const m_state;
};
// Visitor is used to traverse the Blink object graph. Used for the
......@@ -284,6 +300,13 @@ class PLATFORM_EXPORT Visitor : public VisitorHelper<Visitor> {
// This visitor is used to trace objects during weak processing.
// This visitor is allowed to trace only already marked objects.
WeakProcessing,
// Perform global marking along with preparing for additional sweep
// compaction of heap arenas afterwards. Compared to the GlobalMarking
// visitor, this visitor will also register references to objects
// that might be moved during arena compaction -- the compaction
// pass will then fix up those references when the object move goes
// ahead.
GlobalMarkingWithCompaction,
};
static std::unique_ptr<Visitor> create(ThreadState*, BlinkGC::GCType);
......@@ -344,10 +367,21 @@ class PLATFORM_EXPORT Visitor : public VisitorHelper<Visitor> {
virtual bool ensureMarked(const void*) = 0;
virtual void registerMovingObjectReference(MovableReference*) = 0;
virtual void registerMovingObjectCallback(MovableReference,
MovingObjectCallback,
void*) = 0;
virtual void registerWeakCellWithCallback(void**, WeakCallback) = 0;
inline MarkingMode getMarkingMode() const { return m_markingMode; }
inline bool isGlobalMarking() const {
return m_markingMode == GlobalMarking ||
m_markingMode == GlobalMarkingWithCompaction;
}
protected:
Visitor(ThreadState*, MarkingMode);
......@@ -356,7 +390,6 @@ class PLATFORM_EXPORT Visitor : public VisitorHelper<Visitor> {
return static_cast<Visitor*>(helper);
}
ThreadState* m_state;
const MarkingMode m_markingMode;
};
......
......@@ -656,8 +656,10 @@ void Deque<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) {
visitor, *const_cast<T*>(bufferEntry));
}
}
if (m_buffer.hasOutOfLineBuffer())
if (m_buffer.hasOutOfLineBuffer()) {
Allocator::markNoTracing(visitor, m_buffer.buffer());
Allocator::registerBackingStoreReference(visitor, m_buffer.bufferSlot());
}
}
template <typename T, size_t inlineCapacity, typename Allocator>
......
......@@ -2105,6 +2105,11 @@ void HashTable<Key,
Extractor, HashFunctions, Traits,
KeyTraits, Allocator>::process);
}
// If the backing store will be moved by sweep compaction, register the
// table reference pointing to the backing store object, so that the
// reference is updated upon object relocation. A no-op if not enabled
// by the visitor.
Allocator::registerBackingStoreReference(visitor, &m_table);
if (!IsTraceableInCollectionTrait<Traits>::value)
return;
if (Traits::weakHandlingFlag == WeakHandlingInCollections) {
......
......@@ -299,6 +299,13 @@ class LinkedHashSet {
template <typename VisitorDispatcher>
void trace(VisitorDispatcher visitor) {
m_impl.trace(visitor);
// Should the underlying table be moved by GC, register a callback
// that fixes up the interior pointers that the (Heap)LinkedHashSet keeps.
if (m_impl.m_table) {
Allocator::registerBackingStoreCallback(
visitor, m_impl.m_table, moveBackingCallback,
reinterpret_cast<void*>(&m_anchor));
}
}
int64_t modifications() const { return m_impl.modifications(); }
......@@ -333,6 +340,50 @@ class LinkedHashSet {
return const_reverse_iterator(position, this);
}
static void moveBackingCallback(void* anchor,
void* from,
void* to,
size_t size) {
// Note: the hash table move may have been overlapping; linearly scan the
// entire table and fix up interior pointers into the old region with
// correspondingly offset ones into the new.
size_t tableSize = size / sizeof(Node);
Node* table = reinterpret_cast<Node*>(to);
NodeBase* fromStart = reinterpret_cast<NodeBase*>(from);
NodeBase* fromEnd =
reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(from) + size);
for (Node* element = table + tableSize - 1; element >= table; element--) {
Node& node = *element;
if (ImplType::isEmptyOrDeletedBucket(node))
continue;
if (node.m_next >= fromStart && node.m_next < fromEnd) {
size_t diff = reinterpret_cast<uintptr_t>(node.m_next) -
reinterpret_cast<uintptr_t>(from);
node.m_next =
reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff);
}
if (node.m_prev >= fromStart && node.m_prev < fromEnd) {
size_t diff = reinterpret_cast<uintptr_t>(node.m_prev) -
reinterpret_cast<uintptr_t>(from);
node.m_prev =
reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff);
}
}
NodeBase* anchorNode = reinterpret_cast<NodeBase*>(anchor);
if (anchorNode->m_next >= fromStart && anchorNode->m_next < fromEnd) {
size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_next) -
reinterpret_cast<uintptr_t>(from);
anchorNode->m_next =
reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff);
}
if (anchorNode->m_prev >= fromStart && anchorNode->m_prev < fromEnd) {
size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_prev) -
reinterpret_cast<uintptr_t>(from);
anchorNode->m_prev =
reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff);
}
}
ImplType m_impl;
NodeBase m_anchor;
};
......
......@@ -492,6 +492,8 @@ class VectorBuffer<T, 0, Allocator>
return buffer();
}
T** bufferSlot() { return &m_buffer; }
protected:
using Base::m_size;
......@@ -771,6 +773,8 @@ class VectorBuffer : protected VectorBufferBase<T, true, Allocator> {
return buffer() && buffer() != inlineBuffer();
}
T** bufferSlot() { return &m_buffer; }
protected:
using Base::m_size;
......@@ -1603,6 +1607,7 @@ void Vector<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) {
if (Allocator::isHeapObjectAlive(buffer()))
return;
Allocator::markNoTracing(visitor, buffer());
Allocator::registerBackingStoreReference(visitor, Base::bufferSlot());
}
const T* bufferBegin = buffer();
const T* bufferEnd = buffer() + size();
......