Commit c164bf66 authored by Michael Lippautz's avatar Michael Lippautz Committed by Commit Bot

heap: Another round of Thread{State,Heap} API cleanups

- Move PrefinalizerRegistration to top
- More documentation
- Move schedule helpers to private section
- Remove dead code

Bug: 982754
Change-Id: I8eaa76bae7f18cc881b66e65df099806a3234f99
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1745988
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Kentaro Hara <haraken@chromium.org>
Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#685523}
parent 69861340
...@@ -182,11 +182,6 @@ class PLATFORM_EXPORT ThreadHeap { ...@@ -182,11 +182,6 @@ class PLATFORM_EXPORT ThreadHeap {
explicit ThreadHeap(ThreadState*); explicit ThreadHeap(ThreadState*);
~ThreadHeap(); ~ThreadHeap();
// Returns true for main thread's heap.
// TODO(keishi): Per-thread-heap will return false.
bool IsMainThreadHeap() { return this == ThreadHeap::MainThreadHeap(); }
static ThreadHeap* MainThreadHeap() { return main_thread_heap_; }
template <typename T> template <typename T>
static inline bool IsHeapObjectAlive(const T* object) { static inline bool IsHeapObjectAlive(const T* object) {
static_assert(sizeof(T), "T must be fully defined"); static_assert(sizeof(T), "T must be fully defined");
......
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
namespace v8 { namespace v8 {
class EmbedderGraph; class EmbedderGraph;
class Isolate; class Isolate;
} } // namespace v8
namespace blink { namespace blink {
...@@ -68,57 +68,43 @@ class PersistentNode; ...@@ -68,57 +68,43 @@ class PersistentNode;
class PersistentRegion; class PersistentRegion;
class ThreadHeap; class ThreadHeap;
class ThreadState; class ThreadState;
class UnifiedHeapController;
class Visitor;
template <ThreadAffinity affinity> template <ThreadAffinity affinity>
class ThreadStateFor; class ThreadStateFor;
class UnifiedHeapController;
class Visitor;
// Declare that a class has a pre-finalizer. The pre-finalizer is called // Declare that a class has a pre-finalizer which gets invoked before objects
// before any object gets swept, so it is safe to touch on-heap objects // get swept. It is thus safe to touch on-heap objects that may be collected in
// that may be collected in the same GC cycle. If you cannot avoid touching // the same GC cycle. This is useful when it's not possible to avoid touching
// on-heap objects in a destructor (which is not allowed), you can consider // on-heap objects in a destructor which is forbidden.
// using the pre-finalizer. The only restriction is that the pre-finalizer
// must not resurrect dead objects (e.g., store unmarked objects into
// Members etc). The pre-finalizer is called on the thread that registered
// the pre-finalizer.
//
// Since a pre-finalizer adds pressure on GC performance, you should use it
// only if necessary.
// //
// A pre-finalizer is similar to the // Note that:
// HeapHashMap<WeakMember<Foo>, std::unique_ptr<Disposer>> idiom. The // (a) Pre-finalizers *must* not resurrect dead objects.
// difference between this and the idiom is that pre-finalizer function is // (b) Run on the same thread they are registered on.
// called whenever an object is destructed with this feature. The // (c) Decrease GC performance which means that they should only be used if
// HeapHashMap<WeakMember<Foo>, std::unique_ptr<Disposer>> idiom requires an // absolutely necessary.
// assumption that the HeapHashMap outlives objects pointed by WeakMembers.
// FIXME: Replace all of the
// HeapHashMap<WeakMember<Foo>, std::unique_ptr<Disposer>> idiom usages with the
// pre-finalizer if the replacement won't cause performance regressions.
// //
// Usage: // Usage:
// // class Foo : GarbageCollected<Foo> {
// class Foo : GarbageCollected<Foo> { // USING_PRE_FINALIZER(Foo, Dispose);
// USING_PRE_FINALIZER(Foo, dispose); // private:
// private: // void Dispose() {
// void dispose() // bar_->...; // It is safe to touch other on-heap objects.
// {
// bar_->...; // It is safe to touch other on-heap objects.
// } // }
// Member<Bar> bar_; // Member<Bar> bar_;
// }; // };
#define USING_PRE_FINALIZER(Class, preFinalizer) \ #define USING_PRE_FINALIZER(Class, preFinalizer) \
public: \ public: \
static bool InvokePreFinalizer(void* object) { \ static bool InvokePreFinalizer(void* object) { \
Class* self = reinterpret_cast<Class*>(object); \ Class* self = reinterpret_cast<Class*>(object); \
if (ThreadHeap::IsHeapObjectAlive(self)) \ if (ThreadHeap::IsHeapObjectAlive(self)) \
return false; \ return false; \
self->Class::preFinalizer(); \ self->Class::preFinalizer(); \
return true; \ return true; \
} \ } \
\ \
private: \ private: \
ThreadState::PrefinalizerRegistration<Class> prefinalizer_dummy_ = this; \ ThreadState::PrefinalizerRegistration<Class> prefinalizer_dummy_{this}; \
using UsingPreFinalizerMacroNeedsTrailingSemiColon = char using UsingPreFinalizerMacroNeedsTrailingSemiColon = char
class PLATFORM_EXPORT BlinkGCObserver { class PLATFORM_EXPORT BlinkGCObserver {
...@@ -153,6 +139,30 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -153,6 +139,30 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
USING_FAST_MALLOC(ThreadState); USING_FAST_MALLOC(ThreadState);
public: public:
// Register the pre-finalizer for the |self| object. The class T must be
// using the USING_PRE_FINALIZER() macro.
template <typename T>
class PrefinalizerRegistration final {
DISALLOW_NEW();
public:
PrefinalizerRegistration(T* self) {
static_assert(sizeof(&T::InvokePreFinalizer) > 0,
"USING_PRE_FINALIZER(T) must be defined.");
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
#if DCHECK_IS_ON()
DCHECK(state->CheckThread());
#endif
DCHECK(!state->SweepForbidden());
DCHECK(std::find(state->ordered_pre_finalizers_.begin(),
state->ordered_pre_finalizers_.end(),
PreFinalizer(self, T::InvokePreFinalizer)) ==
state->ordered_pre_finalizers_.end());
state->ordered_pre_finalizers_.emplace_back(self, T::InvokePreFinalizer);
}
};
// See setGCState() for possible state transitions. // See setGCState() for possible state transitions.
enum GCState { enum GCState {
kNoGCScheduled, kNoGCScheduled,
...@@ -209,6 +219,14 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -209,6 +219,14 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
// Disassociate attached ThreadState from the current thread. The thread // Disassociate attached ThreadState from the current thread. The thread
// can no longer use the garbage collected heap after this call. // can no longer use the garbage collected heap after this call.
//
// When ThreadState is detaching from a non-main thread, its heap is expected
// be empty (because it is going away). Perform registered cleanup tasks and
// garbage collection to sweep away any objects that are left on this heap.
//
// This method asserts that no objects remain after this cleanup. If assertion
// does not hold we crash as we are potentially in the dangling pointer
// situation.
static void DetachCurrentThread(); static void DetachCurrentThread();
static ThreadState* Current() { return **thread_specific_; } static ThreadState* Current() { return **thread_specific_; }
...@@ -236,26 +254,15 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -236,26 +254,15 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
void DetachFromIsolate(); void DetachFromIsolate();
// Returns an |UnifiedHeapController| if ThreadState is attached to a V8 // Returns an |UnifiedHeapController| if ThreadState is attached to a V8
// isolate (see |AttachToIsolate|), nullptr otherwise. // isolate (see |AttachToIsolate|) and nullptr otherwise.
UnifiedHeapController* unified_heap_controller() const { UnifiedHeapController* unified_heap_controller() const {
DCHECK(isolate_); DCHECK(isolate_);
return unified_heap_controller_.get(); return unified_heap_controller_.get();
} }
// When ThreadState is detaching from non-main thread its
// heap is expected to be empty (because it is going away).
// Perform registered cleanup tasks and garbage collection
// to sweep away any objects that are left on this heap.
// We assert that nothing must remain after this cleanup.
// If assertion does not hold we crash as we are potentially
// in the dangling pointer situation.
void RunTerminationGC();
void PerformIdleLazySweep(base::TimeTicks deadline); void PerformIdleLazySweep(base::TimeTicks deadline);
void PerformConcurrentSweep(); void PerformConcurrentSweep();
void ScheduleIdleLazySweep();
void ScheduleConcurrentAndLazySweep();
void SchedulePreciseGC(); void SchedulePreciseGC();
void ScheduleIncrementalGC(BlinkGC::GCReason); void ScheduleIncrementalGC(BlinkGC::GCReason);
void ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType); void ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType);
...@@ -266,8 +273,11 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -266,8 +273,11 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
void SetGCState(GCState); void SetGCState(GCState);
GCState GetGCState() const { return gc_state_; } GCState GetGCState() const { return gc_state_; }
void SetGCPhase(GCPhase); void SetGCPhase(GCPhase);
// Returns true if marking is in progress.
bool IsMarkingInProgress() const { return gc_phase_ == GCPhase::kMarking; } bool IsMarkingInProgress() const { return gc_phase_ == GCPhase::kMarking; }
bool IsSweepingInProgress() const { return gc_phase_ == GCPhase::kSweeping; }
// Returns true if unified heap marking is in progress.
bool IsUnifiedGCMarkingInProgress() const { bool IsUnifiedGCMarkingInProgress() const {
return IsMarkingInProgress() && return IsMarkingInProgress() &&
(current_gc_data_.reason == BlinkGC::GCReason::kUnifiedHeapGC || (current_gc_data_.reason == BlinkGC::GCReason::kUnifiedHeapGC ||
...@@ -275,11 +285,16 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -275,11 +285,16 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC); BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC);
} }
void EnableCompactionForNextGCForTesting(); // Returns true if sweeping is in progress.
bool IsSweepingInProgress() const { return gc_phase_ == GCPhase::kSweeping; }
// Incremental GC. // Returns true if the current GC is a memory reducing GC.
void ScheduleIncrementalMarkingStep(); bool IsMemoryReducingGC() const {
void ScheduleIncrementalMarkingFinalize(); return current_gc_data_.reason ==
BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC;
}
void EnableCompactionForNextGCForTesting();
void IncrementalMarkingStart(BlinkGC::GCReason); void IncrementalMarkingStart(BlinkGC::GCReason);
void IncrementalMarkingStep(BlinkGC::StackState); void IncrementalMarkingStep(BlinkGC::StackState);
...@@ -368,30 +383,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -368,30 +383,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
BlinkGC::StackState stack_state = BlinkGC::StackState stack_state =
BlinkGC::StackState::kNoHeapPointersOnStack); BlinkGC::StackState::kNoHeapPointersOnStack);
// Register the pre-finalizer for the |self| object. The class T must have
// USING_PRE_FINALIZER().
template <typename T>
class PrefinalizerRegistration final {
DISALLOW_NEW();
public:
PrefinalizerRegistration(T* self) {
static_assert(sizeof(&T::InvokePreFinalizer) > 0,
"USING_PRE_FINALIZER(T) must be defined.");
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
#if DCHECK_IS_ON()
DCHECK(state->CheckThread());
#endif
DCHECK(!state->SweepForbidden());
DCHECK(std::find(state->ordered_pre_finalizers_.begin(),
state->ordered_pre_finalizers_.end(),
PreFinalizer(self, T::InvokePreFinalizer)) ==
state->ordered_pre_finalizers_.end());
state->ordered_pre_finalizers_.emplace_back(self, T::InvokePreFinalizer);
}
};
// Returns |true| if |object| resides on this thread's heap. // Returns |true| if |object| resides on this thread's heap.
// It is well-defined to call this method on any heap allocated // It is well-defined to call this method on any heap allocated
// reference, provided its associated heap hasn't been detached // reference, provided its associated heap hasn't been detached
...@@ -410,12 +401,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -410,12 +401,6 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
bool VerifyMarkingEnabled() const; bool VerifyMarkingEnabled() const;
// Returns true if the current GC is a memory reducing GC.
bool IsMemoryReducingGC() {
return current_gc_data_.reason ==
BlinkGC::GCReason::kUnifiedHeapForMemoryReductionGC;
}
private: private:
// Stores whether some ThreadState is currently in incremental marking. // Stores whether some ThreadState is currently in incremental marking.
static AtomicEntryFlag incremental_marking_flag_; static AtomicEntryFlag incremental_marking_flag_;
...@@ -517,6 +502,15 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver { ...@@ -517,6 +502,15 @@ class PLATFORM_EXPORT ThreadState final : private RAILModeObserver {
// Visit all DOM wrappers allocated on this thread. // Visit all DOM wrappers allocated on this thread.
void VisitDOMWrappers(Visitor*); void VisitDOMWrappers(Visitor*);
// Schedule helpers.
void ScheduleIncrementalMarkingStep();
void ScheduleIncrementalMarkingFinalize();
void ScheduleIdleLazySweep();
void ScheduleConcurrentAndLazySweep();
// See |DetachCurrentThread|.
void RunTerminationGC();
// ShouldForceConservativeGC // ShouldForceConservativeGC
// implements the heuristics that are used to determine when to collect // implements the heuristics that are used to determine when to collect
// garbage. // garbage.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment