Commit 472927c5 authored by Michael Lippautz, committed by Commit Bot

heap: Remove dead code in compaction and update style

Change-Id: I44f7b77fc6ae86cbc3ad54b12e2229e5e6db1aee
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1581339
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Kentaro Hara <haraken@chromium.org>
Auto-Submit: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#653531}
parent d16297b6
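For readers skimming the diff below, here is a minimal standalone sketch of the style cleanup it applies (illustrative code only, not Blink code; ExampleCompactor, ExampleHeap, and the member names are made up): scalar members get in-class default initializers so the constructor initializer list only mentions the value that actually varies, and a pointer member that is never reseated after construction is declared as a const pointer.

#include <cstddef>

class ExampleHeap;  // stand-in for a heap type such as ThreadHeap

class ExampleCompactor {
 public:
  // Before the cleanup, every default had to be spelled out:
  //   ExampleCompactor(ExampleHeap* heap)
  //       : heap_(heap), do_compact_(false), gc_count_(0), free_list_size_(0) {}
  // After it, only the non-default member remains in the initializer list.
  explicit ExampleCompactor(ExampleHeap* heap) : heap_(heap) {}

 private:
  ExampleHeap* const heap_;   // never reseated, so the pointer itself is const
  bool do_compact_ = false;   // defaults live next to the declarations
  std::size_t gc_count_ = 0;
  std::size_t free_list_size_ = 0;
};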
@@ -124,12 +124,6 @@ class HeapCompact::MovableObjectFixups final {
         slot, std::pair<void*, MovingObjectCallback>(callback_data, callback));
   }
-  void RemoveFixupCallback(MovableReference* slot) {
-    auto it = fixup_callbacks_.find(slot);
-    if (it != fixup_callbacks_.end())
-      fixup_callbacks_.erase(it);
-  }
   void RelocateInteriorFixups(Address from, Address to, size_t size) {
     SparseHeapBitmap* range = interiors_->HasRange(from, size);
     if (LIKELY(!range))
@@ -271,7 +265,7 @@ class HeapCompact::MovableObjectFixups final {
  private:
   void VerifyUpdatedSlot(MovableReference* slot);
-  ThreadHeap* heap_;
+  ThreadHeap* const heap_;
   // Tracking movable and updatable references. For now, we keep a
   // map which for each movable object, recording the slot that
@@ -321,13 +315,7 @@ void HeapCompact::MovableObjectFixups::VerifyUpdatedSlot(
 #endif  // DCHECK_IS_ON()
 }
-HeapCompact::HeapCompact(ThreadHeap* heap)
-    : heap_(heap),
-      do_compact_(false),
-      gc_count_since_last_compaction_(0),
-      free_list_size_(0),
-      compactable_arenas_(0u),
-      last_fixup_count_for_testing_(0) {
+HeapCompact::HeapCompact(ThreadHeap* heap) : heap_(heap) {
   // The heap compaction implementation assumes the contiguous range,
   //
   //   [Vector1ArenaIndex, HashTableArenaIndex]
@@ -420,13 +408,6 @@ void HeapCompact::Initialize(ThreadState* state) {
   force_compaction_gc_ = false;
 }
-void HeapCompact::RemoveSlot(MovableReference* slot) {
-  auto it = traced_slots_.find(slot);
-  if (it != traced_slots_.end())
-    traced_slots_.erase(it);
-  Fixups().RemoveFixupCallback(slot);
-}
 void HeapCompact::RegisterMovingObjectReference(MovableReference* slot) {
   CHECK(heap_->LookupPageForAddress(reinterpret_cast<Address>(slot)));
@@ -52,10 +52,6 @@ class PLATFORM_EXPORT HeapCompact final {
   explicit HeapCompact(ThreadHeap*);
   ~HeapCompact();
-  // Remove slot from traced_slots_ when a registered slot is destructed by
-  // mutator
-  void RemoveSlot(MovableReference* slot);
   // Determine if a GC for the given type and reason should also perform
   // additional heap compaction.
   //
@@ -143,13 +139,7 @@ class PLATFORM_EXPORT HeapCompact final {
  private:
   class MovableObjectFixups;
-  // Sample the amount of fragmentation and heap memory currently residing
-  // on the freelists of the arenas we're able to compact. The computed
-  // numbers will be subsequently used to determine if a heap compaction
-  // is on order (shouldCompact().)
-  void UpdateHeapResidency();
   // Parameters controlling when compaction should be done:
+  static bool force_compaction_gc_;
   // Number of GCs that must have passed since last compaction GC.
   static const int kGCCountSinceLastCompactionThreshold = 10;
@@ -158,32 +148,35 @@ class PLATFORM_EXPORT HeapCompact final {
   // should be considered.
   static const size_t kFreeListSizeThreshold = 512 * 1024;
-  ThreadHeap* const heap_;
+  // Sample the amount of fragmentation and heap memory currently residing
+  // on the freelists of the arenas we're able to compact. The computed
+  // numbers will be subsequently used to determine if a heap compaction
+  // is on order (shouldCompact().)
+  void UpdateHeapResidency();
   MovableObjectFixups& Fixups();
+  ThreadHeap* const heap_;
   std::unique_ptr<MovableObjectFixups> fixups_;
+  // The set is to remember slots that traced during
+  // marking phases. The mapping between the slots and the backing stores are
+  // created at the atomic pause phase.
+  HashSet<MovableReference*> traced_slots_;
   // Set to |true| when a compacting sweep will go ahead.
-  bool do_compact_;
-  size_t gc_count_since_last_compaction_;
+  bool do_compact_ = false;
+  size_t gc_count_since_last_compaction_ = 0;
   // Last reported freelist size, across all compactable arenas.
-  size_t free_list_size_;
+  size_t free_list_size_ = 0;
+  size_t last_fixup_count_for_testing_ = 0;
   // If compacting, i'th heap arena will be compacted
   // if corresponding bit is set. Indexes are in
   // the range of BlinkGC::ArenaIndices.
-  unsigned compactable_arenas_;
-  // The set is to remember slots that traced during
-  // marking phases. The mapping between the slots and the backing stores are
-  // created at the atomic pause phase.
-  HashSet<MovableReference*> traced_slots_;
-  size_t last_fixup_count_for_testing_;
-  static bool force_compaction_gc_;
+  unsigned compactable_arenas_ = 0u;
 };
 }  // namespace blink