Commit c3328794 authored by Michael Lippautz, committed by Commit Bot

heap: Fix underflow of allocated bytes since last GC counter

The counter can (rightfully) underflow when an object is promptly freed
directly after performing a GC.

Also fixes an issue where the linear allocation area was accounted as
allocated only when setting up the new area. This can yield a negative
overall size if the counter is sampled before that update happens.
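
For illustration, a minimal standalone sketch (hypothetical code, not the
Blink API) of why the counter is better kept as a signed int64_t than as a
size_t, matching the switch of allocated_bytes_since_prev_gc_ below: a prompt
free right after a GC subtracts bytes that were never added in the current
cycle, which wraps an unsigned counter but only drives a signed one briefly
negative.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Unsigned counter: subtracting bytes that were allocated before the last
  // GC (and thus never added to this cycle's total) wraps around.
  size_t unsigned_counter = 0;
  unsigned_counter -= 64;  // underflows to a huge value near SIZE_MAX
  std::printf("unsigned: %zu\n", unsigned_counter);

  // Signed 64-bit counter: the same prompt free yields a small negative
  // intermediate value that subsequent allocations simply add back to.
  int64_t signed_counter = 0;
  signed_counter -= 64;
  signed_counter += 128;
  assert(signed_counter == 64);  // net allocation since the previous GC
  std::printf("signed: %lld\n", static_cast<long long>(signed_counter));
  return 0;
}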

Bug: 948807
Change-Id: I288b607761d5017192ab9bd3c0fe6a13bef52884
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1631597
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#663892}
parent 131a5334
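
The second part of the fix concerns linear allocation area accounting. The
sketch below (hypothetical types and names, not the Blink classes) models the
invariant the patch establishes in SetAllocationPoint(): the full size of a
new area is credited to the running total as soon as the area is set up, and
the unused tail of the old area is given back when it is retired, so sampling
the counter between the two steps never observes a negative overall size.

#include <cassert>
#include <cstddef>
#include <cstdint>

struct ToyArena {
  int64_t allocated_since_gc = 0;  // what the stats collector tracks
  size_t remaining_in_area = 0;    // unused tail of the current area

  // Models the fixed SetAllocationPoint(): return the unused tail of the old
  // area to the counter, then credit the whole new area up front.
  void SetAllocationPoint(size_t new_area_size) {
    allocated_since_gc -= static_cast<int64_t>(remaining_in_area);
    remaining_in_area = new_area_size;
    allocated_since_gc += static_cast<int64_t>(new_area_size);
  }

  // Bump allocation out of the current area; the bytes were already counted
  // when the area was set up.
  void Allocate(size_t bytes) {
    assert(bytes <= remaining_in_area);
    remaining_in_area -= bytes;
  }
};

int main() {
  ToyArena arena;
  arena.SetAllocationPoint(1024);
  arena.Allocate(256);
  // Sampling here sees 1024 accounted bytes, never a negative value.
  arena.SetAllocationPoint(2048);
  assert(arena.allocated_since_gc == 256 + 2048);
  return 0;
}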
@@ -779,10 +779,10 @@ void NormalPageArena::SetRemainingAllocationSize(
     size_t new_remaining_allocation_size) {
   remaining_allocation_size_ = new_remaining_allocation_size;
 
-  // Sync recorded allocated-object size:
-  //  - if previous alloc checkpoint is larger, allocation size has increased.
-  //  - if smaller, a net reduction in size since last call to
-  //    updateRemainingAllocationSize().
+  // Sync recorded allocated-object size using the recorded checkpoint in
+  // |remaining_allocation_size_|:
+  // - If checkpoint is larger, the allocated size has increased.
+  // - The allocated size has decreased, otherwise.
   if (last_remaining_allocation_size_ > remaining_allocation_size_) {
     GetThreadState()->Heap().IncreaseAllocatedObjectSize(
         last_remaining_allocation_size_ - remaining_allocation_size_);
@@ -793,15 +793,6 @@ void NormalPageArena::SetRemainingAllocationSize(
   last_remaining_allocation_size_ = remaining_allocation_size_;
 }
 
-void NormalPageArena::UpdateRemainingAllocationSize() {
-  if (last_remaining_allocation_size_ > RemainingAllocationSize()) {
-    GetThreadState()->Heap().IncreaseAllocatedObjectSize(
-        last_remaining_allocation_size_ - RemainingAllocationSize());
-    last_remaining_allocation_size_ = RemainingAllocationSize();
-  }
-  DCHECK_EQ(last_remaining_allocation_size_, RemainingAllocationSize());
-}
-
 void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
 #if DCHECK_IS_ON()
   if (point) {
@@ -811,13 +802,18 @@ void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
     DCHECK_LE(size, static_cast<NormalPage*>(page)->PayloadSize());
   }
 #endif
+  // Free and clear the old linear allocation area.
   if (HasCurrentAllocationArea()) {
     AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize());
+    SetRemainingAllocationSize(0);
   }
-  UpdateRemainingAllocationSize();
+  // Set up a new linear allocation area.
   current_allocation_point_ = point;
   last_remaining_allocation_size_ = remaining_allocation_size_ = size;
   if (point) {
+    // Only, update allocated size and object start bitmap if the area is
+    // actually set up with a non-null address.
+    GetThreadState()->Heap().IncreaseAllocatedObjectSize(size);
     // Current allocation point can never be part of the object bitmap start
     // because the area can grow or shrink. Will be added back before a GC when
     // clearing the allocation point.
@@ -843,7 +839,6 @@ Address NormalPageArena::OutOfLineAllocateImpl(size_t allocation_size,
     return AllocateLargeObject(allocation_size, gc_info_index);
 
   // 2. Try to allocate from a free list.
-  UpdateRemainingAllocationSize();
   Address result = AllocateFromFreeList(allocation_size, gc_info_index);
   if (result)
     return result;
...
@@ -885,7 +885,6 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
   void SetAllocationPoint(Address, size_t);
   void SetRemainingAllocationSize(size_t);
-  void UpdateRemainingAllocationSize();
 
   FreeList free_list_;
   Address current_allocation_point_;
...
@@ -23,17 +23,12 @@ void ThreadHeapStatsCollector::IncreaseCompactionFreedPages(size_t pages) {
 void ThreadHeapStatsCollector::IncreaseAllocatedObjectSize(size_t bytes) {
   // The current GC may not have been started. This is ok as recording considers
   // the whole time range between garbage collections.
-  allocated_bytes_since_prev_gc_ += bytes;
+  allocated_bytes_since_prev_gc_ += static_cast<int64_t>(bytes);
 }
 
 void ThreadHeapStatsCollector::DecreaseAllocatedObjectSize(size_t bytes) {
   // See IncreaseAllocatedObjectSize.
-  // TODO(mlippautz): Fix underflow.
-  if (bytes > allocated_bytes_since_prev_gc_)
-    return;
-  allocated_bytes_since_prev_gc_ -= bytes;
+  allocated_bytes_since_prev_gc_ -= static_cast<int64_t>(bytes);
 }
 
 void ThreadHeapStatsCollector::AllocatedObjectSizeSafepoint() {
@@ -102,7 +97,11 @@ void ThreadHeapStatsCollector::UpdateReason(BlinkGC::GCReason reason) {
 }
 
 size_t ThreadHeapStatsCollector::object_size_in_bytes() const {
-  return previous().marked_bytes + allocated_bytes_since_prev_gc_;
+  DCHECK_GE(static_cast<int64_t>(previous().marked_bytes) +
+                allocated_bytes_since_prev_gc_,
+            0);
+  return static_cast<size_t>(static_cast<int64_t>(previous().marked_bytes) +
+                             allocated_bytes_since_prev_gc_);
 }
 
 double ThreadHeapStatsCollector::estimated_marking_time_in_seconds() const {
@@ -138,7 +137,7 @@ TimeDelta ThreadHeapStatsCollector::Event::sweeping_time() const {
          scope_data[kLazySweepInIdle] + scope_data[kLazySweepOnAllocation];
 }
 
-size_t ThreadHeapStatsCollector::allocated_bytes_since_prev_gc() const {
+int64_t ThreadHeapStatsCollector::allocated_bytes_since_prev_gc() const {
   return allocated_bytes_since_prev_gc_;
 }
...
@@ -258,7 +258,7 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
   TimeDelta estimated_marking_time() const;
 
   size_t marked_bytes() const;
-  size_t allocated_bytes_since_prev_gc() const;
+  int64_t allocated_bytes_since_prev_gc() const;
   size_t allocated_space_bytes() const;
@@ -287,9 +287,8 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
   Event previous_;
 
   // Allocated bytes since the last garbage collection. These bytes are reset
-  // after marking as they should be accounted in marked_bytes then (which are
-  // only available after sweeping though).
-  size_t allocated_bytes_since_prev_gc_ = 0;
+  // after marking as they are accounted in marked_bytes then.
+  int64_t allocated_bytes_since_prev_gc_ = 0;
 
   // Allocated space in bytes for all arenas.
   size_t allocated_space_bytes_ = 0;
...
@@ -1712,6 +1712,8 @@ TEST(HeapTest, BasicFunctionality) {
       persistents[persistent_count++] = new Persistent<DynamicallySizedObject>(
          DynamicallySizedObject::Create(size));
       slack += 4;
+      // The allocations in the loop may trigger GC with lazy sweeping.
+      CompleteSweepingIfNeeded();
       CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(),
                      slack);
       if (test_pages_allocated) {
...
@@ -35,4 +35,9 @@ void ClearOutOldGarbage() {
   }
 }
 
+void CompleteSweepingIfNeeded() {
+  if (ThreadState::Current()->IsSweepingInProgress())
+    ThreadState::Current()->CompleteSweep();
+}
+
 }  // namespace blink
...
@@ -20,6 +20,7 @@ void PreciselyCollectGarbage();
 void ConservativelyCollectGarbage(
     BlinkGC::SweepingType sweeping_type = BlinkGC::kEagerSweeping);
 void ClearOutOldGarbage();
+void CompleteSweepingIfNeeded();
 
 template <typename T>
 class ObjectWithCallbackBeforeInitializer
...
@@ -396,7 +396,7 @@ bool ThreadState::JudgeGCThreshold(size_t allocated_object_size_threshold,
   // If the allocated object size or the total memory size is small, don't
   // trigger a GC.
   if (heap_->stats_collector()->allocated_bytes_since_prev_gc() <
-          allocated_object_size_threshold ||
+          static_cast<int64_t>(allocated_object_size_threshold) ||
       TotalMemorySize() < total_memory_size_threshold)
     return false;
@@ -916,10 +916,13 @@ void UpdateTraceCounters(const ThreadHeapStatsCollector& stats_collector) {
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "BlinkGC.AllocatedSpaceKB",
                  CappedSizeInKB(stats_collector.allocated_space_bytes()));
-  TRACE_COUNTER1(
-      TRACE_DISABLED_BY_DEFAULT("blink_gc"),
-      "BlinkGC.AllocatedObjectSizeSincePreviousGCKB",
-      CappedSizeInKB(stats_collector.allocated_bytes_since_prev_gc()));
+  size_t allocated_bytes_since_prev_gc =
+      stats_collector.allocated_bytes_since_prev_gc() > 0
+          ? static_cast<size_t>(stats_collector.allocated_bytes_since_prev_gc())
+          : 0;
+  TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+                 "BlinkGC.AllocatedObjectSizeSincePreviousGCKB",
+                 CappedSizeInKB(allocated_bytes_since_prev_gc));
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "PartitionAlloc.TotalSizeOfCommittedPagesKB",
                  CappedSizeInKB(WTF::Partitions::TotalSizeOfCommittedPages()));
@@ -964,7 +967,8 @@ void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
   constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
   if (WTF::TimeTicks::IsHighResolution() &&
       (event.object_size_in_bytes_before_sweeping >
-       kMinObjectSizeForReportingThroughput)) {
+       kMinObjectSizeForReportingThroughput) &&
+      !marking_duration.is_zero()) {
     DCHECK_GT(marking_duration.InMillisecondsF(), 0.0);
     // For marking throughput computation all marking steps, independent of
     // whether they are triggered from V8 or Blink, are relevant.
@@ -1633,8 +1637,13 @@ void ThreadState::MarkPhaseEpilogue(BlinkGC::MarkingType marking_type) {
   if (ShouldVerifyMarking())
     VerifyMarking(marking_type);
 
-  ProcessHeap::DecreaseTotalAllocatedObjectSize(
-      Heap().stats_collector()->allocated_bytes_since_prev_gc());
+  if (Heap().stats_collector()->allocated_bytes_since_prev_gc() > 0) {
+    ProcessHeap::DecreaseTotalAllocatedObjectSize(static_cast<size_t>(
+        Heap().stats_collector()->allocated_bytes_since_prev_gc()));
+  } else {
+    ProcessHeap::IncreaseTotalAllocatedObjectSize(static_cast<size_t>(
+        -Heap().stats_collector()->allocated_bytes_since_prev_gc()));
+  }
   ProcessHeap::DecreaseTotalMarkedObjectSize(
       Heap().stats_collector()->previous().marked_bytes);
   ProcessHeap::IncreaseTotalMarkedObjectSize(marked_bytes);
...
@@ -182,7 +182,7 @@ bool UnifiedHeapController::IsRootForNonTracingGC(
 }
 
 void UnifiedHeapController::UpdateAllocatedObjectSize(
-    size_t allocated_bytes_since_prev_gc) {
+    int64_t allocated_bytes_since_prev_gc) {
   int64_t delta =
       allocated_bytes_since_prev_gc - old_allocated_bytes_since_prev_gc_;
   old_allocated_bytes_since_prev_gc_ = allocated_bytes_since_prev_gc;
...
@@ -48,7 +48,7 @@ class PLATFORM_EXPORT UnifiedHeapController final
   ThreadState* thread_state() const { return thread_state_; }
 
   // Forwarded from ThreadHeapStatsCollector.
-  void UpdateAllocatedObjectSize(size_t);
+  void UpdateAllocatedObjectSize(int64_t);
 
  private:
   static bool IsRootForNonTracingGCInternal(