Commit c3328794 authored by Michael Lippautz, committed by Commit Bot

heap: Fix underflow of allocated bytes since last GC counter

The counter can legitimately go negative when an object is promptly freed
right after a GC, so it is now tracked as a signed value.
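
A minimal sketch of that scenario (illustrative names, not the actual
Blink API): after a GC resets the counter, a prompt free has no matching
increase in the same cycle, so an unsigned counter wraps around while a
signed counter simply goes negative.

  #include <cstdint>
  #include <iostream>

  // Illustrative stand-in for the per-GC allocation counter.
  struct StatsCollector {
    int64_t allocated_bytes_since_prev_gc = 0;  // was size_t

    void IncreaseAllocatedObjectSize(size_t bytes) {
      allocated_bytes_since_prev_gc += static_cast<int64_t>(bytes);
    }
    void DecreaseAllocatedObjectSize(size_t bytes) {
      allocated_bytes_since_prev_gc -= static_cast<int64_t>(bytes);
    }
  };

  int main() {
    StatsCollector stats;
    // A GC just finished and reset the counter. An object allocated
    // before that GC is promptly freed now: the decrease has no
    // matching increase in the current cycle.
    stats.DecreaseAllocatedObjectSize(64);
    // A size_t counter would wrap to 2^64 - 64 here; the signed
    // counter reads -64 and later updates fold it back in.
    std::cout << stats.allocated_bytes_since_prev_gc << "\n";
  }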

Also fixes an issue where the linear allocation area was accounted as
allocated only when setting up the next area; measuring before that
update could yield a negative overall size. The area is now accounted
eagerly when it is set up.
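
For the second issue, a self-contained toy model (invented names, not
the NormalPageArena implementation) contrasts lazy accounting, where
linear allocation area (LAB) usage is folded into the counter only when
the area is retired, with eager accounting, where the whole area is
charged at setup:

  #include <cassert>
  #include <cstdint>
  #include <iostream>

  // Lazy: bump allocations are counted only at retirement, but prompt
  // frees are recorded immediately, so the counter can dip below zero.
  struct LazyArena {
    int64_t counter = 0;  // allocated bytes since the previous GC
    size_t lab_used = 0;

    void BumpAllocate(size_t n) { lab_used += n; }  // not counted yet
    void PromptlyFree(size_t n) {
      lab_used -= n;
      counter -= static_cast<int64_t>(n);  // counted immediately
    }
    void RetireLab() {  // usage is only folded in here
      counter += static_cast<int64_t>(lab_used);
      lab_used = 0;
    }
  };

  // Eager: the full area is charged at setup; prompt frees shrink the
  // charged area; retirement returns only the unused remainder.
  struct EagerArena {
    int64_t counter = 0;
    size_t lab_size = 0;
    size_t lab_used = 0;

    void SetUpLab(size_t size) {
      counter += static_cast<int64_t>(size);
      lab_size = size;
      lab_used = 0;
    }
    void BumpAllocate(size_t n) { lab_used += n; }  // pre-charged
    void PromptlyFree(size_t n) {
      lab_size -= n;  // freed bytes leave the charged area
      lab_used -= n;
      counter -= static_cast<int64_t>(n);
    }
    void RetireLab() {
      counter -= static_cast<int64_t>(lab_size - lab_used);
      lab_size = lab_used = 0;
    }
  };

  int main() {
    LazyArena lazy;
    lazy.BumpAllocate(128);
    lazy.PromptlyFree(128);
    // Reads -128 until RetireLab(); a sum like marked_bytes + counter
    // can therefore transiently appear negative.
    std::cout << lazy.counter << "\n";

    EagerArena eager;
    eager.SetUpLab(4096);
    eager.BumpAllocate(128);
    eager.PromptlyFree(128);
    std::cout << eager.counter << "\n";  // 3968, never negative here
    eager.RetireLab();
    assert(eager.counter == 0);  // nothing kept from this LAB
  }

In the toy, eager charging keeps the counter non-negative for LAB
traffic; the real fix additionally makes the counter signed, since
prompt frees of objects from before the last GC can still legitimately
drive it below zero.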

Bug: 948807
Change-Id: I288b607761d5017192ab9bd3c0fe6a13bef52884
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1631597
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#663892}
parent 131a5334
@@ -779,10 +779,10 @@ void NormalPageArena::SetRemainingAllocationSize(
     size_t new_remaining_allocation_size) {
   remaining_allocation_size_ = new_remaining_allocation_size;
 
-  // Sync recorded allocated-object size:
-  // - if previous alloc checkpoint is larger, allocation size has increased.
-  // - if smaller, a net reduction in size since last call to
-  //   updateRemainingAllocationSize().
+  // Sync the recorded allocated-object size using the checkpoint in
+  // |last_remaining_allocation_size_|:
+  // - If the checkpoint is larger, the allocated size has increased.
+  // - Otherwise, the allocated size has decreased.
   if (last_remaining_allocation_size_ > remaining_allocation_size_) {
     GetThreadState()->Heap().IncreaseAllocatedObjectSize(
         last_remaining_allocation_size_ - remaining_allocation_size_);
@@ -793,15 +793,6 @@ void NormalPageArena::SetRemainingAllocationSize(
   last_remaining_allocation_size_ = remaining_allocation_size_;
 }
 
-void NormalPageArena::UpdateRemainingAllocationSize() {
-  if (last_remaining_allocation_size_ > RemainingAllocationSize()) {
-    GetThreadState()->Heap().IncreaseAllocatedObjectSize(
-        last_remaining_allocation_size_ - RemainingAllocationSize());
-    last_remaining_allocation_size_ = RemainingAllocationSize();
-  }
-  DCHECK_EQ(last_remaining_allocation_size_, RemainingAllocationSize());
-}
-
 void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
 #if DCHECK_IS_ON()
   if (point) {
@@ -811,13 +802,18 @@ void NormalPageArena::SetAllocationPoint(Address point, size_t size) {
     DCHECK_LE(size, static_cast<NormalPage*>(page)->PayloadSize());
   }
 #endif
+  // Free and clear the old linear allocation area.
   if (HasCurrentAllocationArea()) {
     AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize());
+    SetRemainingAllocationSize(0);
   }
-  UpdateRemainingAllocationSize();
+  // Set up a new linear allocation area.
   current_allocation_point_ = point;
   last_remaining_allocation_size_ = remaining_allocation_size_ = size;
   if (point) {
+    // Only update allocated size and object start bitmap if the area is
+    // actually set up with a non-null address.
     GetThreadState()->Heap().IncreaseAllocatedObjectSize(size);
     // Current allocation point can never be part of the object start bitmap
     // because the area can grow or shrink. Will be added back before a GC when
     // clearing the allocation point.
@@ -843,7 +839,6 @@ Address NormalPageArena::OutOfLineAllocateImpl(size_t allocation_size,
     return AllocateLargeObject(allocation_size, gc_info_index);
 
   // 2. Try to allocate from a free list.
-  UpdateRemainingAllocationSize();
   Address result = AllocateFromFreeList(allocation_size, gc_info_index);
   if (result)
     return result;
......
@@ -885,7 +885,6 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
   void SetAllocationPoint(Address, size_t);
   void SetRemainingAllocationSize(size_t);
-  void UpdateRemainingAllocationSize();
 
   FreeList free_list_;
   Address current_allocation_point_;
......
@@ -23,17 +23,12 @@ void ThreadHeapStatsCollector::IncreaseCompactionFreedPages(size_t pages) {
 void ThreadHeapStatsCollector::IncreaseAllocatedObjectSize(size_t bytes) {
   // The current GC may not have been started. This is ok as recording considers
   // the whole time range between garbage collections.
-  allocated_bytes_since_prev_gc_ += bytes;
+  allocated_bytes_since_prev_gc_ += static_cast<int64_t>(bytes);
 }
 
 void ThreadHeapStatsCollector::DecreaseAllocatedObjectSize(size_t bytes) {
   // See IncreaseAllocatedObjectSize.
-  // TODO(mlippautz): Fix underflow.
-  if (bytes > allocated_bytes_since_prev_gc_)
-    return;
-  allocated_bytes_since_prev_gc_ -= bytes;
+  allocated_bytes_since_prev_gc_ -= static_cast<int64_t>(bytes);
 }
 
 void ThreadHeapStatsCollector::AllocatedObjectSizeSafepoint() {
@@ -102,7 +97,11 @@ void ThreadHeapStatsCollector::UpdateReason(BlinkGC::GCReason reason) {
 }
 
 size_t ThreadHeapStatsCollector::object_size_in_bytes() const {
-  return previous().marked_bytes + allocated_bytes_since_prev_gc_;
+  DCHECK_GE(static_cast<int64_t>(previous().marked_bytes) +
+                allocated_bytes_since_prev_gc_,
+            0);
+  return static_cast<size_t>(static_cast<int64_t>(previous().marked_bytes) +
+                             allocated_bytes_since_prev_gc_);
 }
 
 double ThreadHeapStatsCollector::estimated_marking_time_in_seconds() const {
@@ -138,7 +137,7 @@ TimeDelta ThreadHeapStatsCollector::Event::sweeping_time() const {
          scope_data[kLazySweepInIdle] + scope_data[kLazySweepOnAllocation];
 }
 
-size_t ThreadHeapStatsCollector::allocated_bytes_since_prev_gc() const {
+int64_t ThreadHeapStatsCollector::allocated_bytes_since_prev_gc() const {
   return allocated_bytes_since_prev_gc_;
 }
......
@@ -258,7 +258,7 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
   TimeDelta estimated_marking_time() const;
   size_t marked_bytes() const;
-  size_t allocated_bytes_since_prev_gc() const;
+  int64_t allocated_bytes_since_prev_gc() const;
   size_t allocated_space_bytes() const;
@@ -287,9 +287,8 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
   Event previous_;
 
   // Allocated bytes since the last garbage collection. These bytes are reset
-  // after marking as they should be accounted in marked_bytes then (which are
-  // only available after sweeping though).
-  size_t allocated_bytes_since_prev_gc_ = 0;
+  // after marking as they are accounted in marked_bytes then.
+  int64_t allocated_bytes_since_prev_gc_ = 0;
 
   // Allocated space in bytes for all arenas.
   size_t allocated_space_bytes_ = 0;
......
@@ -1712,6 +1712,8 @@ TEST(HeapTest, BasicFunctionality) {
     persistents[persistent_count++] = new Persistent<DynamicallySizedObject>(
         DynamicallySizedObject::Create(size));
     slack += 4;
+    // The allocations in the loop may trigger GC with lazy sweeping.
+    CompleteSweepingIfNeeded();
     CheckWithSlack(base_level + total, heap.ObjectPayloadSizeForTesting(),
                    slack);
     if (test_pages_allocated) {
......
@@ -35,4 +35,9 @@ void ClearOutOldGarbage() {
   }
 }
 
+void CompleteSweepingIfNeeded() {
+  if (ThreadState::Current()->IsSweepingInProgress())
+    ThreadState::Current()->CompleteSweep();
+}
+
 }  // namespace blink
@@ -20,6 +20,7 @@ void PreciselyCollectGarbage();
 void ConservativelyCollectGarbage(
     BlinkGC::SweepingType sweeping_type = BlinkGC::kEagerSweeping);
 void ClearOutOldGarbage();
+void CompleteSweepingIfNeeded();
 
 template <typename T>
 class ObjectWithCallbackBeforeInitializer
......
@@ -396,7 +396,7 @@ bool ThreadState::JudgeGCThreshold(size_t allocated_object_size_threshold,
   // If the allocated object size or the total memory size is small, don't
   // trigger a GC.
   if (heap_->stats_collector()->allocated_bytes_since_prev_gc() <
-          allocated_object_size_threshold ||
+          static_cast<int64_t>(allocated_object_size_threshold) ||
       TotalMemorySize() < total_memory_size_threshold)
     return false;
@@ -916,10 +916,13 @@ void UpdateTraceCounters(const ThreadHeapStatsCollector& stats_collector) {
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "BlinkGC.AllocatedSpaceKB",
                  CappedSizeInKB(stats_collector.allocated_space_bytes()));
-  TRACE_COUNTER1(
-      TRACE_DISABLED_BY_DEFAULT("blink_gc"),
-      "BlinkGC.AllocatedObjectSizeSincePreviousGCKB",
-      CappedSizeInKB(stats_collector.allocated_bytes_since_prev_gc()));
+  size_t allocated_bytes_since_prev_gc =
+      stats_collector.allocated_bytes_since_prev_gc() > 0
+          ? static_cast<size_t>(stats_collector.allocated_bytes_since_prev_gc())
+          : 0;
+  TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
+                 "BlinkGC.AllocatedObjectSizeSincePreviousGCKB",
+                 CappedSizeInKB(allocated_bytes_since_prev_gc));
   TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"),
                  "PartitionAlloc.TotalSizeOfCommittedPagesKB",
                  CappedSizeInKB(WTF::Partitions::TotalSizeOfCommittedPages()));
@@ -964,7 +967,8 @@ void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
   constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
   if (WTF::TimeTicks::IsHighResolution() &&
       (event.object_size_in_bytes_before_sweeping >
-       kMinObjectSizeForReportingThroughput)) {
+       kMinObjectSizeForReportingThroughput) &&
+      !marking_duration.is_zero()) {
     DCHECK_GT(marking_duration.InMillisecondsF(), 0.0);
     // For marking throughput computation all marking steps, independent of
     // whether they are triggered from V8 or Blink, are relevant.
@@ -1633,8 +1637,13 @@ void ThreadState::MarkPhaseEpilogue(BlinkGC::MarkingType marking_type) {
   if (ShouldVerifyMarking())
     VerifyMarking(marking_type);
 
-  ProcessHeap::DecreaseTotalAllocatedObjectSize(
-      Heap().stats_collector()->allocated_bytes_since_prev_gc());
+  if (Heap().stats_collector()->allocated_bytes_since_prev_gc() > 0) {
+    ProcessHeap::DecreaseTotalAllocatedObjectSize(static_cast<size_t>(
+        Heap().stats_collector()->allocated_bytes_since_prev_gc()));
+  } else {
+    ProcessHeap::IncreaseTotalAllocatedObjectSize(static_cast<size_t>(
+        -Heap().stats_collector()->allocated_bytes_since_prev_gc()));
+  }
   ProcessHeap::DecreaseTotalMarkedObjectSize(
       Heap().stats_collector()->previous().marked_bytes);
   ProcessHeap::IncreaseTotalMarkedObjectSize(marked_bytes);
......
@@ -182,7 +182,7 @@ bool UnifiedHeapController::IsRootForNonTracingGC(
 }
 
 void UnifiedHeapController::UpdateAllocatedObjectSize(
-    size_t allocated_bytes_since_prev_gc) {
+    int64_t allocated_bytes_since_prev_gc) {
   int64_t delta =
       allocated_bytes_since_prev_gc - old_allocated_bytes_since_prev_gc_;
   old_allocated_bytes_since_prev_gc_ = allocated_bytes_since_prev_gc;
......
@@ -48,7 +48,7 @@ class PLATFORM_EXPORT UnifiedHeapController final
   ThreadState* thread_state() const { return thread_state_; }
 
   // Forwarded from ThreadHeapStatsCollector.
-  void UpdateAllocatedObjectSize(size_t);
+  void UpdateAllocatedObjectSize(int64_t);
 
  private:
   static bool IsRootForNonTracingGCInternal(
......