Commit 6f1bcac9 authored by Omer Katz, committed by Commit Bot

heap: Make sure all concurrently marked bytes are reported

CL [1] was submitted on August 24th as an attempt to address foreground
marking time regression caused by migrating to the jobs api.
That CL made it such that if there aren't many objects left in the
marking worklist, a concurrent marker might die without reporting its
marked bytes count. As a result, the marked bytes used to compute object
size in the heap statistics would be lower than it should be. If
collection backing stores are also promptly freed, the overall
allocated bytes since previous gc statistic could be negative and
bigger in absolute value than the marked bytes of the previous GC.

Drive-by: check if should yield before popping from the worklist.

[1] https://chromium-review.googlesource.com/c/chromium/src/+/2368498

Bug: 1124159
Change-Id: Ifa0c74ffc9f521679a21fc0109f2e2f3796700e3
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2404856
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#806585}
parent 164e95b3
......@@ -313,11 +313,15 @@ bool DrainWorklist(Worklist* worklist,
Callback callback,
YieldPredicate should_yield,
int task_id) {
size_t processed_callback_count = 0;
// For concurrent markers, should_yield also reports marked bytes.
if (should_yield()) {
return false;
}
size_t processed_callback_count = kDeadlineCheckInterval;
typename Worklist::EntryType item;
while (worklist->Pop(task_id, &item)) {
callback(item);
if (processed_callback_count-- == 0) {
if (--processed_callback_count == 0) {
if (should_yield()) {
return false;
}
......
......@@ -304,6 +304,11 @@ ConcurrentMarkingVisitor::ConcurrentMarkingVisitor(ThreadState* state,
DCHECK_NE(WorklistTaskId::MutatorThread, task_id);
}
ConcurrentMarkingVisitor::~ConcurrentMarkingVisitor() {
// A concurrent marking visitor must have reported every byte it marked
// before it dies; otherwise heap statistics undercount marked bytes and the
// "allocated bytes since previous GC" figure can go negative (crbug/1124159).
// NOTE(review): presumably last_marked_bytes_ is the portion of
// marked_bytes_ already flushed to the scheduler — confirm against
// RecentlyMarkedBytes() in marking_visitor.cc.
DCHECK_EQ(marked_bytes_, last_marked_bytes_);
}
void ConcurrentMarkingVisitor::FlushWorklists() {
// Flush marking worklists for further marking on the mutator thread.
marking_worklist_.FlushToGlobal();
......
......@@ -230,7 +230,7 @@ ALWAYS_INLINE void MarkingVisitor::TraceMarkedBackingStore(const void* value) {
class PLATFORM_EXPORT ConcurrentMarkingVisitor : public MarkingVisitorBase {
public:
ConcurrentMarkingVisitor(ThreadState*, MarkingMode, int);
~ConcurrentMarkingVisitor() override = default;
~ConcurrentMarkingVisitor() override;
virtual void FlushWorklists();
......
......@@ -1746,6 +1746,9 @@ void ThreadState::PerformConcurrentMark(base::JobDelegate* job) {
Heap().AdvanceConcurrentMarking(concurrent_visitor.get(), job,
marking_scheduling_.get());
marking_scheduling_->AddConcurrentlyMarkedBytes(
concurrent_visitor->RecentlyMarkedBytes());
concurrent_visitor->FlushWorklists();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment