Commit c367d33c authored by Michael Lippautz, committed by Commit Bot

Revert "heap: Stricter checks for backing modifications"

This reverts commit 7a19b1f0.

Reason for revert: Temporarily revert fix to get more information on the crasher.

Original change's description:
> heap: Stricter checks for backing modifications
> 
> Backings were allowed to be modified (shrinking, freeing) when
> incremental marking was running and the backing was not marked yet. This
> can be problematic when write barriers on nested containers fire
> eagerly, registering slots.
> 
> Bug: 918064
> Change-Id: Id6fa8ed5b3c3c2bdc04ed8f812f1b883d4e0d362
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1602498
> Reviewed-by: Kentaro Hara <haraken@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#658101}

TBR=haraken@chromium.org,mlippautz@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: 918064
Change-Id: I01677e8d48a35e1c95fabe7133297c569eadee02
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1605910
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#658580}
parent 82728505
...@@ -6,57 +6,30 @@ ...@@ -6,57 +6,30 @@
namespace blink { namespace blink {
namespace {
struct BackingModifier {
bool can_modify;
BasePage* const page;
HeapObjectHeader* const header;
};
BackingModifier CanFreeOrShrinkBacking(ThreadState* const state,
void* address) {
// - |SweepForbidden| protects against modifying objects from destructors.
// - |in_atomic_pause| protects against modifying objects from within the GC.
// This can
// e.g. happen when hash table buckets that have containers inlined are
// freed during weakness processing.
// - |IsMarkingInProgress| protects against incremental marking which may have
// registered callbacks.
if (state->SweepForbidden() || state->in_atomic_pause() ||
state->IsMarkingInProgress())
return {false, nullptr, nullptr};
// - Don't adjust large objects because their page is never reused.
// - Don't free backings allocated on other threads.
BasePage* page = PageFromObject(address);
if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
return {false, nullptr, nullptr};
HeapObjectHeader* const header = HeapObjectHeader::FromPayload(address);
// - Guards against pages that have not been swept. Technically, it should be
// fine to modify those backings. We bail out to maintain the invariant that
// no marked backing is modified.
if (header->IsMarked())
return {false, nullptr, nullptr};
return {true, page, header};
}
} // namespace
void HeapAllocator::BackingFree(void* address) { void HeapAllocator::BackingFree(void* address) {
if (!address) if (!address)
return; return;
ThreadState* const state = ThreadState::Current(); ThreadState* state = ThreadState::Current();
BackingModifier result = CanFreeOrShrinkBacking(state, address); if (state->SweepForbidden())
if (!result.can_modify) return;
DCHECK(!state->in_atomic_pause());
// Don't promptly free large objects because their page is never reused.
// Don't free backings allocated on other threads.
BasePage* page = PageFromObject(address);
if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
return; return;
state->Heap().PromptlyFreed(result.header->GcInfoIndex()); HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
static_cast<NormalPage*>(result.page) // Don't promptly free marked backing as they may be registered on the marking
->ArenaForNormalPage() // callback stack. The effect on non incremental marking GCs is that promptly
->PromptlyFreeObject(result.header); // free is disabled for surviving backings during lazy sweeping.
if (header->IsMarked())
return;
state->Heap().PromptlyFreed(header->GcInfoIndex());
static_cast<NormalPage*>(page)->ArenaForNormalPage()->PromptlyFreeObject(
header);
} }
void HeapAllocator::FreeVectorBacking(void* address) { void HeapAllocator::FreeVectorBacking(void* address) {
...@@ -68,7 +41,10 @@ void HeapAllocator::FreeInlineVectorBacking(void* address) { ...@@ -68,7 +41,10 @@ void HeapAllocator::FreeInlineVectorBacking(void* address) {
} }
void HeapAllocator::FreeHashTableBacking(void* address) { void HeapAllocator::FreeHashTableBacking(void* address) {
BackingFree(address); // When incremental marking is enabled weak callbacks may have been
// registered.
if (!ThreadState::Current()->IsMarkingInProgress())
BackingFree(address);
} }
bool HeapAllocator::BackingExpand(void* address, size_t new_size) { bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
...@@ -116,27 +92,39 @@ bool HeapAllocator::BackingShrink(void* address, ...@@ -116,27 +92,39 @@ bool HeapAllocator::BackingShrink(void* address,
DCHECK_LT(quantized_shrunk_size, quantized_current_size); DCHECK_LT(quantized_shrunk_size, quantized_current_size);
ThreadState* const state = ThreadState::Current(); ThreadState* state = ThreadState::Current();
BackingModifier result = CanFreeOrShrinkBacking(state, address); if (state->SweepForbidden())
if (!result.can_modify)
return false; return false;
DCHECK(!state->in_atomic_pause());
DCHECK(state->IsAllocationAllowed()); DCHECK(state->IsAllocationAllowed());
DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap()); DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());
NormalPageArena* arena = // FIXME: Support shrink for large objects.
static_cast<NormalPage*>(result.page)->ArenaForNormalPage(); // Don't shrink backings allocated on other threads.
BasePage* page = PageFromObject(address);
if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
return false;
HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
// Compaction may register slots for compaction in slots of vector backings.
// E.g., when vectors are embedded in each other. To avoid dereferencing a
// broken slot, bail out on already marked backings.
if (header->IsMarked())
return false;
NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
// We shrink the object only if the shrinking will make a non-small // We shrink the object only if the shrinking will make a non-small
// prompt-free block. // prompt-free block.
// FIXME: Optimize the threshold size. // FIXME: Optimize the threshold size.
if (quantized_current_size <= quantized_shrunk_size + if (quantized_current_size <= quantized_shrunk_size +
sizeof(HeapObjectHeader) + sizeof(HeapObjectHeader) +
sizeof(void*) * 32 && sizeof(void*) * 32 &&
!arena->IsObjectAllocatedAtAllocationPoint(result.header)) !arena->IsObjectAllocatedAtAllocationPoint(header))
return true; return true;
bool succeeded_at_allocation_point = bool succeeded_at_allocation_point =
arena->ShrinkObject(result.header, quantized_shrunk_size); arena->ShrinkObject(header, quantized_shrunk_size);
if (succeeded_at_allocation_point) if (succeeded_at_allocation_point)
state->Heap().AllocationPointAdjusted(arena->ArenaIndex()); state->Heap().AllocationPointAdjusted(arena->ArenaIndex());
return true; return true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment