Commit 5da6c653 authored by Omer Katz, committed by Commit Bot

heap: Accessing object size atomically

Object size and mark bit reside in the same half word, so reading the
size during marking causes data races. Specifically, the size is read
from the write barrier and from Member's CheckPointer method.
This CL adds an atomic version of the size accessor. It is used by
FindHeaderFromAddress in the write barrier slow path and by
CheckPointer in Member methods other than allocation (Members are not
yet traced during allocation, so atomics are not needed there).
Note that since the size never changes during marking (a DCHECK now
verifies that incremental marking is off whenever the size changes),
it is safe to read the size with relaxed ordering.
(An illustrative sketch of this pattern follows the commit metadata
below.)

Bug: 986235
Change-Id: I73019f9f9bfeb9562d4d1f29fc020a9b6f71866a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1893202
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#712551}
parent dd76e3ec
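
A minimal, self-contained sketch of the pattern this CL applies
(illustrative names only, not Blink's API): when a size field shares a
half word with a mark bit that a concurrent marker may flip, racing
readers must load the field through std::atomic. Relaxed ordering is
enough because the size bits themselves never change while marking runs.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical packed header: low bits encode the size, one bit above
// them is the mark bit, mirroring the "size and mark bit in the same
// half word" layout the commit message describes.
class PackedHeader {
 public:
  static constexpr uint16_t kSizeMask = 0x3FFF;
  static constexpr uint16_t kMarkBit = 0x4000;

  explicit PackedHeader(size_t size)
      : encoded_(static_cast<uint16_t>(size) & kSizeMask) {}

  // Non-atomic read: only valid when no marker runs concurrently.
  size_t size() const { return encoded_ & kSizeMask; }

  // Atomic read for use while marking is active. Relaxed ordering is
  // sufficient because the size bits are immutable during marking;
  // atomicity only guards against tearing from racing mark-bit updates.
  size_t size_atomic() const {
    const uint16_t encoded =
        reinterpret_cast<const std::atomic<uint16_t>&>(encoded_)
            .load(std::memory_order_relaxed);
    return encoded & kSizeMask;
  }

  // The marker thread sets the mark bit atomically, racing with readers.
  void Mark() {
    reinterpret_cast<std::atomic<uint16_t>&>(encoded_)
        .fetch_or(kMarkBit, std::memory_order_relaxed);
  }

 private:
  uint16_t encoded_;
};

Casting a plain uint16_t to std::atomic<uint16_t> is the same device the
CL itself uses; it relies on the two types sharing a representation.
Portable C++20 code would express this with std::atomic_ref instead.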
......@@ -1717,16 +1717,6 @@ HeapObjectHeader* NormalPage::ConservativelyFindHeaderFromAddress(
return header;
}
HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) {
DCHECK(ContainedInObjectPayload(address));
DCHECK(!ArenaForNormalPage()->IsInCurrentAllocationPointRegion(address));
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
object_start_bit_map()->FindHeader(address));
DCHECK_LT(0u, header->GcInfoIndex());
DCHECK_GT(header->PayloadEnd(), address);
return header;
}
void NormalPage::CollectStatistics(
ThreadState::Statistics::ArenaStatistics* arena_stats) {
HeapObjectHeader* header = nullptr;
......
......@@ -223,6 +223,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
static HeapObjectHeader* FromPayload(const void*);
template <AccessMode = AccessMode::kNonAtomic>
static HeapObjectHeader* FromInnerAddress(const void*);
// Checks sanity of the header given a payload pointer.
......@@ -242,6 +243,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
return (encoded & kHeaderGCInfoIndexMask) >> kHeaderGCInfoIndexShift;
}
template <AccessMode = AccessMode::kNonAtomic>
size_t size() const;
void SetSize(size_t size);
......@@ -265,6 +267,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
// size does not include the sizeof(HeapObjectHeader).
Address Payload() const;
size_t PayloadSize() const;
template <AccessMode = AccessMode::kNonAtomic>
Address PayloadEnd() const;
void Finalize(Address, size_t);
......@@ -708,6 +711,8 @@ class PLATFORM_EXPORT NormalPage final : public BasePage {
// Uses the object_start_bit_map_ to find an object for a given address. It is
// assumed that the address points into a valid heap object. Use the
// conservative version if that assumption does not hold.
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
HeapObjectHeader* FindHeaderFromAddress(Address);
void VerifyMarking() override;
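
For context on the object_start_bit_map_ lookup referenced in the
comment above, here is an illustrative-only sketch (constants and layout
are made up, not Blink's): one bit per allocation granule marks the
first granule of each object, and finding a header from an inner
address amounts to scanning backward to the nearest set bit.

#include <bitset>
#include <cstddef>

constexpr size_t kGranuleSize = 16;       // One bit per 16-byte granule.
constexpr size_t kPayloadSize = 1 << 16;  // Illustrative page payload size.

class ObjectStartBitmapSketch {
 public:
  void MarkObjectStart(size_t offset_in_page) {
    bits_.set(offset_in_page / kGranuleSize);
  }

  // Maps an offset inside an object back to the object's start: the
  // closest set bit at or before the given offset. Assumes the offset
  // really lies inside an allocated object, as the comment above states.
  size_t FindHeaderOffset(size_t offset_in_page) const {
    size_t granule = offset_in_page / kGranuleSize;
    while (!bits_.test(granule))
      --granule;
    return granule * kGranuleSize;
  }

 private:
  std::bitset<kPayloadSize / kGranuleSize> bits_;
};

The real bitmap scans a word at a time rather than bit by bit, but the
contract is the same: given an inner address, return the offset of the
HeapObjectHeader that precedes it.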
......@@ -1041,12 +1046,13 @@ inline HeapObjectHeader* HeapObjectHeader::FromPayload(const void* payload) {
return header;
}
template <HeapObjectHeader::AccessMode mode>
inline HeapObjectHeader* HeapObjectHeader::FromInnerAddress(
const void* address) {
BasePage* const page = PageFromObject(address);
return page->IsLargeObjectPage()
? static_cast<LargeObjectPage*>(page)->ObjectHeader()
: static_cast<NormalPage*>(page)->FindHeaderFromAddress(
: static_cast<NormalPage*>(page)->FindHeaderFromAddress<mode>(
reinterpret_cast<Address>(const_cast<void*>(address)));
}
......@@ -1054,8 +1060,22 @@ inline void HeapObjectHeader::CheckFromPayload(const void* payload) {
(void)FromPayload(payload);
}
template <HeapObjectHeader::AccessMode mode>
NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const {
const size_t result = internal::DecodeSize(encoded_low_);
uint16_t encoded_low_value;
if (mode == AccessMode::kNonAtomic) {
encoded_low_value = encoded_low_;
} else {
// mode == AccessMode::kAtomic
// A relaxed load suffices, as the size is immutable after construction
// and does not change while either marking or sweeping is running.
internal::AsanUnpoisonScope unpoison_scope(
static_cast<const void*>(&encoded_low_), sizeof(encoded_low_));
encoded_low_value =
reinterpret_cast<const std::atomic<uint16_t>&>(encoded_low_)
.load(std::memory_order_relaxed);
}
const size_t result = internal::DecodeSize(encoded_low_value);
// Large objects should not refer to header->size() but use
// LargeObjectPage::PayloadSize().
DCHECK(result != kLargeObjectSizeInHeader);
......@@ -1064,6 +1084,7 @@ NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const {
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::SetSize(size_t size) {
DCHECK(!PageFromObject(Payload())->thread_state()->IsIncrementalMarking());
DCHECK_LT(size, kNonLargeObjectPageSizeMax);
encoded_low_ = static_cast<uint16_t>(internal::EncodeSize(size) |
(encoded_low_ & ~kHeaderSizeMask));
......@@ -1091,9 +1112,10 @@ inline Address HeapObjectHeader::Payload() const {
sizeof(HeapObjectHeader);
}
template <HeapObjectHeader::AccessMode mode>
inline Address HeapObjectHeader::PayloadEnd() const {
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
size();
size<mode>();
}
NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::PayloadSize() const {
......@@ -1286,6 +1308,17 @@ NO_SANITIZE_ADDRESS inline void HeapObjectHeader::StoreEncoded(uint16_t bits,
atomic_encoded->store(value, std::memory_order_release);
}
template <HeapObjectHeader::AccessMode mode>
HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) {
DCHECK(ContainedInObjectPayload(address));
DCHECK(!ArenaForNormalPage()->IsInCurrentAllocationPointRegion(address));
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
object_start_bit_map()->FindHeader(address));
DCHECK_LT(0u, header->GcInfoIndex());
DCHECK_GT(header->PayloadEnd<mode>(), address);
return header;
}
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_HEAP_PAGE_H_
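
As a usage note on the template plumbing above: defaulting the
AccessMode parameter to kNonAtomic leaves every existing call site
untouched, while callers that can race with the marker, such as the
write barrier below, opt into the atomic path explicitly. A hedged
sketch of the dispatch (names are illustrative, not Blink's):

#include <atomic>
#include <cstdint>

enum class AccessMode : uint8_t { kNonAtomic, kAtomic };

struct HeaderSketch {
  uint16_t encoded = 0;

  // The branch on the compile-time template argument folds away, so the
  // default path carries no atomic instructions.
  template <AccessMode mode = AccessMode::kNonAtomic>
  uint16_t Load() const {
    if (mode == AccessMode::kNonAtomic)
      return encoded;
    return reinterpret_cast<const std::atomic<uint16_t>&>(encoded)
        .load(std::memory_order_relaxed);
  }
};

// Mutator-only paths keep the plain load; paths racing with the marker
// spell out the atomic mode, as WriteBarrierSlow does in the next hunk.
uint16_t MutatorRead(const HeaderSketch& h) { return h.Load(); }
uint16_t BarrierRead(const HeaderSketch& h) {
  return h.Load<AccessMode::kAtomic>();
}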
......@@ -147,8 +147,9 @@ bool MarkingVisitor::WriteBarrierSlow(void* value) {
HeapObjectHeader* header;
if (LIKELY(!base_page->IsLargeObjectPage())) {
header = reinterpret_cast<HeapObjectHeader*>(
static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
reinterpret_cast<Address>(value)));
static_cast<NormalPage*>(base_page)
->FindHeaderFromAddress<HeapObjectHeader::AccessMode::kAtomic>(
reinterpret_cast<Address>(value)));
} else {
LargeObjectPage* large_page = static_cast<LargeObjectPage*>(base_page);
header = large_page->ObjectHeader();
......
......@@ -73,7 +73,8 @@ class MemberPointerVerifier {
if (IsFullyDefined<T>::value && !IsGarbageCollectedMixin<T>::value)
HeapObjectHeader::CheckFromPayload(pointer);
} else {
DCHECK(HeapObjectHeader::FromInnerAddress(pointer));
DCHECK(HeapObjectHeader::FromInnerAddress<
HeapObjectHeader::AccessMode::kAtomic>(pointer));
}
}
......