Commit bc2e8648 authored by Michael Lippautz, committed by Commit Bot

heap: Optimize write barrier when not bailing out

- Fetch ThreadState from the page header instead of TLS (see the sketch
  after this list).
- Avoid a virtual call when checking for large object pages.
- Go directly through object_start_bitmap() instead of using
  FromInnerAddress, as we know that the address is not null, not in the
  linear allocation area, and not pointing to free space.
- Fold TryMark and Mark at the expense of an Unmark() for the rare case
  where an object is in construction.
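
The first two bullets rely on every managed object living on an aligned heap
page whose header caches the owning ThreadState. A minimal sketch of that
lookup, with hypothetical constants (the real Blink page size and guard-page
layout may differ):

```cpp
#include <cstdint>

// Sketch only: assumes heap pages are allocated at kBlinkPageSize-aligned
// addresses with a guard page in front of the BasePage header. The constant
// values are illustrative assumptions, not Blink's actual ones.
struct BasePage;  // page header; caches the owning ThreadState

constexpr uintptr_t kBlinkPageSize = uintptr_t{1} << 17;
constexpr uintptr_t kBlinkGuardPageSize = uintptr_t{1} << 12;

inline BasePage* PageFromObject(const void* object) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(object);
  // Masking off the low bits yields the page boundary; skipping the guard
  // page lands on the BasePage header.
  return reinterpret_cast<BasePage*>((address & ~(kBlinkPageSize - 1)) +
                                     kBlinkGuardPageSize);
}
```

With the header reachable this way, base_page->thread_state() replaces the
TLS lookup that ThreadState::Current() would otherwise perform on every
barrier invocation.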

Output on Z840 (down from ~21.8x):
[ RUN      ] WriteBarrierPerfTest.MemberWritePerformance
*RESULT WriteBarrierPerfTest writes during GC: = 28129.39521800281 writes/ms
*RESULT WriteBarrierPerfTest writes outside GC: = 480769.2307692308 writes/ms
*RESULT WriteBarrierPerfTest relative speed difference: = 17.091346153846157 times
[       OK ] WriteBarrierPerfTest.MemberWritePerformance (32 ms)
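
(The relative figure is the ratio of the two throughputs:
480769.23 / 28129.40 ≈ 17.09, down from the ~21.8x measured before this
change.)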

Bug: 1014414
Change-Id: I9319a312a94a4fbafd4d87145414e7b2f4acd583
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1865163
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/master@{#706666}
parent 8a0fa25f
@@ -1299,18 +1299,20 @@ void FreeList::CollectStatistics(
       std::move(free_size)};
 }
 
-BasePage::BasePage(PageMemory* storage, BaseArena* arena)
+BasePage::BasePage(PageMemory* storage, BaseArena* arena, PageType page_type)
     : magic_(GetMagic()),
       storage_(storage),
       arena_(arena),
-      swept_(true) {
+      thread_state_(arena->GetThreadState()),
+      page_type_(page_type) {
 #if DCHECK_IS_ON()
   DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
 #endif
 }
 
 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena)
-    : BasePage(storage, arena), object_start_bit_map_(Payload()) {
+    : BasePage(storage, arena, PageType::kNormalPage),
+      object_start_bit_map_(Payload()) {
 #if DCHECK_IS_ON()
   DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this)));
 #endif  // DCHECK_IS_ON()
@@ -1710,7 +1712,8 @@ Address ObjectStartBitmap::FindHeader(
   return object_offset + offset_;
 }
 
-HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) {
+HeapObjectHeader* NormalPage::ConservativelyFindHeaderFromAddress(
+    Address address) {
   if (!ContainedInObjectPayload(address))
     return nullptr;
   if (ArenaForNormalPage()->IsInCurrentAllocationPointRegion(address))
@@ -1725,6 +1728,17 @@ HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) {
   return header;
 }
 
+HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) {
+  DCHECK(ContainedInObjectPayload(address));
+  DCHECK(!ArenaForNormalPage()->IsInCurrentAllocationPointRegion(address));
+  HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(
+      object_start_bit_map()->FindHeader(address));
+  DCHECK(header->IsValid());
+  DCHECK_LT(0u, header->GcInfoIndex());
+  DCHECK_GT(header->PayloadEnd(), address);
+  return header;
+}
+
 void NormalPage::CollectStatistics(
     ThreadState::Statistics::ArenaStatistics* arena_stats) {
   HeapObjectHeader* header = nullptr;
@@ -1765,7 +1779,7 @@ bool NormalPage::Contains(Address addr) {
 LargeObjectPage::LargeObjectPage(PageMemory* storage,
                                  BaseArena* arena,
                                  size_t object_size)
-    : BasePage(storage, arena),
+    : BasePage(storage, arena, PageType::kLargeObjectPage),
       object_size_(object_size)
 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER
       ,
......
@@ -471,6 +471,10 @@ enum class FinalizeType : uint8_t { kInlined, kDeferred };
 // |SweepResult| indicates if page turned out to be empty after sweeping.
 enum class SweepResult : uint8_t { kPageEmpty, kPageNotEmpty };
 
+// |PageType| indicates whether a page is used for normal objects or whether it
+// holds a large object.
+enum class PageType : uint8_t { kNormalPage, kLargeObjectPage };
+
 // |BasePage| is a base class for |NormalPage| and |LargeObjectPage|.
 //
 // - |NormalPage| is a page whose size is |kBlinkPageSize|. A |NormalPage| can
@@ -487,7 +491,7 @@ class BasePage {
   DISALLOW_NEW();
 
  public:
-  BasePage(PageMemory*, BaseArena*);
+  BasePage(PageMemory*, BaseArena*, PageType);
   virtual ~BasePage() = default;
 
   // Virtual methods are slow. So performance-sensitive methods should be
@@ -514,13 +518,14 @@
   virtual bool Contains(Address) = 0;
 #endif
   virtual size_t size() = 0;
-  virtual bool IsLargeObjectPage() { return false; }
 
   Address GetAddress() { return reinterpret_cast<Address>(this); }
   PageMemory* Storage() const { return storage_; }
   BaseArena* Arena() const { return arena_; }
+  ThreadState* thread_state() const { return thread_state_; }
 
-  // Returns true if this page has been swept by the ongoing lazy sweep.
+  // Returns true if this page has been swept by the ongoing sweep; false
+  // otherwise.
   bool HasBeenSwept() const { return swept_; }
 
   void MarkAsSwept() {
@@ -533,6 +538,11 @@
     swept_ = false;
   }
 
+  // Returns true if this page is a large object page; false otherwise.
+  bool IsLargeObjectPage() const {
+    return page_type_ == PageType::kLargeObjectPage;
+  }
+
   // Returns true if magic number is valid.
   bool IsValid() const;
@@ -545,10 +555,13 @@
   uint32_t const magic_;
   PageMemory* const storage_;
   BaseArena* const arena_;
+  ThreadState* const thread_state_;
 
   // Track the sweeping state of a page. Set to false at the start of a sweep,
-  // true upon completion of lazy sweeping.
-  bool swept_;
+  // true upon completion of sweeping that page.
+  bool swept_ = true;
+
+  PageType page_type_;
 
   friend class BaseArena;
 };
@@ -738,6 +751,11 @@ class PLATFORM_EXPORT NormalPage final : public BasePage {
   // Uses the object_start_bit_map_ to find an object for a given address. The
   // returned header is either nullptr, indicating that no object could be
   // found, or it is pointing to valid object or free list entry.
+  HeapObjectHeader* ConservativelyFindHeaderFromAddress(Address);
+
+  // Uses the object_start_bit_map_ to find an object for a given address. It is
+  // assumed that the address points into a valid heap object. Use the
+  // conservative version if that assumption does not hold.
   HeapObjectHeader* FindHeaderFromAddress(Address);
 
   void VerifyMarking() override;
@@ -830,8 +848,6 @@ class PLATFORM_EXPORT LargeObjectPage final : public BasePage {
   void CollectStatistics(
       ThreadState::Statistics::ArenaStatistics* arena_stats) override;
 
-  bool IsLargeObjectPage() override { return true; }
-
   void VerifyMarking() override;
 
 #if defined(ADDRESS_SANITIZER)
......
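
Aside: the header diff above replaces the virtual IsLargeObjectPage() with a
comparison on the new page_type_ field. A standalone, purely illustrative
sketch of why that helps on a hot path (hypothetical types, not Blink code):

```cpp
#include <cstdint>

enum class PageType : uint8_t { kNormalPage, kLargeObjectPage };

// Tag-based check: a single byte load and compare that the compiler can
// inline into the caller, avoiding the vtable load and indirect call that a
// virtual IsLargeObjectPage() would require.
struct TaggedPage {
  PageType page_type;
  bool IsLargeObjectPage() const {
    return page_type == PageType::kLargeObjectPage;
  }
};
```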
@@ -80,28 +80,42 @@ void MarkingVisitorBase::AdjustMarkedBytes(HeapObjectHeader* header,
   marked_bytes_ += header->size() - old_size;
 }
 
 // static
 bool MarkingVisitor::WriteBarrierSlow(void* value) {
   if (!value || IsHashTableDeleteValue(value))
     return false;
 
-  ThreadState* const thread_state = ThreadState::Current();
+  // It is guaranteed that managed references point to either GarbageCollected
+  // or GarbageCollectedMixin. Mixins are restricted to regular objects sizes.
+  // It is thus possible to get to the page header by aligning properly.
+  BasePage* base_page = PageFromObject(value);
+
+  ThreadState* const thread_state = base_page->thread_state();
   if (!thread_state->IsIncrementalMarking())
     return false;
 
-  HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
-      reinterpret_cast<Address>(const_cast<void*>(value)));
-
-  if (header->IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+  HeapObjectHeader* header;
+  if (LIKELY(!base_page->IsLargeObjectPage())) {
+    header = reinterpret_cast<HeapObjectHeader*>(
+        static_cast<NormalPage*>(base_page)->FindHeaderFromAddress(
+            reinterpret_cast<Address>(value)));
+  } else {
+    header = static_cast<LargeObjectPage*>(base_page)->ObjectHeader();
+  }
+  DCHECK(header->IsValid());
+
+  if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
     return false;
 
-  if (header->IsInConstruction()) {
+  if (UNLIKELY(header->IsInConstruction())) {
     // It is assumed that objects on not_fully_constructed_worklist_ are not
     // marked.
+    header->Unmark();
     thread_state->CurrentVisitor()->not_fully_constructed_worklist_.Push(
         header->Payload());
     return true;
   }
 
-  // Mark and push trace callback.
-  if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
-    return false;
-
   MarkingVisitor* visitor = thread_state->CurrentVisitor();
   visitor->AccountMarkedBytes(header);
   visitor->marking_worklist_.Push(
@@ -152,7 +166,8 @@ void MarkingVisitor::ConservativelyMarkAddress(BasePage* page,
   HeapObjectHeader* const header =
       page->IsLargeObjectPage()
           ? static_cast<LargeObjectPage*>(page)->ObjectHeader()
-          : static_cast<NormalPage*>(page)->FindHeaderFromAddress(address);
+          : static_cast<NormalPage*>(page)->ConservativelyFindHeaderFromAddress(
+                address);
   if (!header || header->IsMarked())
     return;
......
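
The folded marking logic in the last hunk condenses to the following sketch;
it reuses names from the diff but is simplified and non-authoritative:

```cpp
// One atomic TryMark replaces the earlier IsMarked-then-TryMark pair. The
// rare in-construction case pays an extra Unmark() so that the deferred
// worklist sees the object in the unmarked state it expects.
bool MarkOrDefer(HeapObjectHeader* header, ThreadState* thread_state) {
  if (!header->TryMark<HeapObjectHeader::AccessMode::kAtomic>())
    return false;  // Already marked (or lost the race): nothing to do.
  if (UNLIKELY(header->IsInConstruction())) {
    header->Unmark();
    thread_state->CurrentVisitor()->not_fully_constructed_worklist_.Push(
        header->Payload());
    return true;
  }
  MarkingVisitor* visitor = thread_state->CurrentVisitor();
  visitor->AccountMarkedBytes(header);
  // visitor->marking_worklist_.Push(...) then enqueues the trace callback;
  // its arguments are truncated in the hunk above.
  return true;
}
```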