Commit 29dcab76 authored by Anton Bikineev, committed by Commit Bot

heap: Don't inject ObjectBitmap bit setting into HeapObjectHeader ctor

In the current design, HeapObjectHeader sets its bit in ObjectStartBitmap
from the constructor. This mixes abstractions and violates the
single-responsibility principle, since ObjectStartBitmap is managed by a
different entity, NormalPage.

This patch decouples ObjectStartBitmap and HeapObjectHeader.

Bug: 1029379
Change-Id: Iceb7450be7240d9b0765887b3e06663b56da7617
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2002820
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#732560}
parent 813c6b42
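To illustrate the ownership split this CL moves to, here is a minimal,
self-contained C++ sketch. SketchBitmap, SketchPage, SketchHeader, and the
constants are illustrative stand-ins, not the real Blink classes: the
header only encodes its own fields, and the page-aware caller records the
object start.

// A minimal sketch of the decoupling; all names here are hypothetical.
#include <bitset>
#include <cassert>
#include <cstddef>
#include <new>

constexpr std::size_t kPageSize = 1 << 12;  // assumed page size
constexpr std::size_t kGranularity = 8;     // assumed allocation granularity

// Stand-in for ObjectStartBitmap: one bit per allocation granule.
class SketchBitmap {
 public:
  void SetBit(const char* base, const char* addr) {
    bits_.set(static_cast<std::size_t>(addr - base) / kGranularity);
  }
  bool CheckBit(const char* base, const char* addr) const {
    return bits_.test(static_cast<std::size_t>(addr - base) / kGranularity);
  }

 private:
  std::bitset<kPageSize / kGranularity> bits_;
};

// After this change the header only encodes its own fields; it knows
// nothing about pages or bitmaps.
class SketchHeader {
 public:
  SketchHeader(std::size_t size, std::size_t gc_info_index)
      : size_(size), gc_info_index_(gc_info_index) {}
  std::size_t size() const { return size_; }
  std::size_t gc_info_index() const { return gc_info_index_; }

 private:
  std::size_t size_;
  std::size_t gc_info_index_;
};

// The page owns the bitmap, mirroring NormalPage.
struct SketchPage {
  alignas(kGranularity) char payload[kPageSize];
  SketchBitmap object_start_bit_map;
};

int main() {
  SketchPage page;
  char* header_address = page.payload;

  // Caller-side pattern: construct the header, then let the page-aware
  // caller record the object start, instead of doing it in the ctor.
  new (header_address) SketchHeader(/*size=*/64, /*gc_info_index=*/1);
  page.object_start_bit_map.SetBit(page.payload, header_address);

  assert(page.object_start_bit_map.CheckBit(page.payload, header_address));
  return 0;
}

The same caller-side pattern is what NormalPageArena::AllocateObject() and
the out-of-lined NormalPageArena::AddToFreeList() adopt in the diff below.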
@@ -424,6 +424,17 @@ NormalPageArena::NormalPageArena(ThreadState* state, int index)
   ClearFreeLists();
 }
 
+void NormalPageArena::AddToFreeList(Address address, size_t size) {
+#if DCHECK_IS_ON()
+  DCHECK(FindPageFromAddress(address));
+  DCHECK(FindPageFromAddress(address + size - 1));
+#endif
+  free_list_.Add(address, size);
+  static_cast<NormalPage*>(PageFromObject(address))
+      ->object_start_bit_map()
+      ->SetBit(address);
+}
+
 void NormalPageArena::MakeConsistentForGC() {
   BaseArena::MakeConsistentForGC();
@@ -799,9 +810,9 @@ bool NormalPageArena::ShrinkObject(HeapObjectHeader* header, size_t new_size) {
   DCHECK_GE(shrink_size, sizeof(HeapObjectHeader));
   DCHECK_GT(header->GcInfoIndex(), 0u);
   Address shrink_address = header->PayloadEnd() - shrink_size;
-  HeapObjectHeader* freed_header =
-      new (NotNull, shrink_address) HeapObjectHeader(
-          shrink_size, header->GcInfoIndex(), HeapObjectHeader::kNormalPage);
+  HeapObjectHeader* freed_header = new (NotNull, shrink_address)
+      HeapObjectHeader(shrink_size, header->GcInfoIndex());
+  // Since only size has been changed, we don't need to update object starts.
   PromptlyFreeObjectInFreeList(freed_header, shrink_size);
 #if DCHECK_IS_ON()
   DCHECK_EQ(PageFromObject(reinterpret_cast<Address>(header)),
@@ -994,8 +1005,8 @@ Address LargeObjectArena::DoAllocateLargeObjectPage(size_t allocation_size,
   DCHECK_GT(gc_info_index, 0u);
   LargeObjectPage* large_object = new (large_object_address)
       LargeObjectPage(page_memory, this, allocation_size);
-  HeapObjectHeader* header = new (NotNull, header_address) HeapObjectHeader(
-      kLargeObjectSizeInHeader, gc_info_index, HeapObjectHeader::kLargePage);
+  HeapObjectHeader* header = new (NotNull, header_address)
+      HeapObjectHeader(kLargeObjectSizeInHeader, gc_info_index);
   Address result = header_address + sizeof(*header);
   DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask));
@@ -1071,15 +1082,15 @@ void FreeList::Add(Address address, size_t size) {
   DCHECK(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) &
            kAllocationMask));
   DCHECK(!(size & kAllocationMask));
+  DCHECK(!PageFromObject(address)->IsLargeObjectPage());
   ASAN_UNPOISON_MEMORY_REGION(address, size);
   FreeListEntry* entry;
   if (size < sizeof(*entry)) {
     // Create a dummy header with only a size and freelist bit set.
     DCHECK_GE(size, sizeof(HeapObjectHeader));
     // Free list encode the size to mark the lost memory as freelist memory.
-    new (NotNull, address) HeapObjectHeader(size, kGcInfoIndexForFreeListHeader,
-                                            HeapObjectHeader::kNormalPage);
+    new (NotNull, address)
+        HeapObjectHeader(size, kGcInfoIndexForFreeListHeader);
     ASAN_POISON_MEMORY_REGION(address, size);
     // This memory gets lost. Sweeping can reclaim it.
     return;
@@ -1394,7 +1405,6 @@ void NormalPage::FinalizeSweep(SweepResult action) {
     object.Finalize();
   }
   to_be_finalized_objects_.clear();
-
   if (action == SweepResult::kPageNotEmpty) {
     MergeFreeLists();
     MarkAsSwept();
@@ -1416,6 +1426,7 @@ void NormalPage::AddToFreeList(Address start,
     unfinalized_freelist_.push_back(std::move(entry));
   } else {
     cached_freelist_.Add(start, size);
+    object_start_bit_map()->SetBit(start);
 #if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
     if (Arena()->GetThreadState()->IsMemoryReducingGC()) {
       DiscardPages(start + sizeof(FreeListEntry), start + size);
...
@@ -220,7 +220,6 @@ class PLATFORM_EXPORT HeapObjectHeader {
   DISALLOW_NEW();
 
  public:
-  enum HeaderLocation : uint8_t { kNormalPage, kLargePage };
   enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
 
   static HeapObjectHeader* FromPayload(const void*);
@@ -231,7 +230,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
   static void CheckFromPayload(const void*);
 
   // If |gc_info_index| is 0, this header is interpreted as a free list header.
-  HeapObjectHeader(size_t, size_t, HeaderLocation);
+  HeapObjectHeader(size_t, size_t);
 
   template <AccessMode mode = AccessMode::kNonAtomic>
   NO_SANITIZE_ADDRESS bool IsFree() const {
@@ -299,10 +298,7 @@ class FreeListEntry final : public HeapObjectHeader {
  public:
   NO_SANITIZE_ADDRESS
   explicit FreeListEntry(size_t size)
-      : HeapObjectHeader(size,
-                         kGcInfoIndexForFreeListHeader,
-                         HeapObjectHeader::kNormalPage),
-        next_(nullptr) {}
+      : HeapObjectHeader(size, kGcInfoIndexForFreeListHeader), next_(nullptr) {}
 
   Address GetAddress() { return reinterpret_cast<Address>(this); }
@@ -1010,15 +1006,7 @@ class PLATFORM_EXPORT BaseArena {
 class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
  public:
   NormalPageArena(ThreadState*, int index);
-  void AddToFreeList(Address address, size_t size) {
-#if DCHECK_IS_ON()
-    DCHECK(FindPageFromAddress(address));
-    // TODO(palmer): Do we need to handle about integer overflow here (and in
-    // similar expressions elsewhere)?
-    DCHECK(FindPageFromAddress(address + size - 1));
-#endif
-    free_list_.Add(address, size);
-  }
+  void AddToFreeList(Address address, size_t size);
   void AddToFreeList(FreeList* other) { free_list_.MoveFrom(other); }
   void ClearFreeLists() override;
   void CollectFreeListStatistics(
@@ -1281,8 +1269,12 @@ inline Address NormalPageArena::AllocateObject(size_t allocation_size,
   current_allocation_point_ += allocation_size;
   remaining_allocation_size_ -= allocation_size;
   DCHECK_GT(gc_info_index, 0u);
-  new (NotNull, header_address) HeapObjectHeader(
-      allocation_size, gc_info_index, HeapObjectHeader::kNormalPage);
+  new (NotNull, header_address)
+      HeapObjectHeader(allocation_size, gc_info_index);
+  DCHECK(!PageFromObject(header_address)->IsLargeObjectPage());
+  static_cast<NormalPage*>(PageFromObject(header_address))
+      ->object_start_bit_map()
+      ->SetBit(header_address);
   Address result = header_address + sizeof(HeapObjectHeader);
   DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask));
@@ -1376,8 +1368,7 @@ inline void ObjectStartBitmap::Iterate(Callback callback) const {
 NO_SANITIZE_ADDRESS inline HeapObjectHeader::HeapObjectHeader(
     size_t size,
-    size_t gc_info_index,
-    HeaderLocation header_location) {
+    size_t gc_info_index) {
   // sizeof(HeapObjectHeader) must be equal to or smaller than
   // |kAllocationGranularity|, because |HeapObjectHeader| is used as a header
   // for a freed entry. Given that the smallest entry size is
@@ -1392,14 +1383,6 @@ NO_SANITIZE_ADDRESS inline HeapObjectHeader::HeapObjectHeader(
   encoded_high_ =
       static_cast<uint16_t>(gc_info_index << kHeaderGCInfoIndexShift);
   encoded_low_ = internal::EncodeSize(size);
-  if (header_location == kNormalPage) {
-    DCHECK(!PageFromObject(this)->IsLargeObjectPage());
-    static_cast<NormalPage*>(PageFromObject(this))
-        ->object_start_bit_map()
-        ->SetBit(reinterpret_cast<Address>(this));
-  } else {
-    DCHECK(PageFromObject(this)->IsLargeObjectPage());
-  }
   DCHECK(IsInConstruction());
 }
...