Commit aa4ab4eb authored by Michael Lippautz, committed by Commit Bot

heap: Rewrite compaction of interior backings using std::map

Instead of maintaining two separate data structures (a sparse bitmap and an
unordered hash map) for updating interior slots, just keep track of interior
slots in a std::map.

- std::map::lower_bound is used to provide the early bailout that was
  previously provided by the sparse bitmap.
- The iteration through the object's payload can just be done via iterator
  increment (see the sketch below).
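
A minimal standalone sketch of this pattern (simplified types and illustrative
names, not the actual Blink code) showing how a single ordered map supports
both the early bailout and the payload walk:

  #include <cstddef>
  #include <cstdint>
  #include <map>

  using Address = uint8_t*;

  // Interior slot -> (eventual) relocated location; nullptr until the backing
  // store containing the slot has itself been moved.
  std::map<void**, Address> interior_fixups;

  void RelocateInteriorSlots(Address from, Address to, size_t size) {
    // lower_bound() yields the first registered slot at or above |from|; this
    // replaces the sparse bitmap's HasRange() as the early bailout.
    auto it = interior_fixups.lower_bound(reinterpret_cast<void**>(from));
    while (it != interior_fixups.end() &&
           reinterpret_cast<Address>(it->first) < from + size) {
      size_t offset = reinterpret_cast<Address>(it->first) - from;
      if (!it->second)
        it->second = to + offset;  // Record the slot's relocated location.
      ++it;  // Plain iterator increment walks the remaining slots in order.
    }
  }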

Change-Id: I569bd9155bf90bdf300104e8dc2d8ae2dd0dcb63
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1595873
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657439}
parent b8c45b5c
@@ -60,8 +60,6 @@ blink_platform_sources("heap") {
     "process_heap.cc",
     "process_heap.h",
     "self_keep_alive.h",
-    "sparse_heap_bitmap.cc",
-    "sparse_heap_bitmap.h",
     "thread_state.cc",
     "thread_state.h",
     "thread_state_scopes.h",
@@ -117,7 +115,6 @@ jumbo_source_set("blink_heap_unittests_sources") {
     "object_start_bitmap_test.cc",
     "persistent_test.cc",
     "run_all_tests.cc",
-    "sparse_heap_bitmap_test.cc",
     "thread_state_scheduling_test.cc",
     "worklist_test.cc",
   ]

@@ -10,7 +10,6 @@
 #include "base/memory/ptr_util.h"
 #include "third_party/blink/renderer/platform/heap/heap.h"
 #include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
-#include "third_party/blink/renderer/platform/heap/sparse_heap_bitmap.h"
 #include "third_party/blink/renderer/platform/histogram.h"
 #include "third_party/blink/renderer/platform/runtime_enabled_features.h"
 #include "third_party/blink/renderer/platform/wtf/allocator.h"
@@ -106,23 +105,10 @@ class HeapCompact::MovableObjectFixups final {
     if (LIKELY(!relocatable_pages_.Contains(slot_page)))
       return;
-    // Interior slots are recorded as follows:
-    // - Storing it in the interior map, which maps the slot to its (eventual)
-    //   location. Initially nullptr.
-    // - Mark it as being interior pointer within the page's interior bitmap.
-    //   This bitmap is used when moving a backing store to check whether an
-    //   interior slot is to be redirected.
     auto interior_it = interior_fixups_.find(slot);
-    // Repeated registrations have been filtered above.
     CHECK(interior_fixups_.end() == interior_it);
     interior_fixups_.insert({slot, nullptr});
     LOG_HEAP_COMPACTION() << "Interior slot: " << slot;
-    Address slot_address = reinterpret_cast<Address>(slot);
-    if (!interiors_) {
-      interiors_ = std::make_unique<SparseHeapBitmap>(slot_address);
-      return;
-    }
-    interiors_->Add(slot_address);
   }
 
   void AddFixupCallback(MovableReference* slot,
@@ -134,47 +120,35 @@ class HeapCompact::MovableObjectFixups final {
   }
 
   void RelocateInteriorFixups(Address from, Address to, size_t size) {
-    SparseHeapBitmap* range = interiors_->HasRange(from, size);
-    if (LIKELY(!range))
-      return;
-
-    // Scan through the payload, looking for interior pointer slots
-    // to adjust. If the backing store of such an interior slot hasn't
-    // been moved already, update the slot -> real location mapping.
-    // When the backing store is eventually moved, it'll use that location.
-    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
-      MovableReference* slot =
-          reinterpret_cast<MovableReference*>(from + offset);
-
-      // Early bailout.
-      if (!range->IsSet(reinterpret_cast<Address>(slot)))
-        continue;
-
-      auto it = interior_fixups_.find(slot);
-      if (it == interior_fixups_.end())
-        continue;
-
-      // If |slot|'s mapping is set, then the slot has been adjusted already.
-      if (it->second)
-        continue;
-
-      Address fixup = to + offset;
-      LOG_HEAP_COMPACTION() << "Range interior fixup: " << (from + offset)
-                            << " " << it->second << " " << fixup;
-
-      // Fill in the relocated location of the original slot at |slot|.
-      // when the backing store corresponding to |slot| is eventually
-      // moved/compacted, it'll update |to + offset| with a pointer to the
-      // moved backing store.
-      interior_fixups_[slot] = fixup;
-
-      // If the |slot|'s content is pointing into the region [from, from + size)
-      // we are dealing with an interior pointer that does not point to a valid
-      // HeapObjectHeader. Such references need to be fixed up immediately.
-      Address fixup_contents = *reinterpret_cast<Address*>(fixup);
-      if (fixup_contents > from && fixup_contents < (from + size)) {
-        *reinterpret_cast<Address*>(fixup) = fixup_contents - from + to;
-        continue;
-      }
+    // |from| is a valid address for a slot.
+    auto interior_it =
+        interior_fixups_.lower_bound(reinterpret_cast<MovableReference*>(from));
+    if (interior_it == interior_fixups_.end())
+      return;
+    CHECK_GE(reinterpret_cast<Address>(interior_it->first), from);
+    size_t offset = reinterpret_cast<Address>(interior_it->first) - from;
+    while (offset < size) {
+      if (!interior_it->second) {
+        // Update the interior fixup value, so that when the object the slot is
+        // pointing to is moved, it can re-use this value.
+        Address fixup = to + offset;
+        interior_it->second = fixup;
+
+        // If the |slot|'s content is pointing into the region [from, from +
+        // size) we are dealing with an interior pointer that does not point to
+        // a valid HeapObjectHeader. Such references need to be fixed up
+        // immediately.
+        Address fixup_contents = *reinterpret_cast<Address*>(fixup);
+        if (fixup_contents > from && fixup_contents < (from + size)) {
+          *reinterpret_cast<Address*>(fixup) = fixup_contents - from + to;
+        }
+      }
+      interior_it++;
+      if (interior_it == interior_fixups_.end())
+        return;
+      offset = reinterpret_cast<Address>(interior_it->first) - from;
     }
   }
@@ -208,12 +182,12 @@ class HeapCompact::MovableObjectFixups final {
     // If the object is referenced by a slot that is contained on a compacted
     // area itself, check whether it can be updated already.
     MovableReference* slot = reinterpret_cast<MovableReference*>(it->second);
-    auto interior = interior_fixups_.find(slot);
-    if (interior != interior_fixups_.end()) {
+    auto interior_it = interior_fixups_.find(slot);
+    if (interior_it != interior_fixups_.end()) {
       MovableReference* slot_location =
-          reinterpret_cast<MovableReference*>(interior->second);
+          reinterpret_cast<MovableReference*>(interior_it->second);
       if (!slot_location) {
-        interior_fixups_[slot] = to;
+        interior_it->second = to;
         slot_type = kInteriorSlotPreMove;
       } else {
         LOG_HEAP_COMPACTION()
@@ -252,7 +226,7 @@ class HeapCompact::MovableObjectFixups final {
       callback->value.second(callback->value.first, from, to, size);
     }
 
-    if (!interiors_)
+    if (interior_fixups_.empty())
      return;
 
    if (!size)
@@ -266,8 +240,7 @@ class HeapCompact::MovableObjectFixups final {
                           << " objects=" << fixups_.size()
                           << " callbacks=" << fixup_callbacks_.size()
                           << " interior-size="
-                          << (interiors_ ? interiors_->IntervalCount() : 0,
-                              interior_fixups_.size());
+                          << interior_fixups_.size());
   }
 #endif
@@ -289,15 +262,19 @@ class HeapCompact::MovableObjectFixups final {
   HashMap<MovableReference*, std::pair<void*, MovingObjectCallback>>
       fixup_callbacks_;
 
-  // Slot => relocated slot/final location.
-  std::unordered_map<MovableReference*, Address> interior_fixups_;
+  // Map of interior slots to their final location. Needs to be an ordered map
+  // as it is used to walk through slots starting at a given memory address.
+  // Requires log(n) lookup to make the early bailout reasonably fast.
+  // Currently only std::map fulfills those requirements.
+  //
+  // - The initial value for a given key is nullptr.
+  // - Upon moving an object this value is adjusted accordingly.
+  std::map<MovableReference*, Address> interior_fixups_;
 
   // All pages that are being compacted. The set keeps references to
   // BasePage instances. The void* type was selected to allow to check
   // arbitrary addresses.
   HashSet<void*> relocatable_pages_;
-
-  std::unique_ptr<SparseHeapBitmap> interiors_;
 };
 
 void HeapCompact::MovableObjectFixups::VerifyUpdatedSlot(
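
For comparison, the removed path above had to step through every
pointer-aligned offset of the moved payload and probe the sparse bitmap for
each one, whereas the std::map walk only visits slots that were actually
registered. A condensed sketch of the old consumption pattern (based on the
removed lines above; simplified, not the exact Blink code):

  // Old pattern: coarse HasRange() check, then a per-slot IsSet() probe while
  // scanning the whole payload in sizeof(void*) steps.
  void RelocateInteriorSlotsWithBitmap(SparseHeapBitmap* interiors,
                                       Address from, Address to, size_t size) {
    SparseHeapBitmap* range = interiors->HasRange(from, size);
    if (!range)
      return;  // Nothing recorded anywhere in [from, from + size).
    for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
      if (!range->IsSet(from + offset))
        continue;  // This address was never registered as an interior slot.
      // ...look the slot up in the side table and record |to + offset| as its
      // relocated location, as in the removed code above...
    }
  }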
@@ -8,7 +8,6 @@
 #include "third_party/blink/renderer/platform/heap/handle.h"
 #include "third_party/blink/renderer/platform/heap/heap_test_utilities.h"
 #include "third_party/blink/renderer/platform/heap/persistent.h"
-#include "third_party/blink/renderer/platform/heap/sparse_heap_bitmap.h"
 #include "third_party/blink/renderer/platform/wtf/deque.h"
 #include "third_party/blink/renderer/platform/wtf/hash_map.h"
 #include "third_party/blink/renderer/platform/wtf/linked_hash_set.h"

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/heap/sparse_heap_bitmap.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
namespace blink {
// Return the subtree/bitmap that covers the
// [address, address + size) range. Null if there is none.
SparseHeapBitmap* SparseHeapBitmap::HasRange(Address address, size_t size) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
SparseHeapBitmap* bitmap = this;
while (bitmap) {
// Interval starts after, |m_right| handles.
if (address > bitmap->end()) {
bitmap = bitmap->right_.get();
continue;
}
// Interval starts within, |bitmap| is included in the resulting range.
if (address >= bitmap->Base())
break;
Address right = address + size - 1;
// Interval starts before, but intersects with |bitmap|'s range.
if (right >= bitmap->Base())
break;
// Interval is before entirely, for |m_left| to handle.
bitmap = bitmap->left_.get();
}
return bitmap;
}
bool SparseHeapBitmap::IsSet(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
SparseHeapBitmap* bitmap = this;
while (bitmap) {
if (address > bitmap->end()) {
bitmap = bitmap->right_.get();
continue;
}
if (address >= bitmap->Base()) {
if (bitmap->bitmap_) {
return bitmap->bitmap_->test((address - bitmap->Base()) >>
kPointerAlignmentInBits);
}
DCHECK(address == bitmap->Base());
DCHECK_EQ(bitmap->size(), 1u);
return true;
}
bitmap = bitmap->left_.get();
}
return false;
}
void SparseHeapBitmap::Add(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
// |address| is beyond the maximum that this SparseHeapBitmap node can
// encompass.
if (address >= MaxEnd()) {
if (!right_) {
right_ = std::make_unique<SparseHeapBitmap>(address);
return;
}
right_->Add(address);
return;
}
// Same on the other side.
if (address < MinStart()) {
if (!left_) {
left_ = std::make_unique<SparseHeapBitmap>(address);
return;
}
left_->Add(address);
return;
}
if (address == Base())
return;
// |address| can be encompassed by |this| by expanding its size.
if (address > Base()) {
if (!bitmap_)
CreateBitmap();
bitmap_->set((address - Base()) >> kPointerAlignmentInBits);
return;
}
// Use |address| as the new base for this interval.
Address old_base = SwapBase(address);
CreateBitmap();
bitmap_->set((old_base - address) >> kPointerAlignmentInBits);
}
void SparseHeapBitmap::CreateBitmap() {
DCHECK(!bitmap_ && size() == 1);
bitmap_ = std::make_unique<std::bitset<kBitmapChunkSize>>();
size_ = kBitmapChunkRange;
bitmap_->set(0);
}
size_t SparseHeapBitmap::IntervalCount() const {
size_t count = 1;
if (left_)
count += left_->IntervalCount();
if (right_)
count += right_->IntervalCount();
return count;
}
} // namespace blink
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_SPARSE_HEAP_BITMAP_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_SPARSE_HEAP_BITMAP_H_
#include <bitset>
#include <memory>
#include "base/memory/ptr_util.h"
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/heap/heap_page.h"
namespace blink {
// A sparse bitmap of heap addresses where the (very few) addresses that are
// set are likely to be in small clusters. The abstraction is tailored to
// support heap compaction, assuming the following:
//
// - Addresses will be bitmap-marked from lower to higher addresses.
// - Bitmap lookups are performed for each object that is compacted
// and moved to some new location, supplying the (base, size)
// pair of the object's heap allocation.
// - If the sparse bitmap has any marked addresses in that range, it
// returns a sub-bitmap that can be quickly iterated over to check which
// addresses within the range are actually set.
// - The bitmap is needed to support something that is very rarely done
// by the current Blink codebase, which is to have nested collection
// part objects. Consequently, it is safe to assume sparseness.
//
// Support the above by having a sparse bitmap organized as a binary
// tree with nodes covering fixed size ranges via a simple bitmap/set.
// That is, each SparseHeapBitmap node will contain a bitmap/set for
// some fixed size range, along with pointers to SparseHeapBitmaps
// for addresses on each side its range.
//
// This bitmap tree isn't kept balanced across the Address additions
// made.
//
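// For example, with 8-byte pointers kPointerAlignmentInBits is 3, so a single
// node's kBitmapChunkSize (0x100) bits cover kBitmapChunkRange =
// 0x100 << 3 = 0x800 bytes of address space starting at the node's base;
// addresses well outside that window are delegated to the left_/right_
// subtrees.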
class PLATFORM_EXPORT SparseHeapBitmap {
public:
explicit SparseHeapBitmap(Address base) : base_(base), size_(1) {
DCHECK(!(reinterpret_cast<uintptr_t>(base_) & kPointerAlignmentMask));
static_assert(kPointerAlignmentMask <= kAllocationMask,
"address shift exceeds heap pointer alignment");
// For now, only recognize 8 and 4.
static_assert(alignof(void*) == 8 || alignof(void*) == 4,
"unsupported pointer alignment");
}
~SparseHeapBitmap() = default;
// Return the sparse bitmap subtree that at least covers the
// [address, address + size) range, or nullptr if none.
//
// The returned SparseHeapBitmap can be used to quickly lookup what
// addresses in that range are set or not; see |isSet()|. Its
// |isSet()| behavior outside that range is not defined.
SparseHeapBitmap* HasRange(Address, size_t);
// True iff |address| is set for this SparseHeapBitmap tree.
bool IsSet(Address);
// Mark |address| as present/set.
void Add(Address);
// The assumed minimum alignment of the pointers being added. Cannot
// exceed |log2(allocationGranularity)|; having it be equal to
// the platform pointer alignment is what's wanted.
static const int kPointerAlignmentInBits = alignof(void*) == 8 ? 3 : 2;
static const size_t kPointerAlignmentMask =
(0x1u << kPointerAlignmentInBits) - 1;
// Represent ranges in 0x100 bitset chunks; bit I is set iff Address
// |m_base + I * (0x1 << s_pointerAlignmentInBits)| has been added to the
// |SparseHeapBitmap|.
static const size_t kBitmapChunkSize = 0x100;
// A SparseHeapBitmap either contains a single Address or a bitmap
// recording the mapping for [m_base, m_base + s_bitmapChunkRange)
static const size_t kBitmapChunkRange = kBitmapChunkSize
<< kPointerAlignmentInBits;
// Return the number of nodes; for debug stats.
size_t IntervalCount() const;
private:
Address Base() const { return base_; }
size_t size() const { return size_; }
Address end() const { return Base() + (size_ - 1); }
Address MaxEnd() const { return Base() + kBitmapChunkRange; }
Address MinStart() const {
// If this bitmap node represents the sparse [m_base, s_bitmapChunkRange)
// range, do not allow it to be "left extended" as that would entail
// having to shift down the contents of the std::bitset somehow.
//
// This shouldn't be a real problem as any clusters of set addresses
// will be marked while iterating from lower to higher addresses, hence
// "left extension" are unlikely to be common.
if (bitmap_)
return Base();
return (base_ > reinterpret_cast<Address>(kBitmapChunkRange))
? (Base() - kBitmapChunkRange + 1)
: nullptr;
}
Address SwapBase(Address address) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPointerAlignmentMask));
Address old_base = base_;
base_ = address;
return old_base;
}
void CreateBitmap();
Address base_;
// Either 1 or |s_bitmapChunkRange|.
size_t size_;
// If non-null, contains a bitmap for addresses within [m_base, m_size)
std::unique_ptr<std::bitset<kBitmapChunkSize>> bitmap_;
std::unique_ptr<SparseHeapBitmap> left_;
std::unique_ptr<SparseHeapBitmap> right_;
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_SPARSE_HEAP_BITMAP_H_
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/platform/heap/sparse_heap_bitmap.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace blink {
namespace {
constexpr size_t kChunkRange = SparseHeapBitmap::kBitmapChunkRange;
constexpr size_t kUnitPointer = 0x1u
<< SparseHeapBitmap::kPointerAlignmentInBits;
} // namespace
TEST(SparseHeapBitmapTest, SparseBitmapBasic) {
Address base = reinterpret_cast<Address>(0x10000u);
auto bitmap = std::make_unique<SparseHeapBitmap>(base);
size_t double_chunk = 2 * kChunkRange;
// 101010... starting at |base|.
for (size_t i = 0; i < double_chunk; i += 2 * kUnitPointer)
bitmap->Add(base + i);
// Check that hasRange() returns a bitmap subtree, if any, for a given
// address.
EXPECT_TRUE(!!bitmap->HasRange(base, 1));
EXPECT_TRUE(!!bitmap->HasRange(base + kUnitPointer, 1));
EXPECT_FALSE(!!bitmap->HasRange(base - kUnitPointer, 1));
// Test implementation details.. that each SparseHeapBitmap node maps
// |s_bitmapChunkRange| ranges only.
EXPECT_EQ(bitmap->HasRange(base + kUnitPointer, 1),
bitmap->HasRange(base + 2 * kUnitPointer, 1));
// Second range will be just past the first.
EXPECT_NE(bitmap->HasRange(base, 1), bitmap->HasRange(base + kChunkRange, 1));
// Iterate a range that will encompass more than one 'chunk'.
SparseHeapBitmap* start =
bitmap->HasRange(base + 2 * kUnitPointer, double_chunk);
EXPECT_TRUE(!!start);
for (size_t i = 2 * kUnitPointer; i < double_chunk; i += 2 * kUnitPointer) {
EXPECT_TRUE(start->IsSet(base + i));
EXPECT_FALSE(start->IsSet(base + i + kUnitPointer));
}
}
TEST(SparseHeapBitmapTest, SparseBitmapBuild) {
Address base = reinterpret_cast<Address>(0x10000u);
auto bitmap = std::make_unique<SparseHeapBitmap>(base);
size_t double_chunk = 2 * kChunkRange;
// Create a sparse bitmap containing at least three chunks.
bitmap->Add(base - double_chunk);
bitmap->Add(base + double_chunk);
// This is sanity testing internal implementation details of
// SparseHeapBitmap; probing |isSet()| outside the bitmap
// of the range used in |hasRange()|, is not defined.
//
// Regardless, the testing here verifies that a |hasRange()| that
// straddles multiple internal nodes, returns a bitmap that is
// capable of returning correct |isSet()| results.
SparseHeapBitmap* start = bitmap->HasRange(
base - double_chunk - 2 * kUnitPointer, 4 * kUnitPointer);
EXPECT_TRUE(!!start);
EXPECT_TRUE(start->IsSet(base - double_chunk));
EXPECT_FALSE(start->IsSet(base - double_chunk + kUnitPointer));
EXPECT_FALSE(start->IsSet(base));
EXPECT_FALSE(start->IsSet(base + kUnitPointer));
EXPECT_FALSE(start->IsSet(base + double_chunk));
EXPECT_FALSE(start->IsSet(base + double_chunk + kUnitPointer));
start = bitmap->HasRange(base - double_chunk - 2 * kUnitPointer,
2 * double_chunk + 2 * kUnitPointer);
EXPECT_TRUE(!!start);
EXPECT_TRUE(start->IsSet(base - double_chunk));
EXPECT_FALSE(start->IsSet(base - double_chunk + kUnitPointer));
EXPECT_TRUE(start->IsSet(base));
EXPECT_FALSE(start->IsSet(base + kUnitPointer));
EXPECT_TRUE(start->IsSet(base + double_chunk));
EXPECT_FALSE(start->IsSet(base + double_chunk + kUnitPointer));
start = bitmap->HasRange(base, 20);
EXPECT_TRUE(!!start);
// Probing for values outside of hasRange() should be considered
// undefined, but do it to exercise the (left) tree traversal.
EXPECT_TRUE(start->IsSet(base - double_chunk));
EXPECT_FALSE(start->IsSet(base - double_chunk + kUnitPointer));
EXPECT_TRUE(start->IsSet(base));
EXPECT_FALSE(start->IsSet(base + kUnitPointer));
EXPECT_TRUE(start->IsSet(base + double_chunk));
EXPECT_FALSE(start->IsSet(base + double_chunk + kUnitPointer));
start = bitmap->HasRange(base + kChunkRange + 2 * kUnitPointer, 2048);
EXPECT_TRUE(!!start);
// Probing for values outside of hasRange() should be considered
// undefined, but do it to exercise node traversal.
EXPECT_FALSE(start->IsSet(base - double_chunk));
EXPECT_FALSE(start->IsSet(base - double_chunk + kUnitPointer));
EXPECT_FALSE(start->IsSet(base));
EXPECT_FALSE(start->IsSet(base + kUnitPointer));
EXPECT_FALSE(start->IsSet(base + kChunkRange));
EXPECT_TRUE(start->IsSet(base + double_chunk));
EXPECT_FALSE(start->IsSet(base + double_chunk + kUnitPointer));
}
TEST(SparseHeapBitmapTest, SparseBitmapLeftExtension) {
Address base = reinterpret_cast<Address>(0x10000u);
auto bitmap = std::make_unique<SparseHeapBitmap>(base);
SparseHeapBitmap* start = bitmap->HasRange(base, 1);
EXPECT_TRUE(start);
// Verify that re-adding is a no-op.
bitmap->Add(base);
EXPECT_EQ(start, bitmap->HasRange(base, 1));
// Adding an Address |A| before a single-address SparseHeapBitmap node should
// cause that node to be "left extended" to use |A| as its new base.
bitmap->Add(base - 2 * kUnitPointer);
EXPECT_EQ(bitmap->HasRange(base, 1),
bitmap->HasRange(base - 2 * kUnitPointer, 1));
// Reset.
bitmap = std::make_unique<SparseHeapBitmap>(base);
// If attempting same as above, but the Address |A| is outside the
// chunk size of a node, a new SparseHeapBitmap node needs to be
// created to the left of |bitmap|.
bitmap->Add(base - kChunkRange);
EXPECT_NE(bitmap->HasRange(base, 1),
bitmap->HasRange(base - 2 * kUnitPointer, 1));
bitmap = std::make_unique<SparseHeapBitmap>(base);
bitmap->Add(base - kChunkRange + kUnitPointer);
// This address is just inside the horizon and shouldn't create a new chunk.
EXPECT_EQ(bitmap->HasRange(base, 1),
bitmap->HasRange(base - 2 * kUnitPointer, 1));
// ..but this one should, like for the sub-test above.
bitmap->Add(base - kChunkRange);
EXPECT_EQ(bitmap->HasRange(base, 1),
bitmap->HasRange(base - 2 * kUnitPointer, 1));
EXPECT_NE(bitmap->HasRange(base, 1), bitmap->HasRange(base - kChunkRange, 1));
}
} // namespace blink