Commit b34ff38e authored by Xianzhu Wang, committed by Commit Bot

Remove unused ContiguousContainer methods and cleanups

- Remove unused ContiguousContainer methods such as Clear(), Swap(),
  etc.
- Replace RemoveLast() with ReplaceLastByMoving(). This removes the
  logic to deal with removed items, making ContiguousContainer
  appending only.
- Let ContiguousContainer store Buffer directly in the vector instead of
  through unique_ptr.
- Use 'item' instead of 3 different terms 'item', 'element', 'object' in
  ContiguousContainer
- Remove DisplayItem::IsMovedFromCachedSubsequence() because with the
  new PaintChunkSubset we can use
  PaintChunk.is_moved_from_cached_subsequence.
- etc.

Bug: 917911
Change-Id: I8b3004d001ea12a00fed96fc3e031986db1ee005
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2501064
Reviewed-by: Stefan Zager <szager@chromium.org>
Reviewed-by: Stephen Chenney <schenney@chromium.org>
Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#822912}
parent 4566783a
......@@ -7,167 +7,53 @@
#include <algorithm>
#include <memory>
#include "base/macros.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
#include "third_party/blink/renderer/platform/wtf/container_annotations.h"
namespace blink {
class ContiguousContainerBase::Buffer {
USING_FAST_MALLOC(Buffer);
public:
Buffer(wtf_size_t buffer_size, const char* type_name) {
capacity_ = WTF::Partitions::BufferActualSize(buffer_size);
begin_ = end_ =
static_cast<char*>(WTF::Partitions::BufferMalloc(capacity_, type_name));
ANNOTATE_NEW_BUFFER(begin_, capacity_, 0);
}
~Buffer() {
ANNOTATE_DELETE_BUFFER(begin_, capacity_, UsedCapacity());
WTF::Partitions::BufferFree(begin_);
}
wtf_size_t Capacity() const { return capacity_; }
wtf_size_t UsedCapacity() const { return end_ - begin_; }
wtf_size_t UnusedCapacity() const { return Capacity() - UsedCapacity(); }
bool IsEmpty() const { return UsedCapacity() == 0; }
void* Allocate(wtf_size_t object_size) {
DCHECK_GE(UnusedCapacity(), object_size);
ANNOTATE_CHANGE_SIZE(begin_, capacity_, UsedCapacity(),
UsedCapacity() + object_size);
void* result = end_;
end_ += object_size;
return result;
}
void DeallocateLastObject(void* object) {
CHECK_LE(begin_, object);
CHECK_LT(object, end_);
ANNOTATE_CHANGE_SIZE(begin_, capacity_, UsedCapacity(),
static_cast<char*>(object) - begin_);
end_ = static_cast<char*>(object);
}
private:
// begin_ <= end_ <= begin_ + capacity_
char* begin_;
char* end_;
wtf_size_t capacity_;
DISALLOW_COPY_AND_ASSIGN(Buffer);
};
ContiguousContainerBase::ContiguousContainerBase(
wtf_size_t max_object_size,
wtf_size_t max_item_size,
wtf_size_t initial_capacity_in_bytes)
: end_index_(0),
max_object_size_(max_object_size),
: max_item_size_(max_item_size),
initial_capacity_in_bytes_(
std::max(max_object_size, initial_capacity_in_bytes)) {}
ContiguousContainerBase::ContiguousContainerBase(
ContiguousContainerBase&& source)
: ContiguousContainerBase(source.max_object_size_,
source.initial_capacity_in_bytes_) {
Swap(source);
}
std::max(max_item_size, initial_capacity_in_bytes)) {}
ContiguousContainerBase::~ContiguousContainerBase() = default;
ContiguousContainerBase& ContiguousContainerBase::operator=(
ContiguousContainerBase&& source) {
Swap(source);
return *this;
}
wtf_size_t ContiguousContainerBase::CapacityInBytes() const {
wtf_size_t capacity = 0;
for (const auto& buffer : buffers_)
capacity += buffer->Capacity();
capacity += buffer.Capacity();
return capacity;
}
wtf_size_t ContiguousContainerBase::UsedCapacityInBytes() const {
wtf_size_t used_capacity = 0;
for (const auto& buffer : buffers_)
used_capacity += buffer->UsedCapacity();
used_capacity += buffer.UsedCapacity();
return used_capacity;
}
wtf_size_t ContiguousContainerBase::MemoryUsageInBytes() const {
return sizeof(*this) + CapacityInBytes() + elements_.CapacityInBytes();
return sizeof(*this) + CapacityInBytes() + items_.CapacityInBytes();
}
void* ContiguousContainerBase::Allocate(wtf_size_t object_size,
uint8_t* ContiguousContainerBase::Allocate(wtf_size_t item_size,
const char* type_name) {
DCHECK_LE(object_size, max_object_size_);
DCHECK_LE(item_size, max_item_size_);
Buffer* buffer_for_alloc = nullptr;
if (!buffers_.IsEmpty()) {
Buffer* end_buffer = buffers_[end_index_].get();
if (end_buffer->UnusedCapacity() >= object_size)
buffer_for_alloc = end_buffer;
else if (end_index_ + 1 < buffers_.size())
buffer_for_alloc = buffers_[++end_index_].get();
}
if (!buffers_.IsEmpty() && buffers_.back().UnusedCapacity() >= item_size)
buffer_for_alloc = &buffers_.back();
if (!buffer_for_alloc) {
wtf_size_t new_buffer_size = buffers_.IsEmpty()
? initial_capacity_in_bytes_
: 2 * buffers_.back()->Capacity();
buffer_for_alloc =
AllocateNewBufferForNextAllocation(new_buffer_size, type_name);
: 2 * buffers_.back().Capacity();
buffer_for_alloc = &buffers_.emplace_back(new_buffer_size, type_name);
}
void* element = buffer_for_alloc->Allocate(object_size);
elements_.push_back(element);
return element;
}
void ContiguousContainerBase::RemoveLast() {
void* object = elements_.back();
elements_.pop_back();
Buffer* end_buffer = buffers_[end_index_].get();
end_buffer->DeallocateLastObject(object);
if (end_buffer->IsEmpty()) {
if (end_index_ > 0)
end_index_--;
if (end_index_ + 2 < buffers_.size())
buffers_.pop_back();
}
}
void ContiguousContainerBase::Clear() {
elements_.clear();
buffers_.clear();
end_index_ = 0;
}
void ContiguousContainerBase::Swap(ContiguousContainerBase& other) {
elements_.swap(other.elements_);
buffers_.swap(other.buffers_);
std::swap(end_index_, other.end_index_);
std::swap(max_object_size_, other.max_object_size_);
std::swap(initial_capacity_in_bytes_, other.initial_capacity_in_bytes_);
}
ContiguousContainerBase::Buffer*
ContiguousContainerBase::AllocateNewBufferForNextAllocation(
wtf_size_t buffer_size,
const char* type_name) {
DCHECK(buffers_.IsEmpty() || end_index_ == buffers_.size() - 1);
std::unique_ptr<Buffer> new_buffer =
std::make_unique<Buffer>(buffer_size, type_name);
Buffer* buffer_to_return = new_buffer.get();
buffers_.push_back(std::move(new_buffer));
end_index_ = buffers_.size() - 1;
return buffer_to_return;
uint8_t* item = buffer_for_alloc->Allocate(item_size);
items_.push_back(item);
return item;
}
} // namespace blink
......@@ -11,28 +11,28 @@
#include <utility>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/type_traits.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
#include "third_party/blink/renderer/platform/wtf/container_annotations.h"
#include "third_party/blink/renderer/platform/wtf/vector.h"
namespace blink {
// ContiguousContainer is a container which stores a list of heterogeneous
// objects (in particular, of varying sizes), packed next to one another in
// memory. Objects are never relocated, so it is safe to store pointers to them
// for the lifetime of the container (unless the object is removed).
// items (in particular, of varying sizes), packed next to one another in
// memory. Items are never relocated, so it is safe to store pointers to them
// for the lifetime of the container (unless the item is removed).
//
// Memory is allocated in a series of buffers (with exponential growth). When an
// object is allocated, it is given only the space it requires (possibly with
// item is allocated, it is given only the space it requires (possibly with
// enough padding to preserve alignment), rather than the maximum possible size.
// This allows small and large objects to coexist without wasting much space.
// This allows small and large items to coexist without wasting much space.
//
// Since it stores pointers to all of the objects it allocates in a vector, it
// Since it stores pointers to all of the items it allocates in a vector, it
// supports efficient iteration and indexing. However, for mutation the
// supported operations are limited to appending to, and removing from, the end
// of the list.
// supported operations are limited to appending to the end of the list and
// replacing the last item.
//
// Clients should instantiate ContiguousContainer; ContiguousContainerBase is an
// artifact of the implementation.
......@@ -40,40 +40,79 @@ namespace blink {
class PLATFORM_EXPORT ContiguousContainerBase {
DISALLOW_NEW();
public:
ContiguousContainerBase(const ContiguousContainerBase&) = delete;
ContiguousContainerBase& operator=(const ContiguousContainerBase&) = delete;
ContiguousContainerBase(ContiguousContainerBase&&) = delete;
ContiguousContainerBase& operator=(ContiguousContainerBase&&) = delete;
protected:
// The initial capacity will be allocated when the first item is added.
ContiguousContainerBase(wtf_size_t max_object_size,
ContiguousContainerBase(wtf_size_t max_item_size,
wtf_size_t initial_capacity_in_bytes);
ContiguousContainerBase(ContiguousContainerBase&&);
~ContiguousContainerBase();
ContiguousContainerBase& operator=(ContiguousContainerBase&&);
wtf_size_t size() const { return elements_.size(); }
wtf_size_t size() const { return items_.size(); }
bool IsEmpty() const { return !size(); }
wtf_size_t CapacityInBytes() const;
wtf_size_t UsedCapacityInBytes() const;
wtf_size_t MemoryUsageInBytes() const;
// These do not invoke constructors or destructors.
void* Allocate(wtf_size_t object_size, const char* type_name);
void RemoveLast();
void Clear();
void Swap(ContiguousContainerBase&);
uint8_t* Allocate(wtf_size_t item_size, const char* type_name);
wtf_size_t LastItemSize() const {
return static_cast<wtf_size_t>(buffers_.back().End() - items_.back());
}
Vector<void*> elements_;
using ItemVector = Vector<uint8_t*>;
ItemVector items_;
private:
class Buffer;
class Buffer {
public:
Buffer(wtf_size_t buffer_size, const char* type_name)
: capacity_(static_cast<wtf_size_t>(
WTF::Partitions::BufferActualSize(buffer_size))),
begin_(static_cast<uint8_t*>(
WTF::Partitions::BufferMalloc(capacity_, type_name))),
end_(begin_) {
ANNOTATE_NEW_BUFFER(begin_, capacity_, 0);
}
~Buffer() {
ANNOTATE_DELETE_BUFFER(begin_, capacity_, UsedCapacity());
WTF::Partitions::BufferFree(begin_);
}
wtf_size_t Capacity() const { return capacity_; }
wtf_size_t UsedCapacity() const {
return static_cast<wtf_size_t>(end_ - begin_);
}
wtf_size_t UnusedCapacity() const { return Capacity() - UsedCapacity(); }
bool IsEmpty() const { return UsedCapacity() == 0; }
uint8_t* Allocate(wtf_size_t item_size) {
DCHECK_GE(UnusedCapacity(), item_size);
ANNOTATE_CHANGE_SIZE(begin_, capacity_, UsedCapacity(),
UsedCapacity() + item_size);
uint8_t* result = end_;
end_ += item_size;
return result;
}
Buffer* AllocateNewBufferForNextAllocation(wtf_size_t, const char* type_name);
uint8_t* End() const { return end_; }
Vector<std::unique_ptr<Buffer>> buffers_;
unsigned end_index_;
wtf_size_t max_object_size_;
wtf_size_t initial_capacity_in_bytes_;
private:
// begin_ <= end_ <= begin_ + capacity_
wtf_size_t capacity_;
uint8_t* begin_;
uint8_t* end_;
};
DISALLOW_COPY_AND_ASSIGN(ContiguousContainerBase);
Vector<Buffer> buffers_;
wtf_size_t max_item_size_;
wtf_size_t initial_capacity_in_bytes_;
};
// For most cases, no alignment stricter than pointer alignment is required. If
......@@ -82,7 +121,7 @@ class PLATFORM_EXPORT ContiguousContainerBase {
// alignments. For small structs without pointers, it may be possible to reduce
// alignment for tighter packing.
template <class BaseElementType, unsigned alignment = sizeof(void*)>
template <class BaseItemType, unsigned alignment = sizeof(void*)>
class ContiguousContainer : public ContiguousContainerBase {
private:
// Declares itself as a forward iterator, but also supports a few more
......@@ -103,7 +142,7 @@ class ContiguousContainer : public ContiguousContainerBase {
bool operator<(const IteratorWrapper& other) const {
return it_ < other.it_;
}
ValueType& operator*() const { return *static_cast<ValueType*>(*it_); }
ValueType& operator*() const { return *reinterpret_cast<ValueType*>(*it_); }
ValueType* operator->() const { return &operator*(); }
IteratorWrapper operator+(std::ptrdiff_t n) const {
return IteratorWrapper(it_ + n);
......@@ -128,39 +167,25 @@ class ContiguousContainer : public ContiguousContainerBase {
};
public:
using iterator = IteratorWrapper<Vector<void*>::iterator, BaseElementType>;
using iterator = IteratorWrapper<ItemVector::iterator, BaseItemType>;
using const_iterator =
IteratorWrapper<Vector<void*>::const_iterator, const BaseElementType>;
IteratorWrapper<ItemVector::const_iterator, const BaseItemType>;
using reverse_iterator =
IteratorWrapper<Vector<void*>::reverse_iterator, BaseElementType>;
IteratorWrapper<ItemVector::reverse_iterator, BaseItemType>;
using const_reverse_iterator =
IteratorWrapper<Vector<void*>::const_reverse_iterator,
const BaseElementType>;
IteratorWrapper<ItemVector::const_reverse_iterator, const BaseItemType>;
using value_type = BaseElementType;
using value_type = BaseItemType;
ContiguousContainer(wtf_size_t max_object_size,
ContiguousContainer(wtf_size_t max_item_size,
wtf_size_t initial_capacity_in_bytes)
: ContiguousContainerBase(Align(max_object_size),
: ContiguousContainerBase(Align(max_item_size),
initial_capacity_in_bytes) {}
ContiguousContainer(ContiguousContainer&& source)
: ContiguousContainerBase(std::move(source)) {}
~ContiguousContainer() {
for (auto& element : *this) {
(void)element; // MSVC incorrectly reports this variable as unused.
element.~BaseElementType();
}
for (auto& item : *this) {
(void)item; // MSVC incorrectly reports this variable as unused.
item.~BaseItemType();
}
ContiguousContainer& operator=(ContiguousContainer&& source) {
// Must clear in the derived class to ensure that element destructors
// care called.
Clear();
ContiguousContainerBase::operator=(std::move(source));
return *this;
}
using ContiguousContainerBase::CapacityInBytes;
......@@ -169,71 +194,63 @@ class ContiguousContainer : public ContiguousContainerBase {
using ContiguousContainerBase::size;
using ContiguousContainerBase::UsedCapacityInBytes;
iterator begin() { return iterator(elements_.begin()); }
iterator end() { return iterator(elements_.end()); }
const_iterator begin() const { return const_iterator(elements_.begin()); }
const_iterator end() const { return const_iterator(elements_.end()); }
reverse_iterator rbegin() { return reverse_iterator(elements_.rbegin()); }
reverse_iterator rend() { return reverse_iterator(elements_.rend()); }
iterator begin() { return iterator(items_.begin()); }
iterator end() { return iterator(items_.end()); }
const_iterator begin() const { return const_iterator(items_.begin()); }
const_iterator end() const { return const_iterator(items_.end()); }
reverse_iterator rbegin() { return reverse_iterator(items_.rbegin()); }
reverse_iterator rend() { return reverse_iterator(items_.rend()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(elements_.rbegin());
return const_reverse_iterator(items_.rbegin());
}
const_reverse_iterator rend() const {
return const_reverse_iterator(elements_.rend());
return const_reverse_iterator(items_.rend());
}
BaseElementType& First() { return *begin(); }
const BaseElementType& First() const { return *begin(); }
BaseElementType& Last() { return *rbegin(); }
const BaseElementType& Last() const { return *rbegin(); }
BaseElementType& operator[](wtf_size_t index) { return *(begin() + index); }
const BaseElementType& operator[](wtf_size_t index) const {
BaseItemType& front() { return *begin(); }
const BaseItemType& front() const { return *begin(); }
BaseItemType& back() { return *rbegin(); }
const BaseItemType& back() const { return *rbegin(); }
BaseItemType& operator[](wtf_size_t index) { return *(begin() + index); }
const BaseItemType& operator[](wtf_size_t index) const {
return *(begin() + index);
}
template <class DerivedElementType, typename... Args>
DerivedElementType& AllocateAndConstruct(Args&&... args) {
static_assert(WTF::IsSubclass<DerivedElementType, BaseElementType>::value,
"Must use subclass of BaseElementType.");
static_assert(alignment % alignof(DerivedElementType) == 0,
template <class DerivedItemType, typename... Args>
DerivedItemType& AllocateAndConstruct(Args&&... args) {
static_assert(WTF::IsSubclass<DerivedItemType, BaseItemType>::value,
"Must use subclass of BaseItemType.");
static_assert(alignment % alignof(DerivedItemType) == 0,
"Derived type requires stronger alignment.");
return *new (AlignedAllocate(sizeof(DerivedElementType)))
DerivedElementType(std::forward<Args>(args)...);
}
void RemoveLast() {
DCHECK(!IsEmpty());
Last().~BaseElementType();
ContiguousContainerBase::RemoveLast();
}
DISABLE_CFI_PERF
void Clear() {
for (auto& element : *this) {
(void)element; // MSVC incorrectly reports this variable as unused.
element.~BaseElementType();
}
ContiguousContainerBase::Clear();
return *new (AlignedAllocate(sizeof(DerivedItemType)))
DerivedItemType(std::forward<Args>(args)...);
}
void Swap(ContiguousContainer& other) {
ContiguousContainerBase::Swap(other);
}
// Appends a new element using memcpy, then default-constructs a base
// element in its place. Use with care.
BaseElementType& AppendByMoving(BaseElementType& item, wtf_size_t size) {
DCHECK_GE(size, sizeof(BaseElementType));
// Appends a new item using memcpy, then default-constructs a base item
// in its place. Use with care.
BaseItemType& AppendByMoving(BaseItemType& item, wtf_size_t size) {
DCHECK_GE(size, sizeof(BaseItemType));
void* new_item = AlignedAllocate(size);
memcpy(new_item, static_cast<void*>(&item), size);
new (&item) BaseElementType;
return *static_cast<BaseElementType*>(new_item);
new (&item) BaseItemType;
return *static_cast<BaseItemType*>(new_item);
}
// The caller must ensure that |size| (the actual size of |item|) is the same
// as or smaller than the replaced item.
BaseItemType& ReplaceLastByMoving(BaseItemType& item, wtf_size_t size) {
DCHECK_GE(size, sizeof(BaseItemType));
DCHECK_GE(LastItemSize(), size);
back().~BaseItemType();
memcpy(static_cast<void*>(&back()), static_cast<void*>(&item), size);
new (&item) BaseItemType;
return back();
}
private:
void* AlignedAllocate(wtf_size_t size) {
void* result = ContiguousContainerBase::Allocate(
Align(size), WTF_HEAP_PROFILER_TYPE_NAME(BaseElementType));
Align(size), WTF_HEAP_PROFILER_TYPE_NAME(BaseItemType));
DCHECK_EQ(reinterpret_cast<intptr_t>(result) & (alignment - 1), 0u);
return result;
}
......
......@@ -7,6 +7,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"
#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
#include "third_party/blink/renderer/platform/wtf/type_traits.h"
namespace blink {
......@@ -60,11 +61,8 @@ TEST(ContiguousContainerTest, SimpleStructs) {
TEST(ContiguousContainerTest, AllocateLots) {
PointList list;
for (int i = 0; i < static_cast<int>(kNumElements); i++) {
list.AllocateAndConstruct<Point2D>(i, i);
for (int i = 0; i < static_cast<int>(kNumElements); i++)
list.AllocateAndConstruct<Point2D>(i, i);
list.RemoveLast();
}
ASSERT_EQ(kNumElements, list.size());
for (int i = 0; i < static_cast<int>(kNumElements); i++) {
ASSERT_EQ(i, list[i].x);
......@@ -91,98 +89,8 @@ class MockDestructibleList : public ContiguousContainer<MockDestructible> {
TEST(ContiguousContainerTest, DestructorCalled) {
MockDestructibleList list;
auto& destructible = list.AllocateAndConstruct<MockDestructible>();
EXPECT_EQ(&destructible, &list.First());
EXPECT_CALL(destructible, Destruct());
}
TEST(ContiguousContainerTest, DestructorCalledOnceWhenClear) {
MockDestructibleList list;
auto& destructible = list.AllocateAndConstruct<MockDestructible>();
EXPECT_EQ(&destructible, &list.First());
testing::MockFunction<void()> separator;
{
testing::InSequence s;
EXPECT_EQ(&destructible, &list.front());
EXPECT_CALL(destructible, Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(destructible, Destruct()).Times(0);
}
list.Clear();
separator.Call();
}
TEST(ContiguousContainerTest, DestructorCalledOnceWhenRemoveLast) {
MockDestructibleList list;
auto& destructible = list.AllocateAndConstruct<MockDestructible>();
EXPECT_EQ(&destructible, &list.First());
testing::MockFunction<void()> separator;
{
testing::InSequence s;
EXPECT_CALL(destructible, Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(destructible, Destruct()).Times(0);
}
list.RemoveLast();
separator.Call();
}
TEST(ContiguousContainerTest, DestructorCalledWithMultipleRemoveLastCalls) {
// This container only requests space for one, but the implementation is
// free to use more space if the allocator provides it.
MockDestructibleList list(1 * sizeof(MockDestructible));
testing::MockFunction<void()> separator;
// We should be okay to allocate and remove a single one, like before.
list.AllocateAndConstruct<MockDestructible>();
EXPECT_EQ(1u, list.size());
{
testing::InSequence s;
EXPECT_CALL(list[0], Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(list[0], Destruct()).Times(0);
}
list.RemoveLast();
separator.Call();
EXPECT_EQ(0u, list.size());
testing::Mock::VerifyAndClearExpectations(&separator);
// We should also be okay to allocate and remove multiple.
list.AllocateAndConstruct<MockDestructible>();
list.AllocateAndConstruct<MockDestructible>();
list.AllocateAndConstruct<MockDestructible>();
list.AllocateAndConstruct<MockDestructible>();
list.AllocateAndConstruct<MockDestructible>();
list.AllocateAndConstruct<MockDestructible>();
EXPECT_EQ(6u, list.size());
{
// The last three should be destroyed by removeLast.
testing::InSequence s;
EXPECT_CALL(list[5], Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(list[5], Destruct()).Times(0);
EXPECT_CALL(list[4], Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(list[4], Destruct()).Times(0);
EXPECT_CALL(list[3], Destruct());
EXPECT_CALL(separator, Call());
EXPECT_CALL(list[3], Destruct()).Times(0);
}
list.RemoveLast();
separator.Call();
list.RemoveLast();
separator.Call();
list.RemoveLast();
separator.Call();
EXPECT_EQ(3u, list.size());
// The remaining ones are destroyed when the test finishes.
EXPECT_CALL(list[2], Destruct());
EXPECT_CALL(list[1], Destruct());
EXPECT_CALL(list[0], Destruct());
}
TEST(ContiguousContainerTest, InsertionAndIndexedAccess) {
......@@ -193,14 +101,14 @@ TEST(ContiguousContainerTest, InsertionAndIndexedAccess) {
auto& point3 = list.AllocateAndConstruct<Point2D>();
EXPECT_EQ(3u, list.size());
EXPECT_EQ(&point1, &list.First());
EXPECT_EQ(&point3, &list.Last());
EXPECT_EQ(&point1, &list.front());
EXPECT_EQ(&point3, &list.back());
EXPECT_EQ(&point1, &list[0]);
EXPECT_EQ(&point2, &list[1]);
EXPECT_EQ(&point3, &list[2]);
}
TEST(ContiguousContainerTest, InsertionAndClear) {
TEST(ContiguousContainerTest, Insertion) {
PointList list;
EXPECT_TRUE(list.IsEmpty());
EXPECT_EQ(0u, list.size());
......@@ -212,18 +120,6 @@ TEST(ContiguousContainerTest, InsertionAndClear) {
EXPECT_EQ(1u, list.size());
EXPECT_GE(list.CapacityInBytes(), kDefaultInitialCapacityInBytes);
EXPECT_EQ(sizeof(Point2D), list.UsedCapacityInBytes());
list.Clear();
EXPECT_TRUE(list.IsEmpty());
EXPECT_EQ(0u, list.size());
EXPECT_EQ(0u, list.CapacityInBytes());
EXPECT_EQ(0u, list.UsedCapacityInBytes());
list.AllocateAndConstruct<Point2D>();
EXPECT_FALSE(list.IsEmpty());
EXPECT_EQ(1u, list.size());
EXPECT_GE(list.CapacityInBytes(), kDefaultInitialCapacityInBytes);
EXPECT_EQ(sizeof(Point2D), list.UsedCapacityInBytes());
}
TEST(ContiguousContainerTest, ElementAddressesAreStable) {
......@@ -299,78 +195,23 @@ bool EqualPointers(It1 it1, const It1& end1, It2 it2) {
return true;
}
TEST(ContiguousContainerTest, IterationAfterRemoveLast) {
struct SmallStruct {
char dummy[16];
};
ContiguousContainer<SmallStruct> list(sizeof(SmallStruct),
1 * sizeof(SmallStruct));
Vector<SmallStruct*> pointers;
// Utilities which keep these two lists in sync and check that their
// iteration order matches.
auto push = [&list, &pointers]() {
pointers.push_back(&list.AllocateAndConstruct<SmallStruct>());
};
auto pop = [&list, &pointers]() {
pointers.pop_back();
list.RemoveLast();
};
auto check_equal = [&list, &pointers]() {
// They should be of the same size, and compare equal with all four
// kinds of iteration.
const auto& const_list = list;
const auto& const_pointers = pointers;
ASSERT_EQ(list.size(), pointers.size());
ASSERT_TRUE(EqualPointers(list.begin(), list.end(), pointers.begin()));
ASSERT_TRUE(EqualPointers(const_list.begin(), const_list.end(),
const_pointers.begin()));
ASSERT_TRUE(EqualPointers(list.rbegin(), list.rend(), pointers.rbegin()));
ASSERT_TRUE(EqualPointers(const_list.rbegin(), const_list.rend(),
const_pointers.rbegin()));
};
// Note that the allocations that actually happen may not match the
// idealized descriptions here, since the implementation takes advantage of
// space available in the underlying allocator.
check_equal(); // Initially empty.
push();
check_equal(); // One full inner list.
push();
check_equal(); // One full, one partially full.
push();
push();
check_equal(); // Two full, one partially full.
pop();
check_equal(); // Two full, one empty.
pop();
check_equal(); // One full, one partially full, one empty.
pop();
check_equal(); // One full, one empty.
push();
pop();
pop();
ASSERT_TRUE(list.IsEmpty());
check_equal(); // Empty.
}
TEST(ContiguousContainerTest, AppendByMovingSameList) {
PointList list;
list.AllocateAndConstruct<Point3D>(1, 2, 3);
// Moves the Point3D to the end, and default-constructs a Point2D in its
// place.
list.AppendByMoving(list.First(), sizeof(Point3D));
EXPECT_EQ(1, list.Last().x);
EXPECT_EQ(2, list.Last().y);
EXPECT_EQ(3, static_cast<const Point3D&>(list.Last()).z);
list.AppendByMoving(list.front(), sizeof(Point3D));
EXPECT_EQ(1, list.back().x);
EXPECT_EQ(2, list.back().y);
EXPECT_EQ(3, static_cast<const Point3D&>(list.back()).z);
EXPECT_EQ(2u, list.size());
// Moves that Point2D to the end, and default-constructs another in its
// place.
list.First().x = 4;
list.AppendByMoving(list.First(), sizeof(Point2D));
EXPECT_EQ(4, list.Last().x);
list.front().x = 4;
list.AppendByMoving(list.front(), sizeof(Point2D));
EXPECT_EQ(4, list.back().x);
EXPECT_EQ(3u, list.size());
}
......@@ -399,7 +240,7 @@ TEST(ContiguousContainerTest, AppendByMovingDoesNotDestruct) {
// Make sure destructor isn't called during appendByMoving.
ContiguousContainer<DestructionNotifier> list2(
sizeof(DestructionNotifier), kDefaultInitialCapacityInBytes);
list2.AppendByMoving(list1.Last(), sizeof(DestructionNotifier));
list2.AppendByMoving(list1.back(), sizeof(DestructionNotifier));
EXPECT_FALSE(destroyed);
}
// But it should be destroyed when list2 is.
......@@ -412,10 +253,10 @@ TEST(ContiguousContainerTest, AppendByMovingReturnsMovedPointer) {
Point2D& point = list1.AllocateAndConstruct<Point2D>();
Point2D& moved_point1 = list2.AppendByMoving(point, sizeof(Point2D));
EXPECT_EQ(&moved_point1, &list2.Last());
EXPECT_EQ(&moved_point1, &list2.back());
Point2D& moved_point2 = list1.AppendByMoving(moved_point1, sizeof(Point2D));
EXPECT_EQ(&moved_point2, &list1.Last());
EXPECT_EQ(&moved_point2, &list1.back());
EXPECT_NE(&moved_point1, &moved_point2);
}
......@@ -424,14 +265,14 @@ TEST(ContiguousContainerTest, AppendByMovingReplacesSourceWithNewElement) {
PointList list2;
list1.AllocateAndConstruct<Point2D>(1, 2);
EXPECT_EQ(1, list1.First().x);
EXPECT_EQ(2, list1.First().y);
EXPECT_EQ(1, list1.front().x);
EXPECT_EQ(2, list1.front().y);
list2.AppendByMoving(list1.First(), sizeof(Point2D));
EXPECT_EQ(0, list1.First().x);
EXPECT_EQ(0, list1.First().y);
EXPECT_EQ(1, list2.First().x);
EXPECT_EQ(2, list2.First().y);
list2.AppendByMoving(list1.front(), sizeof(Point2D));
EXPECT_EQ(0, list1.front().x);
EXPECT_EQ(0, list1.front().y);
EXPECT_EQ(1, list2.front().x);
EXPECT_EQ(2, list2.front().y);
EXPECT_EQ(1u, list1.size());
EXPECT_EQ(1u, list2.size());
......@@ -465,34 +306,6 @@ TEST(ContiguousContainerTest, AppendByMovingElementsOfDifferentSizes) {
EXPECT_EQ(5, list[3].y);
}
TEST(ContiguousContainerTest, Swap) {
PointList list1;
list1.AllocateAndConstruct<Point2D>(1, 2);
PointList list2;
list2.AllocateAndConstruct<Point2D>(3, 4);
list2.AllocateAndConstruct<Point2D>(5, 6);
EXPECT_EQ(1u, list1.size());
EXPECT_EQ(1, list1[0].x);
EXPECT_EQ(2, list1[0].y);
EXPECT_EQ(2u, list2.size());
EXPECT_EQ(3, list2[0].x);
EXPECT_EQ(4, list2[0].y);
EXPECT_EQ(5, list2[1].x);
EXPECT_EQ(6, list2[1].y);
list2.Swap(list1);
EXPECT_EQ(1u, list2.size());
EXPECT_EQ(1, list2[0].x);
EXPECT_EQ(2, list2[0].y);
EXPECT_EQ(2u, list1.size());
EXPECT_EQ(3, list1[0].x);
EXPECT_EQ(4, list1[0].y);
EXPECT_EQ(5, list1[1].x);
EXPECT_EQ(6, list1[1].y);
}
TEST(ContiguousContainerTest, CapacityInBytes) {
const int kIterations = 500;
const wtf_size_t kInitialCapacity = 10 * kMaxPointSize;
......@@ -515,43 +328,32 @@ TEST(ContiguousContainerTest, CapacityInBytes) {
}
}
TEST(ContiguousContainerTest, CapacityInBytesAfterClear) {
// Clearing should restore the capacity of the container to the same as a
// newly allocated one (without reserved capacity requested).
PointList list;
size_t empty_capacity = list.CapacityInBytes();
list.AllocateAndConstruct<Point2D>();
list.AllocateAndConstruct<Point2D>();
list.Clear();
EXPECT_EQ(empty_capacity, list.CapacityInBytes());
}
TEST(ContiguousContainerTest, Alignment) {
const size_t kMaxAlign = alignof(long double);
ContiguousContainer<Point2D, kMaxAlign> list(kMaxPointSize,
kDefaultInitialCapacityInBytes);
list.AllocateAndConstruct<Point2D>();
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AllocateAndConstruct<Point2D>();
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AllocateAndConstruct<Point3D>();
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AllocateAndConstruct<Point3D>();
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AllocateAndConstruct<Point2D>();
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AppendByMoving(list[0], sizeof(Point2D));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AppendByMoving(list[1], sizeof(Point2D));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AppendByMoving(list[2], sizeof(Point3D));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AppendByMoving(list[3], sizeof(Point3D));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
list.AppendByMoving(list[4], sizeof(Point2D));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.Last()) & (kMaxAlign - 1));
EXPECT_EQ(0u, reinterpret_cast<intptr_t>(&list.back()) & (kMaxAlign - 1));
}
} // namespace
......
......@@ -168,8 +168,7 @@ class PLATFORM_EXPORT DisplayItem {
static_cast<unsigned>(client.VisualRectOutsetForRasterEffects())),
draws_content_(draws_content),
is_cacheable_(client.IsCacheable()),
is_tombstone_(false),
is_moved_from_cached_subsequence_(false) {
is_tombstone_(false) {
// |derived_size| must fit in |derived_size_|.
// If it doesn't, enlarge |derived_size_| and fix this assert.
SECURITY_DCHECK(derived_size == derived_size_);
......@@ -258,13 +257,6 @@ class PLATFORM_EXPORT DisplayItem {
bool IsCacheable() const { return is_cacheable_; }
void SetUncacheable() { is_cacheable_ = false; }
bool IsMovedFromCachedSubsequence() const {
return is_moved_from_cached_subsequence_;
}
void SetMovedFromCachedSubsequence(bool b) {
is_moved_from_cached_subsequence_ = b;
}
virtual bool Equals(const DisplayItem& other) const {
// Failure of this DCHECK would cause bad casts in subclasses.
SECURITY_CHECK(!is_tombstone_);
......@@ -309,7 +301,6 @@ class PLATFORM_EXPORT DisplayItem {
unsigned draws_content_ : 1;
unsigned is_cacheable_ : 1;
unsigned is_tombstone_ : 1;
unsigned is_moved_from_cached_subsequence_ : 1;
};
inline bool operator==(const DisplayItem::Id& a, const DisplayItem::Id& b) {
......
......@@ -37,36 +37,20 @@ class PLATFORM_EXPORT DisplayItemList
? initial_capacity_in_bytes
: kDefaultCapacityInBytes) {}
DisplayItemList(DisplayItemList&& source)
: ContiguousContainer(std::move(source)) {}
DisplayItemList& operator=(DisplayItemList&& source) {
ContiguousContainer::operator=(std::move(source));
return *this;
}
DisplayItem& AppendByMoving(DisplayItem& item) {
SECURITY_CHECK(!item.IsTombstone());
DisplayItem& result =
ContiguousContainer::AppendByMoving(item, item.DerivedSize());
// ContiguousContainer::AppendByMoving() calls an in-place constructor
// on item which replaces it with a tombstone/"dead display item" that
// can be safely destructed but should never be used except for debugging
// and raster invalidation (see below).
DCHECK(item.IsTombstone());
// We need |visual_rect_| and |outset_for_raster_effects_| of the old
// display item for raster invalidation. Also, the fields that make up the
// ID (|client_|, |type_| and |fragment_|) need to match. As their values
// were either initialized to default values or were left uninitialized by
// DisplayItem's default constructor, now copy their original values back
// from |result|.
item.client_ = result.client_;
item.type_ = result.type_;
item.fragment_ = result.fragment_;
DCHECK(item.GetId() == result.GetId());
item.visual_rect_ = result.visual_rect_;
item.raster_effect_outset_ = result.raster_effect_outset_;
result.SetMovedFromCachedSubsequence(false);
SetupTombstone(item, result);
return result;
}
// Replaces the last display item in this list with |item|, moving |item|'s
// contents into the slot in place. The replaced slot must hold an item of
// the same derived size (checked by the DCHECK_EQ below), because the move
// reuses the existing storage. |item| is left as a tombstone (see
// SetupTombstone()) and the reference to the in-list item is returned.
DisplayItem& ReplaceLastByMoving(DisplayItem& item) {
// A tombstone has already been moved from; moving from it again would be
// a use-after-move style error, hence the security check.
SECURITY_CHECK(!item.IsTombstone());
DCHECK_EQ(back().DerivedSize(), item.DerivedSize());
DisplayItem& result =
ContiguousContainer::ReplaceLastByMoving(item, item.DerivedSize());
SetupTombstone(item, result);
return result;
}
......@@ -116,6 +100,26 @@ class PLATFORM_EXPORT DisplayItemList
const Range<const_iterator>& display_items,
JsonFlags);
#endif // DCHECK_IS_ON()
private:
// Called by AppendByMoving() and ReplaceLastByMoving() which created a
// tombstone/"dead display item" that can be safely destructed but should
// never be used except for debugging and raster invalidation.
void SetupTombstone(DisplayItem& item, const DisplayItem& new_item) {
DCHECK(item.IsTombstone());
// We need |visual_rect_| and |raster_effect_outset_| of the old
// display item for raster invalidation. Also, the fields that make up the
// ID (|client_|, |type_| and |fragment_|) need to match. As their values
// were either initialized to default values or were left uninitialized by
// DisplayItem's default constructor, now copy their original values back
// from |new_item|.
item.client_ = new_item.client_;
item.type_ = new_item.type_;
item.fragment_ = new_item.fragment_;
DCHECK_EQ(item.GetId(), new_item.GetId());
item.visual_rect_ = new_item.visual_rect_;
item.raster_effect_outset_ = new_item.raster_effect_outset_;
}
};
using DisplayItemIterator = DisplayItemList::const_iterator;
......
......@@ -43,7 +43,7 @@ PaintController::~PaintController() {
void PaintController::EnsureChunk() {
if (paint_chunker_.EnsureChunk())
DidAppendChunk();
CheckNewChunk();
}
void PaintController::RecordHitTestData(const DisplayItemClient& client,
......@@ -65,7 +65,7 @@ void PaintController::RecordHitTestData(const DisplayItemClient& client,
CheckDuplicatePaintChunkId(id);
if (paint_chunker_.AddHitTestDataToCurrentChunk(id, rect, touch_action,
blocking_wheel))
DidAppendChunk();
CheckNewChunk();
}
void PaintController::RecordScrollHitTestData(
......@@ -76,7 +76,7 @@ void PaintController::RecordScrollHitTestData(
PaintChunk::Id id(client, type, current_fragment_);
CheckDuplicatePaintChunkId(id);
paint_chunker_.CreateScrollHitTestChunk(id, scroll_translation, rect);
DidAppendChunk();
CheckNewChunk();
}
void PaintController::SetPossibleBackgroundColor(
......@@ -87,7 +87,7 @@ void PaintController::SetPossibleBackgroundColor(
current_fragment_);
CheckDuplicatePaintChunkId(id);
if (paint_chunker_.ProcessBackgroundColorCandidate(id, color, area))
DidAppendChunk();
CheckNewChunk();
}
bool PaintController::UseCachedItemIfPossible(const DisplayItemClient& client,
......@@ -280,7 +280,7 @@ void PaintController::EndSubsequence(const DisplayItemClient& client,
&client, SubsequenceMarkers{start_chunk_index, end_chunk_index});
}
void PaintController::DidAppendItem(DisplayItem& display_item) {
void PaintController::CheckNewItem(DisplayItem& display_item) {
if (usage_ == kTransient)
return;
......@@ -313,7 +313,7 @@ void PaintController::ProcessNewItem(DisplayItem& display_item) {
}
if (paint_chunker_.IncrementDisplayItemIndex(display_item))
DidAppendChunk();
CheckNewChunk();
if (!frame_first_paints_.back().first_painted && display_item.IsDrawing() &&
// Here we ignore all document-background paintings because we don't
......@@ -325,7 +325,7 @@ void PaintController::ProcessNewItem(DisplayItem& display_item) {
SetFirstPainted();
}
DidAppendItem(display_item);
CheckNewItem(display_item);
}
DisplayItem& PaintController::MoveItemFromCurrentListToNewList(
......@@ -334,7 +334,7 @@ DisplayItem& PaintController::MoveItemFromCurrentListToNewList(
current_paint_artifact_->GetDisplayItemList()[index]);
}
void PaintController::DidAppendChunk() {
void PaintController::CheckNewChunk() {
#if DCHECK_IS_ON()
auto& chunks = new_paint_artifact_->PaintChunks();
if (chunks.back().is_cacheable) {
......@@ -374,7 +374,7 @@ void PaintController::UpdateCurrentPaintChunkProperties(
void PaintController::AppendChunkByMoving(PaintChunk&& chunk) {
CheckDuplicatePaintChunkId(chunk.id);
paint_chunker_.AppendByMoving(std::move(chunk));
DidAppendChunk();
CheckNewChunk();
}
bool PaintController::ClientCacheIsValid(
......@@ -504,9 +504,7 @@ void PaintController::CopyCachedSubsequence(wtf_size_t start_chunk_index,
SECURITY_CHECK(!cached_item.IsTombstone());
DCHECK(!cached_item.IsCacheable() ||
ClientCacheIsValid(cached_item.Client()));
auto& item = MoveItemFromCurrentListToNewList(cached_item_index++);
item.SetMovedFromCachedSubsequence(true);
DidAppendItem(item);
CheckNewItem(MoveItemFromCurrentListToNewList(cached_item_index++));
}
DCHECK_EQ(cached_item_index, cached_chunk.end_index);
......@@ -593,20 +591,20 @@ void PaintController::FinishCycle() {
chunk.client_is_just_created = false;
const auto& client = chunk.id.client;
if (chunk.is_moved_from_cached_subsequence) {
DCHECK(!chunk.is_cacheable || ClientCacheIsValid(client));
// We don't need to validate the clients of paint chunks and display
// items that are moved from a cached subsequence, because they should be
// already valid. See http://crbug.com/1050090 for more details.
#if DCHECK_IS_ON()
DCHECK(ClientCacheIsValid(client));
for (const auto& item : current_paint_artifact_->DisplayItemsInChunk(i))
DCHECK(!item.IsCacheable() || ClientCacheIsValid(item.Client()));
#endif
continue;
}
if (client.IsCacheable())
client.Validate();
for (const auto& item : current_paint_artifact_->DisplayItemsInChunk(i)) {
if (item.IsMovedFromCachedSubsequence()) {
// We don't need to validate the clients of a display item that is
// copied from a cached subsequence, because it should be already
// valid. See http://crbug.com/1050090 for more details.
DCHECK(!item.IsCacheable() || ClientCacheIsValid(item.Client()));
continue;
}
item.Client().ClearPartialInvalidationVisualRect();
if (item.Client().IsCacheable())
item.Client().Validate();
......@@ -716,7 +714,7 @@ void PaintController::CheckUnderInvalidation() {
return;
}
DisplayItem& new_item = new_paint_artifact_->GetDisplayItemList().Last();
DisplayItem& new_item = new_paint_artifact_->GetDisplayItemList().back();
auto old_item_index = under_invalidation_checking_begin_;
DisplayItem* old_item =
old_item_index < current_paint_artifact_->GetDisplayItemList().size()
......@@ -736,8 +734,8 @@ void PaintController::CheckUnderInvalidation() {
// non-under-invalidation-checking path to empty the original cached slot,
// leaving only disappeared or invalidated display items in the old list after
// painting.
new_paint_artifact_->GetDisplayItemList().RemoveLast();
MoveItemFromCurrentListToNewList(old_item_index);
new_paint_artifact_->GetDisplayItemList().ReplaceLastByMoving(
current_paint_artifact_->GetDisplayItemList()[old_item_index]);
++under_invalidation_checking_begin_;
}
......
......@@ -286,9 +286,9 @@ class PLATFORM_EXPORT PaintController {
// Set new item state (cache skipping, etc) for the last new display item.
void ProcessNewItem(DisplayItem&);
void DidAppendItem(DisplayItem&);
void CheckNewItem(DisplayItem&);
DisplayItem& MoveItemFromCurrentListToNewList(wtf_size_t);
void DidAppendChunk();
void CheckNewChunk();
struct IdAsHashKey {
IdAsHashKey() = default;
......
......@@ -147,7 +147,7 @@ FakeDisplayItemClient& TestPaintArtifact::Client(wtf_size_t i) const {
void TestPaintArtifact::DidAddDisplayItem() {
auto& chunk = paint_artifact_->PaintChunks().back();
DCHECK_EQ(chunk.end_index, paint_artifact_->GetDisplayItemList().size() - 1);
const auto& item = paint_artifact_->GetDisplayItemList().Last();
const auto& item = paint_artifact_->GetDisplayItemList().back();
chunk.bounds.Unite(item.VisualRect());
if (item.DrawsContent())
chunk.drawable_bounds.Unite(item.VisualRect());
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment