Commit c1f1214e authored by mmocny@chromium.org

GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit.


BUG=123382
TEST=Manual


Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=134428

Review URL: http://codereview.chromium.org/10083056

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@134801 0039d316-1c4b-4281-b951-d872f2087c98
parent 570977f4
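In outline: the renderer-side proxy now tells the GPU process whether its client has registered a memory-allocation-changed callback, the GPU-side stub records that flag and pokes the GpuMemoryManager, and Manage() hands out real byte budgets (bucketed by foreground/background/hibernated state) in place of the old 0/1 placeholder sizes and boolean pairs. A condensed sketch of the round trip, paraphrased from the diff below rather than quoted verbatim:

    // Renderer process: registering (or clearing) the callback notifies the
    // GPU process via the new IPC message.
    Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
        route_id_, !memory_allocation_changed_callback_.is_null()));

    // GPU process: the stub records the flag and schedules a re-Manage();
    // stubs whose clients never registered a callback are skipped entirely.
    client_has_memory_allocation_changed_callback_ = has_callback;
    channel_->gpu_channel_manager()->gpu_memory_manager()->ScheduleManage();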
@@ -105,6 +105,8 @@ void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
     const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
         callback) {
   memory_allocation_changed_callback_ = callback;
+  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
+      route_id_, !memory_allocation_changed_callback_.is_null()));
 }
 
 void CommandBufferProxyImpl::OnSetMemoryAllocation(
...
@@ -264,10 +264,6 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL(
     g_all_shared_contexts.Pointer()->insert(this);
   }
 
-  command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
-      &WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
-      weak_ptr_factory_.GetWeakPtr()));
-
   visible_ = true;
   initialized_ = true;
   return true;
@@ -680,6 +676,17 @@ void WebGraphicsContext3DCommandBufferImpl::
 setMemoryAllocationChangedCallbackCHROMIUM(
     WebGraphicsMemoryAllocationChangedCallbackCHROMIUM* callback) {
   memory_allocation_changed_callback_ = callback;
+
+  if (!command_buffer_)
+    return;
+
+  if (callback)
+    command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
+        &WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
+        weak_ptr_factory_.GetWeakPtr()));
+  else
+    command_buffer_->SetMemoryAllocationChangedCallback(
+        base::Callback<void(const GpuMemoryAllocationForRenderer&)>());
 }
...
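Worth noting: registration moves out of MaybeInitializeGL() and into setMemoryAllocationChangedCallbackCHROMIUM() itself, so the proxy-level callback is bound only while a client callback is actually installed, and passing a null callback unbinds it. Roughly, from the caller's side (illustrative only; `context` and `my_callback` are hypothetical names):

    context->setMemoryAllocationChangedCallbackCHROMIUM(&my_callback);  // binds; sends has_callback = true
    context->setMemoryAllocationChangedCallbackCHROMIUM(NULL);          // unbinds; sends has_callback = false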
@@ -59,7 +59,9 @@ GpuCommandBufferStub::GpuCommandBufferStub(
       route_id_(route_id),
       software_(software),
       last_flush_count_(0),
-      allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE, true, true),
+      allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE,
+                  GpuMemoryAllocation::kHasFrontbuffer |
+                  GpuMemoryAllocation::kHasBackbuffer),
       parent_stub_for_initialization_(),
       parent_texture_for_initialization_(0),
       watchdog_(watchdog) {
@@ -131,6 +133,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
                         OnDiscardBackbuffer)
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
                         OnEnsureBackbuffer)
+    IPC_MESSAGE_HANDLER(
+        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
+        OnSetClientHasMemoryAllocationChangedCallback)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
@@ -589,6 +594,12 @@ void GpuCommandBufferStub::OnEnsureBackbuffer() {
         gfx::GLSurface::BUFFER_ALLOCATION_FRONT_AND_BACK);
 }
 
+void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
+    bool has_callback) {
+  client_has_memory_allocation_changed_callback_ = has_callback;
+  channel_->gpu_channel_manager()->gpu_memory_manager()->ScheduleManage();
+}
+
 void GpuCommandBufferStub::SendConsoleMessage(
     int32 id,
     const std::string& message) {
@@ -617,6 +628,11 @@ bool GpuCommandBufferStub::IsInSameContextShareGroup(
       static_cast<const GpuCommandBufferStub&>(other).context_group_;
 }
 
+bool GpuCommandBufferStub::
+    client_has_memory_allocation_changed_callback() const {
+  return client_has_memory_allocation_changed_callback_;
+}
+
 bool GpuCommandBufferStub::has_surface_state() const {
   return surface_state_ != NULL;
 }
...
@@ -63,6 +63,7 @@ class CONTENT_EXPORT GpuCommandBufferStubBase {
   virtual ~GpuCommandBufferStubBase() {}
 
   // Will not have surface state if this is an offscreen commandbuffer.
+  virtual bool client_has_memory_allocation_changed_callback() const = 0;
   virtual bool has_surface_state() const = 0;
   virtual const SurfaceState& surface_state() const = 0;
@@ -114,6 +115,7 @@ class GpuCommandBufferStub
   virtual bool Send(IPC::Message* msg) OVERRIDE;
 
   // GpuCommandBufferStubBase implementation:
+  virtual bool client_has_memory_allocation_changed_callback() const OVERRIDE;
   virtual bool has_surface_state() const OVERRIDE;
   virtual const GpuCommandBufferStubBase::SurfaceState& surface_state() const
       OVERRIDE;
@@ -205,6 +207,8 @@ class GpuCommandBufferStub
   void OnDiscardBackbuffer();
   void OnEnsureBackbuffer();
 
+  void OnSetClientHasMemoryAllocationChangedCallback(bool);
+
   void OnReschedule();
   void OnCommandProcessed();
@@ -228,6 +232,7 @@ class GpuCommandBufferStub
   gfx::GpuPreference gpu_preference_;
   int32 route_id_;
   bool software_;
+  bool client_has_memory_allocation_changed_callback_;
   uint32 last_flush_count_;
   scoped_ptr<GpuCommandBufferStubBase::SurfaceState> surface_state_;
   GpuMemoryAllocation allocation_;
...
@@ -12,7 +12,6 @@
 // and assigned to the browser and renderer context.
 // They will change over time, given memory availability, and browser state.
 
 // Memory Allocation which will be assigned to the renderer context.
 struct GpuMemoryAllocationForRenderer {
   enum {
@@ -68,17 +67,24 @@ struct GpuMemoryAllocationForBrowser {
 // GpuMemoryManager.
 struct GpuMemoryAllocation : public GpuMemoryAllocationForRenderer,
                              public GpuMemoryAllocationForBrowser {
+  // Bitmap
+  enum BufferAllocation {
+    kHasNoBuffers = 0,
+    kHasFrontbuffer = 1,
+    kHasBackbuffer = 2
+  };
+
   GpuMemoryAllocation()
       : GpuMemoryAllocationForRenderer(),
         GpuMemoryAllocationForBrowser() {
   }
 
   GpuMemoryAllocation(size_t gpu_resource_size_in_bytes,
-                      bool suggest_have_backbuffer,
-                      bool suggest_have_frontbuffer)
+                      int allocationBitmap)
       : GpuMemoryAllocationForRenderer(gpu_resource_size_in_bytes,
-                                       suggest_have_backbuffer),
-        GpuMemoryAllocationForBrowser(suggest_have_frontbuffer) {
+                                       (allocationBitmap & kHasBackbuffer) ==
+                                           kHasBackbuffer),
+        GpuMemoryAllocationForBrowser(
+            (allocationBitmap & kHasFrontbuffer) == kHasFrontbuffer) {
   }
 
   bool operator==(const GpuMemoryAllocation& other) const {
...
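For illustration (not part of the commit): the bitmap constructor reproduces the old pair of booleans, so constructing an allocation looks like this; the byte value here is made up:

    // 1 MB budget with frontbuffer only: suggest_have_frontbuffer == true,
    // suggest_have_backbuffer == false.
    GpuMemoryAllocation alloc(1024 * 1024,
                              GpuMemoryAllocation::kHasFrontbuffer);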
@@ -15,15 +15,6 @@
 
 namespace {
 
-// These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
-enum {
-  kResourceSizeNonHibernatedTab = 1,
-  kResourceSizeHibernatedTab = 0
-};
-
 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
@@ -35,6 +26,14 @@ bool IsInSameContextShareGroupAsAnyOf(
   return false;
 }
 
+void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
+                             GpuMemoryAllocation allocation) {
+  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+       it != stubs.end(); ++it) {
+    (*it)->SetMemoryAllocation(allocation);
+  }
+}
+
 }
 
 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
@@ -100,14 +99,6 @@ void GpuMemoryManager::ScheduleManage() {
 // 1. Find the most visible context-with-a-surface within each
 //    context-without-a-surface's share group, and inherit its visibility.
 void GpuMemoryManager::Manage() {
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;
 
   // Create stub lists by separating out the two types received from client
@@ -120,6 +111,8 @@ void GpuMemoryManager::Manage() {
   for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
       it != stubs.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
+    if (!stub->client_has_memory_allocation_changed_callback())
+      continue;
     if (stub->has_surface_state())
       stubs_with_surface.push_back(stub);
     else
@@ -136,37 +129,73 @@ void GpuMemoryManager::Manage() {
                           stubs_with_surface.end());
 
   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
+                                         stubs_with_surface_background,
+                                         stubs_with_surface_hibernated,
+                                         stubs_without_surface_foreground,
+                                         stubs_without_surface_background,
+                                         stubs_without_surface_hibernated;
 
   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible) {
-      all_buffers.push_back(stub);
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
-      front_buffers.push_back(stub);
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      no_buffers.push_back(stub);
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (stub->surface_state().visible)
+      stubs_with_surface_foreground.push_back(stub);
+    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
+      stubs_with_surface_background.push_back(stub);
+    else
+      stubs_with_surface_hibernated.push_back(stub);
   }
 
-  // Now, go through the stubs without surfaces and deduce visibility using the
-  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+
+    // Stubs without surfaces have deduced allocation state using the state
+    // of surface stubs which are in the same context share group.
+    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
+      stubs_without_surface_foreground.push_back(stub);
+    else if (IsInSameContextShareGroupAsAnyOf(
+        stub, stubs_with_surface_background))
+      stubs_without_surface_background.push_back(stub);
+    else
+      stubs_without_surface_hibernated.push_back(stub);
   }
 
+  // Calculate memory allocation size in bytes given to each stub, by sharing
+  // global limit equally among those that need it.
+  size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
+                              stubs_without_surface_foreground.size() +
+                              stubs_without_surface_background.size();
+  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
+  size_t bonus_allocation = 0;
+  if (base_allocation_size < kMaximumAllocationForTabs &&
+      !stubs_with_surface_foreground.empty())
+    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
+                       stubs_with_surface_foreground.size();
+
+  // Now give out allocations to everyone.
+  AssignMemoryAllocations(stubs_with_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
+          GpuMemoryAllocation::kHasFrontbuffer |
+          GpuMemoryAllocation::kHasBackbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_background,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_background,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }
 
 #endif
...
@@ -28,6 +28,23 @@ class CONTENT_EXPORT GpuMemoryManager {
  public:
   enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
 
+  // These are predefined values (in bytes) for
+  // GpuMemoryAllocation::gpuResourceSizeInBytes.
+  // The maximum allocation for all tabs is a soft limit that can be exceeded
+  // during the time it takes for renderers to respect new allocations,
+  // including when switching tabs or opening a new window.
+  // To alleviate some pressure, we decrease our desired limit by "one tab's
+  // worth" of memory.
+  enum {
+#if defined(OS_ANDROID)
+    kMinimumAllocationForTab = 32 * 1024 * 1024,
+    kMaximumAllocationForTabs = 64 * 1024 * 1024,
+#else
+    kMinimumAllocationForTab = 64 * 1024 * 1024,
+    kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
+#endif
+  };
+
   GpuMemoryManager(GpuMemoryManagerClient* client,
                    size_t max_surfaces_with_frontbuffer_soft_limit);
   ~GpuMemoryManager();
...
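As a worked example of how the new Manage() uses these constants (hypothetical scenario, desktop values): with three visible tabs and no other stubs, num_stubs_need_mem = 3, so base_allocation_size = 3 * 64 MB = 192 MB. That is below kMaximumAllocationForTabs = 512 MB - 64 MB = 448 MB, so bonus_allocation = (448 MB - 192 MB) / 3 ≈ 85 MB, and each foreground tab is offered roughly 64 + 85 = 149 MB. With seven or more visible tabs the base alone reaches 448 MB, the bonus drops to zero, and every tab gets exactly kMinimumAllocationForTab, which is what TestForegroundStubsGetBonusAllocation below asserts.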
@@ -23,6 +23,9 @@ class FakeCommandBufferStub : public GpuCommandBufferStubBase {
       : surface_state_(surface_id, visible, last_used_time) {
   }
 
+  virtual bool client_has_memory_allocation_changed_callback() const {
+    return true;
+  }
   virtual bool has_surface_state() const {
     return surface_state_.surface_id != 0;
   }
@@ -49,6 +52,9 @@ class FakeCommandBufferStubWithoutSurface : public GpuCommandBufferStubBase {
   FakeCommandBufferStubWithoutSurface() {
   }
 
+  virtual bool client_has_memory_allocation_changed_callback() const {
+    return true;
+  }
   virtual bool has_surface_state() const {
     return false;
   }
@@ -100,11 +106,51 @@ class GpuMemoryManagerTest : public testing::Test {
     return surface_id_++;
   }
 
-  static bool is_more_important(GpuCommandBufferStubBase* lhs,
-                                GpuCommandBufferStubBase* rhs) {
+  static bool IsMoreImportant(GpuCommandBufferStubBase* lhs,
+                              GpuCommandBufferStubBase* rhs) {
     return GpuMemoryManager::StubWithSurfaceComparator()(lhs, rhs);
   }
 
+  static bool IsAllocationForegroundForSurfaceYes(
+      const GpuMemoryAllocation& alloc) {
+    return alloc.suggest_have_frontbuffer &&
+           alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes >=
+               GpuMemoryManager::kMinimumAllocationForTab;
+  }
+
+  static bool IsAllocationBackgroundForSurfaceYes(
+      const GpuMemoryAllocation& alloc) {
+    return alloc.suggest_have_frontbuffer &&
+           !alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes == 0;
+  }
+
+  static bool IsAllocationHibernatedForSurfaceYes(
+      const GpuMemoryAllocation& alloc) {
+    return !alloc.suggest_have_frontbuffer &&
+           !alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes == 0;
+  }
+
+  static bool IsAllocationForegroundForSurfaceNo(
+      const GpuMemoryAllocation& alloc) {
+    return !alloc.suggest_have_frontbuffer &&
+           !alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes ==
+               GpuMemoryManager::kMinimumAllocationForTab;
+  }
+
+  static bool IsAllocationBackgroundForSurfaceNo(
+      const GpuMemoryAllocation& alloc) {
+    return !alloc.suggest_have_frontbuffer &&
+           !alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes ==
+               GpuMemoryManager::kMinimumAllocationForTab;
+  }
+
+  static bool IsAllocationHibernatedForSurfaceNo(
+      const GpuMemoryAllocation& alloc) {
+    return !alloc.suggest_have_frontbuffer &&
+           !alloc.suggest_have_backbuffer &&
+           alloc.gpu_resource_size_in_bytes == 0;
+  }
+
   void Manage() {
     memory_manager_.Manage();
   }
@@ -126,50 +172,50 @@ TEST_F(GpuMemoryManagerTest, ComparatorTests) {
                         stub_false3(GenerateUniqueSurfaceId(), false, newest_);
 
   // Should never be more important than self:
-  EXPECT_FALSE(is_more_important(&stub_true1, &stub_true1));
-  EXPECT_FALSE(is_more_important(&stub_true2, &stub_true2));
-  EXPECT_FALSE(is_more_important(&stub_true3, &stub_true3));
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_false1));
-  EXPECT_FALSE(is_more_important(&stub_false2, &stub_false2));
-  EXPECT_FALSE(is_more_important(&stub_false3, &stub_false3));
+  EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true1));
+  EXPECT_FALSE(IsMoreImportant(&stub_true2, &stub_true2));
+  EXPECT_FALSE(IsMoreImportant(&stub_true3, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false1));
+  EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_false2));
+  EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_false3));
 
   // Visible should always be more important than non visible:
-  EXPECT_TRUE(is_more_important(&stub_true1, &stub_false1));
-  EXPECT_TRUE(is_more_important(&stub_true1, &stub_false2));
-  EXPECT_TRUE(is_more_important(&stub_true1, &stub_false3));
-  EXPECT_TRUE(is_more_important(&stub_true2, &stub_false1));
-  EXPECT_TRUE(is_more_important(&stub_true2, &stub_false2));
-  EXPECT_TRUE(is_more_important(&stub_true2, &stub_false3));
-  EXPECT_TRUE(is_more_important(&stub_true3, &stub_false1));
-  EXPECT_TRUE(is_more_important(&stub_true3, &stub_false2));
-  EXPECT_TRUE(is_more_important(&stub_true3, &stub_false3));
+  EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false1));
+  EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false2));
+  EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false3));
+  EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false1));
+  EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false2));
+  EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false3));
+  EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false1));
+  EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false2));
+  EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false3));
 
   // Not visible should never be more important than visible:
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_true1));
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_true2));
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_true3));
-  EXPECT_FALSE(is_more_important(&stub_false2, &stub_true1));
-  EXPECT_FALSE(is_more_important(&stub_false2, &stub_true2));
-  EXPECT_FALSE(is_more_important(&stub_false2, &stub_true3));
-  EXPECT_FALSE(is_more_important(&stub_false3, &stub_true1));
-  EXPECT_FALSE(is_more_important(&stub_false3, &stub_true2));
-  EXPECT_FALSE(is_more_important(&stub_false3, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true1));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true2));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true1));
+  EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true2));
+  EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true1));
+  EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true2));
+  EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true3));
 
   // Newer should always be more important than older:
-  EXPECT_TRUE(is_more_important(&stub_true2, &stub_true1));
-  EXPECT_TRUE(is_more_important(&stub_true3, &stub_true1));
-  EXPECT_TRUE(is_more_important(&stub_true3, &stub_true2));
-  EXPECT_TRUE(is_more_important(&stub_false2, &stub_false1));
-  EXPECT_TRUE(is_more_important(&stub_false3, &stub_false1));
-  EXPECT_TRUE(is_more_important(&stub_false3, &stub_false2));
+  EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_true1));
+  EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_true1));
+  EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_true2));
+  EXPECT_TRUE(IsMoreImportant(&stub_false2, &stub_false1));
+  EXPECT_TRUE(IsMoreImportant(&stub_false3, &stub_false1));
+  EXPECT_TRUE(IsMoreImportant(&stub_false3, &stub_false2));
 
   // Older should never be more important than newer:
-  EXPECT_FALSE(is_more_important(&stub_true1, &stub_true2));
-  EXPECT_FALSE(is_more_important(&stub_true1, &stub_true3));
-  EXPECT_FALSE(is_more_important(&stub_true2, &stub_true3));
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_false2));
-  EXPECT_FALSE(is_more_important(&stub_false1, &stub_false3));
-  EXPECT_FALSE(is_more_important(&stub_false2, &stub_false3));
+  EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true2));
+  EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_true2, &stub_true3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false2));
+  EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false3));
+  EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_false3));
 }
 // Test GpuMemoryManager::Manage basic functionality.
@@ -185,10 +231,8 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
   client_.stubs_.push_back(&stub2);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
 
   // Test stubs without surface, with share group of 1 stub.
   FakeCommandBufferStubWithoutSurface stub3, stub4;
@@ -198,8 +242,10 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
   client_.stubs_.push_back(&stub4);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_, stub3.allocation_);
-  EXPECT_EQ(stub2.allocation_, stub4.allocation_);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
 
   // Test stub without surface, with share group of multiple stubs.
   FakeCommandBufferStubWithoutSurface stub5;
@@ -208,7 +254,7 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
   client_.stubs_.push_back(&stub5);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_, stub5.allocation_);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
 }
 // Test GpuMemoryManager::Manage functionality: changing visibility.
@@ -234,25 +280,21 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingVisibility) {
   client_.stubs_.push_back(&stub5);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub1.allocation_, stub3.allocation_);
-  EXPECT_EQ(stub2.allocation_, stub4.allocation_);
-  EXPECT_EQ(stub1.allocation_, stub5.allocation_);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
 
   stub1.surface_state_.visible = false;
   stub2.surface_state_.visible = true;
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub1.allocation_, stub3.allocation_);
-  EXPECT_EQ(stub2.allocation_, stub4.allocation_);
-  EXPECT_EQ(stub2.allocation_, stub5.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
 }
 // Test GpuMemoryManager::Manage functionality: Test more than threshold number
@@ -280,17 +322,13 @@ TEST_F(GpuMemoryManagerTest, TestManageManyVisibleStubs) {
   client_.stubs_.push_back(&stub7);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub5.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub6.allocation_, stub2.allocation_);
-  EXPECT_EQ(stub7.allocation_, stub1.allocation_);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub6.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub7.allocation_));
 }
 // Test GpuMemoryManager::Manage functionality: Test more than threshold number
@@ -318,17 +356,13 @@ TEST_F(GpuMemoryManagerTest, TestManageManyNotVisibleStubs) {
   client_.stubs_.push_back(&stub7);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub5.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub6.allocation_, stub4.allocation_);
-  EXPECT_EQ(stub7.allocation_, stub1.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub5.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub6.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
 }
 // Test GpuMemoryManager::Manage functionality: Test changing the last used
@@ -356,25 +390,25 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingLastUsedTime) {
   client_.stubs_.push_back(&stub7);
 
   Manage();
-  EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub5.allocation_, stub3.allocation_);
-  EXPECT_EQ(stub6.allocation_, stub4.allocation_);
-  EXPECT_EQ(stub7.allocation_, stub3.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub5.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub6.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
 
   stub3.surface_state_.last_used_time = older_;
   stub4.surface_state_.last_used_time = newer_;
 
   Manage();
-  EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub5.allocation_, stub3.allocation_);
-  EXPECT_EQ(stub6.allocation_, stub4.allocation_);
-  EXPECT_EQ(stub7.allocation_, stub4.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub4.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub5.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub6.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
 }
 // Test GpuMemoryManager::Manage functionality: Test changing importance of
@@ -382,14 +416,14 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingLastUsedTime) {
 // Expect memory allocation of the stubs without surface to share memory
 // allocation with the most visible stub in share group.
 TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
-  FakeCommandBufferStub stubA(GenerateUniqueSurfaceId(), true, newer_),
-                        stubB(GenerateUniqueSurfaceId(), false, newer_),
-                        stubC(GenerateUniqueSurfaceId(), false, newer_);
+  FakeCommandBufferStub stubIgnoreA(GenerateUniqueSurfaceId(), true, newer_),
+                        stubIgnoreB(GenerateUniqueSurfaceId(), false, newer_),
+                        stubIgnoreC(GenerateUniqueSurfaceId(), false, newer_);
   FakeCommandBufferStub stub1(GenerateUniqueSurfaceId(), true, newest_),
                         stub2(GenerateUniqueSurfaceId(), true, newest_);
-  client_.stubs_.push_back(&stubA);
-  client_.stubs_.push_back(&stubB);
-  client_.stubs_.push_back(&stubC);
+  client_.stubs_.push_back(&stubIgnoreA);
+  client_.stubs_.push_back(&stubIgnoreB);
+  client_.stubs_.push_back(&stubIgnoreC);
   client_.stubs_.push_back(&stub1);
   client_.stubs_.push_back(&stub2);
@@ -402,62 +436,91 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
   client_.stubs_.push_back(&stub4);
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
-  EXPECT_EQ(stub3.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub3.allocation_, stub2.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub2.allocation_);
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
 
   stub1.surface_state_.visible = false;
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
-  EXPECT_NE(stub3.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub3.allocation_, stub2.allocation_);
-  EXPECT_NE(stub4.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub2.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
 
   stub2.surface_state_.visible = false;
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub3.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub3.allocation_, stub2.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub2.allocation_);
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
 
   stub1.surface_state_.last_used_time = older_;
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
-  EXPECT_NE(stub3.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub3.allocation_, stub2.allocation_);
-  EXPECT_NE(stub4.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub2.allocation_);
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
 
   stub2.surface_state_.last_used_time = older_;
 
   Manage();
-  EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, false);
-  EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
-  EXPECT_EQ(stub3.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub3.allocation_, stub2.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub1.allocation_);
-  EXPECT_EQ(stub4.allocation_, stub2.allocation_);
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub3.allocation_));
+  EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub4.allocation_));
 }
+
+// Test GpuMemoryAllocation memory allocation bonuses:
+// When the number of visible tabs is small, each tab should get a
+// gpu_resource_size_in_bytes allocation value that is greater than
+// kMinimumAllocationForTab, and when the number of tabs is large, each should
+// get exactly kMinimumAllocationForTab and not less.
+TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
+  FakeCommandBufferStub stub1(GenerateUniqueSurfaceId(), true, older_),
+                        stub2(GenerateUniqueSurfaceId(), true, older_),
+                        stub3(GenerateUniqueSurfaceId(), true, older_);
+  client_.stubs_.push_back(&stub1);
+  client_.stubs_.push_back(&stub2);
+  client_.stubs_.push_back(&stub3);
+
+  Manage();
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub3.allocation_));
+  EXPECT_GT(stub1.allocation_.gpu_resource_size_in_bytes,
+            static_cast<size_t>(GpuMemoryManager::kMinimumAllocationForTab));
+  EXPECT_GT(stub2.allocation_.gpu_resource_size_in_bytes,
+            static_cast<size_t>(GpuMemoryManager::kMinimumAllocationForTab));
+  EXPECT_GT(stub3.allocation_.gpu_resource_size_in_bytes,
+            static_cast<size_t>(GpuMemoryManager::kMinimumAllocationForTab));
+
+  FakeCommandBufferStub stub4(GenerateUniqueSurfaceId(), true, older_),
+                        stub5(GenerateUniqueSurfaceId(), true, older_),
+                        stub6(GenerateUniqueSurfaceId(), true, older_),
+                        stub7(GenerateUniqueSurfaceId(), true, older_),
+                        stub8(GenerateUniqueSurfaceId(), true, older_),
+                        stub9(GenerateUniqueSurfaceId(), true, older_);
+  client_.stubs_.push_back(&stub4);
+  client_.stubs_.push_back(&stub5);
+  client_.stubs_.push_back(&stub6);
+  client_.stubs_.push_back(&stub7);
+  client_.stubs_.push_back(&stub8);
+  client_.stubs_.push_back(&stub9);
+
+  Manage();
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
+  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub3.allocation_));
+  EXPECT_EQ(stub1.allocation_.gpu_resource_size_in_bytes,
+            GpuMemoryManager::kMinimumAllocationForTab);
+  EXPECT_EQ(stub2.allocation_.gpu_resource_size_in_bytes,
+            GpuMemoryManager::kMinimumAllocationForTab);
+  EXPECT_EQ(stub3.allocation_.gpu_resource_size_in_bytes,
+            GpuMemoryManager::kMinimumAllocationForTab);
+}
 // Test GpuMemoryAllocation comparison operators: Iterate over all possible
@@ -465,21 +528,35 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
 // suggest_have_frontbuffer, and make sure allocations with equal values test
 // equal and non equal values test not equal.
 TEST_F(GpuMemoryManagerTest, GpuMemoryAllocationCompareTests) {
-  int gpu_resource_size_in_bytes_values[] = { 0, 1, 12345678 };
-  bool suggest_have_backbuffer_values[] = { false, true };
-  bool suggest_have_frontbuffer_values[] = { false, true };
-
-  for(int* sz = &gpu_resource_size_in_bytes_values[0];
-      sz != &gpu_resource_size_in_bytes_values[3]; ++sz) {
-    for(bool* shbb = &suggest_have_backbuffer_values[0];
-        shbb != &suggest_have_backbuffer_values[2]; ++shbb) {
-      for(bool* shfb = &suggest_have_frontbuffer_values[0];
-          shfb != &suggest_have_frontbuffer_values[2]; ++shfb) {
-        GpuMemoryAllocation allocation(*sz, *shbb, *shfb);
-        EXPECT_EQ(allocation, GpuMemoryAllocation(*sz, *shbb, *shfb));
-        EXPECT_NE(allocation, GpuMemoryAllocation(*sz+1, *shbb, *shfb));
-        EXPECT_NE(allocation, GpuMemoryAllocation(*sz, !*shbb, *shfb));
-        EXPECT_NE(allocation, GpuMemoryAllocation(*sz, *shbb, !*shfb));
+  std::vector<int> gpu_resource_size_in_bytes_values;
+  gpu_resource_size_in_bytes_values.push_back(0);
+  gpu_resource_size_in_bytes_values.push_back(1);
+  gpu_resource_size_in_bytes_values.push_back(12345678);
+
+  std::vector<int> suggested_buffer_allocation_values;
+  suggested_buffer_allocation_values.push_back(
+      GpuMemoryAllocation::kHasFrontbuffer |
+      GpuMemoryAllocation::kHasBackbuffer);
+  suggested_buffer_allocation_values.push_back(
+      GpuMemoryAllocation::kHasFrontbuffer);
+  suggested_buffer_allocation_values.push_back(
+      GpuMemoryAllocation::kHasBackbuffer);
+  suggested_buffer_allocation_values.push_back(
+      GpuMemoryAllocation::kHasNoBuffers);
+
+  for(size_t i = 0; i != gpu_resource_size_in_bytes_values.size(); ++i) {
+    for(size_t j = 0; j != suggested_buffer_allocation_values.size(); ++j) {
+      int sz = gpu_resource_size_in_bytes_values[i];
+      int buffer_allocation = suggested_buffer_allocation_values[j];
+      GpuMemoryAllocation allocation(sz, buffer_allocation);
+
+      EXPECT_EQ(allocation, GpuMemoryAllocation(sz, buffer_allocation));
+      EXPECT_NE(allocation, GpuMemoryAllocation(sz+1, buffer_allocation));
+
+      for(size_t k = 0; k != suggested_buffer_allocation_values.size(); ++k) {
+        int buffer_allocation_other = suggested_buffer_allocation_values[k];
+        if (buffer_allocation == buffer_allocation_other) continue;
+        EXPECT_NE(allocation, GpuMemoryAllocation(sz, buffer_allocation_other));
       }
     }
   }
...
@@ -445,6 +445,11 @@ IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_EnsureBackbuffer)
 IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetMemoryAllocation,
                     GpuMemoryAllocationForRenderer /* allocation */)
 
+// Sent to stub when proxy is assigned a memory allocation changed callback.
+IPC_MESSAGE_ROUTED1(
+    GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
+    bool /* has_callback */)
+
 //------------------------------------------------------------------------------
 // Accelerated Video Decoder Messages
 // These messages are sent from Renderer process to GPU process.
...