Commit c1f1214e authored by mmocny@chromium.org's avatar mmocny@chromium.org

GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit.


BUG=123382
TEST=Manual


Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=134428

Review URL: http://codereview.chromium.org/10083056

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@134801 0039d316-1c4b-4281-b951-d872f2087c98
parent 570977f4
......@@ -105,6 +105,8 @@ void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
callback) {
memory_allocation_changed_callback_ = callback;
Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
route_id_, !memory_allocation_changed_callback_.is_null()));
}
void CommandBufferProxyImpl::OnSetMemoryAllocation(
......
......@@ -264,10 +264,6 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL(
g_all_shared_contexts.Pointer()->insert(this);
}
command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
&WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
weak_ptr_factory_.GetWeakPtr()));
visible_ = true;
initialized_ = true;
return true;
......@@ -680,6 +676,17 @@ void WebGraphicsContext3DCommandBufferImpl::
setMemoryAllocationChangedCallbackCHROMIUM(
WebGraphicsMemoryAllocationChangedCallbackCHROMIUM* callback) {
memory_allocation_changed_callback_ = callback;
if (!command_buffer_)
return;
if (callback)
command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
&WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
weak_ptr_factory_.GetWeakPtr()));
else
command_buffer_->SetMemoryAllocationChangedCallback(
base::Callback<void(const GpuMemoryAllocationForRenderer&)>());
}
......
......@@ -59,7 +59,9 @@ GpuCommandBufferStub::GpuCommandBufferStub(
route_id_(route_id),
software_(software),
last_flush_count_(0),
allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE, true, true),
allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE,
GpuMemoryAllocation::kHasFrontbuffer |
GpuMemoryAllocation::kHasBackbuffer),
parent_stub_for_initialization_(),
parent_texture_for_initialization_(0),
watchdog_(watchdog) {
......@@ -131,6 +133,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
OnDiscardBackbuffer)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
OnEnsureBackbuffer)
IPC_MESSAGE_HANDLER(
GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
OnSetClientHasMemoryAllocationChangedCallback)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
......@@ -589,6 +594,12 @@ void GpuCommandBufferStub::OnEnsureBackbuffer() {
gfx::GLSurface::BUFFER_ALLOCATION_FRONT_AND_BACK);
}
// IPC handler: records whether the client (renderer) side has registered a
// memory-allocation-changed callback. Stubs whose client has no callback are
// skipped by GpuMemoryManager::Manage() when distributing memory, so a
// re-manage is requested whenever this bit changes.
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
bool has_callback) {
client_has_memory_allocation_changed_callback_ = has_callback;
// Ask the memory manager to recompute allocations with the new client state.
channel_->gpu_channel_manager()->gpu_memory_manager()->ScheduleManage();
}
void GpuCommandBufferStub::SendConsoleMessage(
int32 id,
const std::string& message) {
......@@ -617,6 +628,11 @@ bool GpuCommandBufferStub::IsInSameContextShareGroup(
static_cast<const GpuCommandBufferStub&>(other).context_group_;
}
// GpuCommandBufferStubBase implementation: reports whether the renderer
// client registered a memory-allocation-changed callback (set via the
// GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback IPC).
bool GpuCommandBufferStub::
client_has_memory_allocation_changed_callback() const {
return client_has_memory_allocation_changed_callback_;
}
// GpuCommandBufferStubBase implementation: true iff this stub carries surface
// state. Offscreen command buffers have no surface state (surface_state_ is
// NULL), so they return false here.
bool GpuCommandBufferStub::has_surface_state() const {
return surface_state_ != NULL;
}
......
......@@ -63,6 +63,7 @@ class CONTENT_EXPORT GpuCommandBufferStubBase {
virtual ~GpuCommandBufferStubBase() {}
// Will not have surface state if this is an offscreen commandbuffer.
virtual bool client_has_memory_allocation_changed_callback() const = 0;
virtual bool has_surface_state() const = 0;
virtual const SurfaceState& surface_state() const = 0;
......@@ -114,6 +115,7 @@ class GpuCommandBufferStub
virtual bool Send(IPC::Message* msg) OVERRIDE;
// GpuCommandBufferStubBase implementation:
virtual bool client_has_memory_allocation_changed_callback() const OVERRIDE;
virtual bool has_surface_state() const OVERRIDE;
virtual const GpuCommandBufferStubBase::SurfaceState& surface_state() const
OVERRIDE;
......@@ -205,6 +207,8 @@ class GpuCommandBufferStub
void OnDiscardBackbuffer();
void OnEnsureBackbuffer();
void OnSetClientHasMemoryAllocationChangedCallback(bool);
void OnReschedule();
void OnCommandProcessed();
......@@ -228,6 +232,7 @@ class GpuCommandBufferStub
gfx::GpuPreference gpu_preference_;
int32 route_id_;
bool software_;
bool client_has_memory_allocation_changed_callback_;
uint32 last_flush_count_;
scoped_ptr<GpuCommandBufferStubBase::SurfaceState> surface_state_;
GpuMemoryAllocation allocation_;
......
......@@ -12,7 +12,6 @@
// and assigned to the browser and renderer context.
// They will change over time, given memory availability, and browser state.
// Memory Allocation which will be assigned to the renderer context.
struct GpuMemoryAllocationForRenderer {
enum {
......@@ -68,17 +67,24 @@ struct GpuMemoryAllocationForBrowser {
// GpuMemoryManager.
struct GpuMemoryAllocation : public GpuMemoryAllocationForRenderer,
public GpuMemoryAllocationForBrowser {
// Bitmap
enum BufferAllocation {
kHasNoBuffers = 0,
kHasFrontbuffer = 1,
kHasBackbuffer = 2
};
GpuMemoryAllocation()
: GpuMemoryAllocationForRenderer(),
GpuMemoryAllocationForBrowser() {
}
GpuMemoryAllocation(size_t gpu_resource_size_in_bytes,
bool suggest_have_backbuffer,
bool suggest_have_frontbuffer)
int allocationBitmap)
: GpuMemoryAllocationForRenderer(gpu_resource_size_in_bytes,
suggest_have_backbuffer),
GpuMemoryAllocationForBrowser(suggest_have_frontbuffer) {
(allocationBitmap & kHasBackbuffer) == kHasBackbuffer),
GpuMemoryAllocationForBrowser(
(allocationBitmap & kHasFrontbuffer) == kHasFrontbuffer) {
}
bool operator==(const GpuMemoryAllocation& other) const {
......
......@@ -15,15 +15,6 @@
namespace {
// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
// used to check if it is 0 or non-0. In the future, these values will not
// come from constants, but rather will be distributed dynamically.
enum {
kResourceSizeNonHibernatedTab = 1,
kResourceSizeHibernatedTab = 0
};
bool IsInSameContextShareGroupAsAnyOf(
const GpuCommandBufferStubBase* stub,
const std::vector<GpuCommandBufferStubBase*>& stubs) {
......@@ -35,6 +26,14 @@ bool IsInSameContextShareGroupAsAnyOf(
return false;
}
void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
GpuMemoryAllocation allocation) {
for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
it != stubs.end(); ++it) {
(*it)->SetMemoryAllocation(allocation);
}
}
}
GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
......@@ -100,14 +99,6 @@ void GpuMemoryManager::ScheduleManage() {
// 1. Find the most visible context-with-a-surface within each
// context-without-a-surface's share group, and inherit its visibility.
void GpuMemoryManager::Manage() {
// Set up three allocation values for the three possible stub states
const GpuMemoryAllocation all_buffers_allocation(
kResourceSizeNonHibernatedTab, true, true);
const GpuMemoryAllocation front_buffers_allocation(
kResourceSizeNonHibernatedTab, false, true);
const GpuMemoryAllocation no_buffers_allocation(
kResourceSizeHibernatedTab, false, false);
manage_scheduled_ = false;
// Create stub lists by separating out the two types received from client
......@@ -120,6 +111,8 @@ void GpuMemoryManager::Manage() {
for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
it != stubs.end(); ++it) {
GpuCommandBufferStubBase* stub = *it;
if (!stub->client_has_memory_allocation_changed_callback())
continue;
if (stub->has_surface_state())
stubs_with_surface.push_back(stub);
else
......@@ -136,37 +129,73 @@ void GpuMemoryManager::Manage() {
stubs_with_surface.end());
// Separate stubs into memory allocation sets.
std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
stubs_with_surface_background,
stubs_with_surface_hibernated,
stubs_without_surface_foreground,
stubs_without_surface_background,
stubs_without_surface_hibernated;
for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
GpuCommandBufferStubBase* stub = stubs_with_surface[i];
DCHECK(stub->has_surface_state());
if (stub->surface_state().visible) {
all_buffers.push_back(stub);
stub->SetMemoryAllocation(all_buffers_allocation);
} else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
front_buffers.push_back(stub);
stub->SetMemoryAllocation(front_buffers_allocation);
} else {
no_buffers.push_back(stub);
stub->SetMemoryAllocation(no_buffers_allocation);
}
if (stub->surface_state().visible)
stubs_with_surface_foreground.push_back(stub);
else if (i < max_surfaces_with_frontbuffer_soft_limit_)
stubs_with_surface_background.push_back(stub);
else
stubs_with_surface_hibernated.push_back(stub);
}
// Now, go through the stubs without surfaces and deduce visibility using the
// visibility of stubs which are in the same context share group.
for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
GpuCommandBufferStubBase* stub = *it;
DCHECK(!stub->has_surface_state());
if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
stub->SetMemoryAllocation(all_buffers_allocation);
} else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
stub->SetMemoryAllocation(front_buffers_allocation);
} else {
stub->SetMemoryAllocation(no_buffers_allocation);
}
// Stubs without surfaces have their allocation state deduced from the
// state of the surface stubs in the same context share group.
if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
stubs_without_surface_foreground.push_back(stub);
else if (IsInSameContextShareGroupAsAnyOf(
stub, stubs_with_surface_background))
stubs_without_surface_background.push_back(stub);
else
stubs_without_surface_hibernated.push_back(stub);
}
// Calculate the memory allocation size in bytes given to each stub, by
// sharing the global limit equally among those that need it.
size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
stubs_without_surface_foreground.size() +
stubs_without_surface_background.size();
size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
size_t bonus_allocation = 0;
if (base_allocation_size < kMaximumAllocationForTabs &&
!stubs_with_surface_foreground.empty())
bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
stubs_with_surface_foreground.size();
// Now give out allocations to everyone.
AssignMemoryAllocations(stubs_with_surface_foreground,
GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
GpuMemoryAllocation::kHasFrontbuffer |
GpuMemoryAllocation::kHasBackbuffer));
AssignMemoryAllocations(stubs_with_surface_background,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
AssignMemoryAllocations(stubs_with_surface_hibernated,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_foreground,
GpuMemoryAllocation(kMinimumAllocationForTab,
GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_background,
GpuMemoryAllocation(kMinimumAllocationForTab,
GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_hibernated,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
}
#endif
......@@ -28,6 +28,23 @@ class CONTENT_EXPORT GpuMemoryManager {
public:
enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes.
// Maximum Allocation for all tabs is a soft limit that can be exceeded
// during the time it takes for renderers to respect new allocations,
// including when switching tabs or opening a new window.
// To alleviate some pressure, we decrease our desired limit by "one tabs'
// worth" of memory.
enum {
#if defined(OS_ANDROID)
kMinimumAllocationForTab = 32 * 1024 * 1024,
kMaximumAllocationForTabs = 64 * 1024 * 1024,
#else
kMinimumAllocationForTab = 64 * 1024 * 1024,
kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
#endif
};
GpuMemoryManager(GpuMemoryManagerClient* client,
size_t max_surfaces_with_frontbuffer_soft_limit);
~GpuMemoryManager();
......
......@@ -23,6 +23,9 @@ class FakeCommandBufferStub : public GpuCommandBufferStubBase {
: surface_state_(surface_id, visible, last_used_time) {
}
virtual bool client_has_memory_allocation_changed_callback() const {
return true;
}
virtual bool has_surface_state() const {
return surface_state_.surface_id != 0;
}
......@@ -49,6 +52,9 @@ class FakeCommandBufferStubWithoutSurface : public GpuCommandBufferStubBase {
FakeCommandBufferStubWithoutSurface() {
}
virtual bool client_has_memory_allocation_changed_callback() const {
return true;
}
virtual bool has_surface_state() const {
return false;
}
......@@ -100,11 +106,51 @@ class GpuMemoryManagerTest : public testing::Test {
return surface_id_++;
}
static bool is_more_important(GpuCommandBufferStubBase* lhs,
GpuCommandBufferStubBase* rhs) {
// Test helper wrapping the manager's comparator: returns true iff |lhs|
// orders before (is more important than) |rhs| for surface-stub ranking.
static bool IsMoreImportant(GpuCommandBufferStubBase* lhs,
GpuCommandBufferStubBase* rhs) {
return GpuMemoryManager::StubWithSurfaceComparator()(lhs, rhs);
}
// Expected allocation for a visible stub that owns a surface: front and back
// buffers, and at least the per-tab minimum memory (foreground stubs may
// also receive a bonus on top of the minimum).
static bool IsAllocationForegroundForSurfaceYes(
const GpuMemoryAllocation& alloc) {
return alloc.suggest_have_frontbuffer &&
alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes >=
GpuMemoryManager::kMinimumAllocationForTab;
}
// Expected allocation for a non-visible surface stub within the frontbuffer
// soft limit: keep the frontbuffer only, with no memory budget.
static bool IsAllocationBackgroundForSurfaceYes(
const GpuMemoryAllocation& alloc) {
return alloc.suggest_have_frontbuffer &&
!alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes == 0;
}
// Expected allocation for a hibernated surface stub (non-visible and past
// the frontbuffer soft limit): no buffers and no memory budget.
static bool IsAllocationHibernatedForSurfaceYes(
const GpuMemoryAllocation& alloc) {
return !alloc.suggest_have_frontbuffer &&
!alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes == 0;
}
// Expected allocation for a surfaceless stub sharing a context group with a
// foreground surface stub: no buffers (it has no surface), but exactly the
// per-tab minimum memory.
static bool IsAllocationForegroundForSurfaceNo(
const GpuMemoryAllocation& alloc) {
return !alloc.suggest_have_frontbuffer &&
!alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes ==
GpuMemoryManager::kMinimumAllocationForTab;
}
// Expected allocation for a surfaceless stub sharing a context group with a
// background surface stub. Intentionally identical to the foreground
// surfaceless case: both receive the per-tab minimum and no buffers.
static bool IsAllocationBackgroundForSurfaceNo(
const GpuMemoryAllocation& alloc) {
return !alloc.suggest_have_frontbuffer &&
!alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes ==
GpuMemoryManager::kMinimumAllocationForTab;
}
// Expected allocation for a hibernated surfaceless stub (no related surface
// stub is foreground or background): no buffers and no memory budget.
static bool IsAllocationHibernatedForSurfaceNo(
const GpuMemoryAllocation& alloc) {
return !alloc.suggest_have_frontbuffer &&
!alloc.suggest_have_backbuffer &&
alloc.gpu_resource_size_in_bytes == 0;
}
void Manage() {
memory_manager_.Manage();
}
......@@ -126,50 +172,50 @@ TEST_F(GpuMemoryManagerTest, ComparatorTests) {
stub_false3(GenerateUniqueSurfaceId(), false, newest_);
// Should never be more important than self:
EXPECT_FALSE(is_more_important(&stub_true1, &stub_true1));
EXPECT_FALSE(is_more_important(&stub_true2, &stub_true2));
EXPECT_FALSE(is_more_important(&stub_true3, &stub_true3));
EXPECT_FALSE(is_more_important(&stub_false1, &stub_false1));
EXPECT_FALSE(is_more_important(&stub_false2, &stub_false2));
EXPECT_FALSE(is_more_important(&stub_false3, &stub_false3));
EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true1));
EXPECT_FALSE(IsMoreImportant(&stub_true2, &stub_true2));
EXPECT_FALSE(IsMoreImportant(&stub_true3, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false1));
EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_false2));
EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_false3));
// Visible should always be more important than non visible:
EXPECT_TRUE(is_more_important(&stub_true1, &stub_false1));
EXPECT_TRUE(is_more_important(&stub_true1, &stub_false2));
EXPECT_TRUE(is_more_important(&stub_true1, &stub_false3));
EXPECT_TRUE(is_more_important(&stub_true2, &stub_false1));
EXPECT_TRUE(is_more_important(&stub_true2, &stub_false2));
EXPECT_TRUE(is_more_important(&stub_true2, &stub_false3));
EXPECT_TRUE(is_more_important(&stub_true3, &stub_false1));
EXPECT_TRUE(is_more_important(&stub_true3, &stub_false2));
EXPECT_TRUE(is_more_important(&stub_true3, &stub_false3));
EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false1));
EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false2));
EXPECT_TRUE(IsMoreImportant(&stub_true1, &stub_false3));
EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false1));
EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false2));
EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_false3));
EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false1));
EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false2));
EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_false3));
// Not visible should never be more important than visible:
EXPECT_FALSE(is_more_important(&stub_false1, &stub_true1));
EXPECT_FALSE(is_more_important(&stub_false1, &stub_true2));
EXPECT_FALSE(is_more_important(&stub_false1, &stub_true3));
EXPECT_FALSE(is_more_important(&stub_false2, &stub_true1));
EXPECT_FALSE(is_more_important(&stub_false2, &stub_true2));
EXPECT_FALSE(is_more_important(&stub_false2, &stub_true3));
EXPECT_FALSE(is_more_important(&stub_false3, &stub_true1));
EXPECT_FALSE(is_more_important(&stub_false3, &stub_true2));
EXPECT_FALSE(is_more_important(&stub_false3, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true1));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true2));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true1));
EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true2));
EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true1));
EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true2));
EXPECT_FALSE(IsMoreImportant(&stub_false3, &stub_true3));
// Newer should always be more important than older:
EXPECT_TRUE(is_more_important(&stub_true2, &stub_true1));
EXPECT_TRUE(is_more_important(&stub_true3, &stub_true1));
EXPECT_TRUE(is_more_important(&stub_true3, &stub_true2));
EXPECT_TRUE(is_more_important(&stub_false2, &stub_false1));
EXPECT_TRUE(is_more_important(&stub_false3, &stub_false1));
EXPECT_TRUE(is_more_important(&stub_false3, &stub_false2));
EXPECT_TRUE(IsMoreImportant(&stub_true2, &stub_true1));
EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_true1));
EXPECT_TRUE(IsMoreImportant(&stub_true3, &stub_true2));
EXPECT_TRUE(IsMoreImportant(&stub_false2, &stub_false1));
EXPECT_TRUE(IsMoreImportant(&stub_false3, &stub_false1));
EXPECT_TRUE(IsMoreImportant(&stub_false3, &stub_false2));
// Older should never be more important than newer:
EXPECT_FALSE(is_more_important(&stub_true1, &stub_true2));
EXPECT_FALSE(is_more_important(&stub_true1, &stub_true3));
EXPECT_FALSE(is_more_important(&stub_true2, &stub_true3));
EXPECT_FALSE(is_more_important(&stub_false1, &stub_false2));
EXPECT_FALSE(is_more_important(&stub_false1, &stub_false3));
EXPECT_FALSE(is_more_important(&stub_false2, &stub_false3));
EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true2));
EXPECT_FALSE(IsMoreImportant(&stub_true1, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_true2, &stub_true3));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false2));
EXPECT_FALSE(IsMoreImportant(&stub_false1, &stub_false3));
EXPECT_FALSE(IsMoreImportant(&stub_false2, &stub_false3));
}
// Test GpuMemoryManager::Manage basic functionality.
......@@ -185,10 +231,8 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
client_.stubs_.push_back(&stub2);
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
// Test stubs without surface, with share group of 1 stub.
FakeCommandBufferStubWithoutSurface stub3, stub4;
......@@ -198,8 +242,10 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
client_.stubs_.push_back(&stub4);
Manage();
EXPECT_EQ(stub1.allocation_, stub3.allocation_);
EXPECT_EQ(stub2.allocation_, stub4.allocation_);
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
// Test stub without surface, with share group of multiple stubs.
FakeCommandBufferStubWithoutSurface stub5;
......@@ -208,7 +254,7 @@ TEST_F(GpuMemoryManagerTest, TestManageBasicFunctionality) {
client_.stubs_.push_back(&stub5);
Manage();
EXPECT_EQ(stub1.allocation_, stub5.allocation_);
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
}
// Test GpuMemoryManager::Manage functionality: changing visibility.
......@@ -234,25 +280,21 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingVisibility) {
client_.stubs_.push_back(&stub5);
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub1.allocation_, stub3.allocation_);
EXPECT_EQ(stub2.allocation_, stub4.allocation_);
EXPECT_EQ(stub1.allocation_, stub5.allocation_);
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
stub1.surface_state_.visible = false;
stub2.surface_state_.visible = true;
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub1.allocation_, stub3.allocation_);
EXPECT_EQ(stub2.allocation_, stub4.allocation_);
EXPECT_EQ(stub2.allocation_, stub5.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
}
// Test GpuMemoryManager::Manage functionality: Test more than threshold number
......@@ -280,17 +322,13 @@ TEST_F(GpuMemoryManagerTest, TestManageManyVisibleStubs) {
client_.stubs_.push_back(&stub7);
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub5.allocation_, stub1.allocation_);
EXPECT_EQ(stub6.allocation_, stub2.allocation_);
EXPECT_EQ(stub7.allocation_, stub1.allocation_);
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub3.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub4.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub5.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub6.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub7.allocation_));
}
// Test GpuMemoryManager::Manage functionality: Test more than threshold number
......@@ -318,17 +356,13 @@ TEST_F(GpuMemoryManagerTest, TestManageManyNotVisibleStubs) {
client_.stubs_.push_back(&stub7);
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub5.allocation_, stub1.allocation_);
EXPECT_EQ(stub6.allocation_, stub4.allocation_);
EXPECT_EQ(stub7.allocation_, stub1.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub3.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub4.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub5.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub6.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
}
// Test GpuMemoryManager::Manage functionality: Test changing the last used
......@@ -356,25 +390,25 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingLastUsedTime) {
client_.stubs_.push_back(&stub7);
Manage();
EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub5.allocation_, stub3.allocation_);
EXPECT_EQ(stub6.allocation_, stub4.allocation_);
EXPECT_EQ(stub7.allocation_, stub3.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub3.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub4.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub5.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub6.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
stub3.surface_state_.last_used_time = older_;
stub4.surface_state_.last_used_time = newer_;
Manage();
EXPECT_EQ(stub3.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub3.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub4.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub4.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub5.allocation_, stub3.allocation_);
EXPECT_EQ(stub6.allocation_, stub4.allocation_);
EXPECT_EQ(stub7.allocation_, stub4.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub3.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub4.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub5.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub6.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub7.allocation_));
}
// Test GpuMemoryManager::Manage functionality: Test changing importance of
......@@ -382,14 +416,14 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingLastUsedTime) {
// Expect memory allocation of the stubs without surface to share memory
// allocation with the most visible stub in share group.
TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
FakeCommandBufferStub stubA(GenerateUniqueSurfaceId(), true, newer_),
stubB(GenerateUniqueSurfaceId(), false, newer_),
stubC(GenerateUniqueSurfaceId(), false, newer_);
FakeCommandBufferStub stubIgnoreA(GenerateUniqueSurfaceId(), true, newer_),
stubIgnoreB(GenerateUniqueSurfaceId(), false, newer_),
stubIgnoreC(GenerateUniqueSurfaceId(), false, newer_);
FakeCommandBufferStub stub1(GenerateUniqueSurfaceId(), true, newest_),
stub2(GenerateUniqueSurfaceId(), true, newest_);
client_.stubs_.push_back(&stubA);
client_.stubs_.push_back(&stubB);
client_.stubs_.push_back(&stubC);
client_.stubs_.push_back(&stubIgnoreA);
client_.stubs_.push_back(&stubIgnoreB);
client_.stubs_.push_back(&stubIgnoreC);
client_.stubs_.push_back(&stub1);
client_.stubs_.push_back(&stub2);
......@@ -402,62 +436,91 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
client_.stubs_.push_back(&stub4);
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
EXPECT_EQ(stub3.allocation_, stub1.allocation_);
EXPECT_EQ(stub3.allocation_, stub2.allocation_);
EXPECT_EQ(stub4.allocation_, stub1.allocation_);
EXPECT_EQ(stub4.allocation_, stub2.allocation_);
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
stub1.surface_state_.visible = false;
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, true);
EXPECT_NE(stub3.allocation_, stub1.allocation_);
EXPECT_EQ(stub3.allocation_, stub2.allocation_);
EXPECT_NE(stub4.allocation_, stub1.allocation_);
EXPECT_EQ(stub4.allocation_, stub2.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationForegroundForSurfaceNo(stub4.allocation_));
stub2.surface_state_.visible = false;
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub3.allocation_, stub1.allocation_);
EXPECT_EQ(stub3.allocation_, stub2.allocation_);
EXPECT_EQ(stub4.allocation_, stub1.allocation_);
EXPECT_EQ(stub4.allocation_, stub2.allocation_);
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
stub1.surface_state_.last_used_time = older_;
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, true);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_NE(stub3.allocation_, stub1.allocation_);
EXPECT_EQ(stub3.allocation_, stub2.allocation_);
EXPECT_NE(stub4.allocation_, stub1.allocation_);
EXPECT_EQ(stub4.allocation_, stub2.allocation_);
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationBackgroundForSurfaceNo(stub4.allocation_));
stub2.surface_state_.last_used_time = older_;
Manage();
EXPECT_EQ(stub1.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub1.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_frontbuffer, false);
EXPECT_EQ(stub2.allocation_.suggest_have_backbuffer, false);
EXPECT_EQ(stub3.allocation_, stub1.allocation_);
EXPECT_EQ(stub3.allocation_, stub2.allocation_);
EXPECT_EQ(stub4.allocation_, stub1.allocation_);
EXPECT_EQ(stub4.allocation_, stub2.allocation_);
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub1.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceYes(stub2.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub3.allocation_));
EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub4.allocation_));
}
// Test GpuMemoryAllocation memory allocation bonuses:
// When the number of visible tabs is small, each tab should get a
// gpu_resource_size_in_bytes allocation value that is greater than
// kMinimumAllocationForTab, and when the number of tabs is large, each should
// get exactly kMinimumAllocationForTab and not less.
TEST_F(GpuMemoryManagerTest, TestForegroundStubsGetBonusAllocation) {
  // The per-tab allocation floor that the memory manager guarantees.
  const size_t min_alloc =
      static_cast<size_t>(GpuMemoryManager::kMinimumAllocationForTab);

  // With only three visible surfaces the manager has budget to spare, so
  // every foreground stub should receive strictly more than the minimum.
  FakeCommandBufferStub fg1(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg2(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg3(GenerateUniqueSurfaceId(), true, older_);
  client_.stubs_.push_back(&fg1);
  client_.stubs_.push_back(&fg2);
  client_.stubs_.push_back(&fg3);
  Manage();
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg1.allocation_));
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg2.allocation_));
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg3.allocation_));
  EXPECT_GT(fg1.allocation_.gpu_resource_size_in_bytes, min_alloc);
  EXPECT_GT(fg2.allocation_.gpu_resource_size_in_bytes, min_alloc);
  EXPECT_GT(fg3.allocation_.gpu_resource_size_in_bytes, min_alloc);

  // Crowd the manager with six more visible surfaces; the original three
  // should now be pinned at exactly the per-tab minimum, and not below it.
  FakeCommandBufferStub fg4(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg5(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg6(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg7(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg8(GenerateUniqueSurfaceId(), true, older_);
  FakeCommandBufferStub fg9(GenerateUniqueSurfaceId(), true, older_);
  client_.stubs_.push_back(&fg4);
  client_.stubs_.push_back(&fg5);
  client_.stubs_.push_back(&fg6);
  client_.stubs_.push_back(&fg7);
  client_.stubs_.push_back(&fg8);
  client_.stubs_.push_back(&fg9);
  Manage();
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg1.allocation_));
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg2.allocation_));
  EXPECT_TRUE(IsAllocationForegroundForSurfaceYes(fg3.allocation_));
  EXPECT_EQ(fg1.allocation_.gpu_resource_size_in_bytes, min_alloc);
  EXPECT_EQ(fg2.allocation_.gpu_resource_size_in_bytes, min_alloc);
  EXPECT_EQ(fg3.allocation_.gpu_resource_size_in_bytes, min_alloc);
}
// Test GpuMemoryAllocation comparison operators: Iterate over all possible
......@@ -465,21 +528,35 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
// buffer allocation states, and make sure that allocations with equal values
// compare equal and that allocations with differing values compare not equal.
TEST_F(GpuMemoryManagerTest, GpuMemoryAllocationCompareTests) {
int gpu_resource_size_in_bytes_values[] = { 0, 1, 12345678 };
bool suggest_have_backbuffer_values[] = { false, true };
bool suggest_have_frontbuffer_values[] = { false, true };
for(int* sz = &gpu_resource_size_in_bytes_values[0];
sz != &gpu_resource_size_in_bytes_values[3]; ++sz) {
for(bool* shbb = &suggest_have_backbuffer_values[0];
shbb != &suggest_have_backbuffer_values[2]; ++shbb) {
for(bool* shfb = &suggest_have_frontbuffer_values[0];
shfb != &suggest_have_frontbuffer_values[2]; ++shfb) {
GpuMemoryAllocation allocation(*sz, *shbb, *shfb);
EXPECT_EQ(allocation, GpuMemoryAllocation(*sz, *shbb, *shfb));
EXPECT_NE(allocation, GpuMemoryAllocation(*sz+1, *shbb, *shfb));
EXPECT_NE(allocation, GpuMemoryAllocation(*sz, !*shbb, *shfb));
EXPECT_NE(allocation, GpuMemoryAllocation(*sz, *shbb, !*shfb));
std::vector<int> gpu_resource_size_in_bytes_values;
gpu_resource_size_in_bytes_values.push_back(0);
gpu_resource_size_in_bytes_values.push_back(1);
gpu_resource_size_in_bytes_values.push_back(12345678);
std::vector<int> suggested_buffer_allocation_values;
suggested_buffer_allocation_values.push_back(
GpuMemoryAllocation::kHasFrontbuffer |
GpuMemoryAllocation::kHasBackbuffer);
suggested_buffer_allocation_values.push_back(
GpuMemoryAllocation::kHasFrontbuffer);
suggested_buffer_allocation_values.push_back(
GpuMemoryAllocation::kHasBackbuffer);
suggested_buffer_allocation_values.push_back(
GpuMemoryAllocation::kHasNoBuffers);
for(size_t i = 0; i != gpu_resource_size_in_bytes_values.size(); ++i) {
for(size_t j = 0; j != suggested_buffer_allocation_values.size(); ++j) {
int sz = gpu_resource_size_in_bytes_values[i];
int buffer_allocation = suggested_buffer_allocation_values[j];
GpuMemoryAllocation allocation(sz, buffer_allocation);
EXPECT_EQ(allocation, GpuMemoryAllocation(sz, buffer_allocation));
EXPECT_NE(allocation, GpuMemoryAllocation(sz+1, buffer_allocation));
for(size_t k = 0; k != suggested_buffer_allocation_values.size(); ++k) {
int buffer_allocation_other = suggested_buffer_allocation_values[k];
if (buffer_allocation == buffer_allocation_other) continue;
EXPECT_NE(allocation, GpuMemoryAllocation(sz, buffer_allocation_other));
}
}
}
......
......@@ -445,6 +445,11 @@ IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_EnsureBackbuffer)
// Sent by the GPU process to push a new memory allocation to the renderer
// side of this command buffer; handled by
// CommandBufferProxyImpl::OnSetMemoryAllocation.
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetMemoryAllocation,
                    GpuMemoryAllocationForRenderer /* allocation */)
// Sent to the stub whenever the proxy's memory-allocation-changed callback is
// set or cleared. |has_callback| is true exactly when a callback is currently
// registered (the proxy sends !callback.is_null()), so the GPU side can tell
// whether this client will act on SetMemoryAllocation messages.
IPC_MESSAGE_ROUTED1(
    GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
    bool /* has_callback */)
//------------------------------------------------------------------------------
// Accelerated Video Decoder Messages
// These messages are sent from Renderer process to GPU process.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment