Commit b4dcf955 authored by sergeyu@chromium.org's avatar sergeyu@chromium.org

Revert 181950 - Broke content_unittests on Android

> Distribute extra memory evenly among visible clients
> 
> After computing all clients' memory budgets, take any
> extra memory left over and distribute it amongst the
> visible clients (so that, if their memory requirements
> suddenly jump, they don't need to wait for a roundtrip
> to the GPU process to get extra memory).
> 
> Disallow keeping around backgrounded tabs' contents on
> Android, to keep the maximum for the main tab.
> 
> Explicitly limit the memory to use for prepainting by
> specifying NiceToHave on Mac to avoid performance problems.
> 
> BUG=175125
> 
> Review URL: https://codereview.chromium.org/12226082

TBR=ccameron@chromium.org
Review URL: https://codereview.chromium.org/12208134

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@181962 0039d316-1c4b-4281-b951-d872f2087c98
parent 93c7d591
......@@ -531,9 +531,6 @@ uint64 GpuMemoryManager::ComputeCap(
size_t bytes_size = bytes.size();
uint64 bytes_sum = 0;
if (bytes_size == 0)
return std::numeric_limits<uint64>::max();
// Sort and add up all entries
std::sort(bytes.begin(), bytes.end());
for (size_t i = 0; i < bytes_size; ++i)
......@@ -745,11 +742,6 @@ void GpuMemoryManager::ComputeNonvisibleSurfacesAllocationsNonuniform() {
bytes_available_total - bytes_allocated_visible);
}
// On Android, always discard everything that is nonvisible.
#if defined(OS_ANDROID)
bytes_available_nonvisible = 0;
#endif
// Determine which now-visible clients should keep their contents when
// they are made nonvisible.
for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
......@@ -799,58 +791,11 @@ void GpuMemoryManager::ComputeNonvisibleSurfacesAllocationsNonuniform() {
}
}
void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() {
uint64 bytes_available_total = GetAvailableGpuMemory();
uint64 bytes_allocated_total = 0;
for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
it != clients_visible_mru_.end();
++it) {
GpuMemoryManagerClientState* client_state = *it;
bytes_allocated_total += client_state->bytes_allocation_when_visible_;
}
for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
it != clients_nonvisible_mru_.end();
++it) {
GpuMemoryManagerClientState* client_state = *it;
bytes_allocated_total += client_state->bytes_allocation_when_nonvisible_;
}
if (bytes_allocated_total >= bytes_available_total)
return;
std::vector<uint64> bytes_extra_requests;
for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
it != clients_visible_mru_.end();
++it) {
GpuMemoryManagerClientState* client_state = *it;
CHECK(GetMaximumClientAllocation() >=
client_state->bytes_allocation_when_visible_);
uint64 bytes_extra = GetMaximumClientAllocation() -
client_state->bytes_allocation_when_visible_;
bytes_extra_requests.push_back(bytes_extra);
}
uint64 bytes_extra_cap = ComputeCap(
bytes_extra_requests, bytes_available_total - bytes_allocated_total);
for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
it != clients_visible_mru_.end();
++it) {
GpuMemoryManagerClientState* client_state = *it;
uint64 bytes_extra = GetMaximumClientAllocation() -
client_state->bytes_allocation_when_visible_;
client_state->bytes_allocation_when_visible_ += std::min(
bytes_extra, bytes_extra_cap);
}
}
void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
// Compute allocation when for all clients.
ComputeVisibleSurfacesAllocationsNonuniform();
ComputeNonvisibleSurfacesAllocationsNonuniform();
// Distribute the remaining memory to visible clients.
DistributeRemainingMemoryToVisibleSurfaces();
// Send that allocation to the clients.
ClientStateList clients = clients_visible_mru_;
clients.insert(clients.end(),
......@@ -877,11 +822,7 @@ void GpuMemoryManager::AssignSurfacesAllocationsNonuniform() {
allocation.renderer_allocation.bytes_limit_when_visible =
client_state->bytes_allocation_when_visible_;
allocation.renderer_allocation.priority_cutoff_when_visible =
#if defined(OS_MACOSX)
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowNiceToHave;
#else
GpuMemoryAllocationForRenderer::kPriorityCutoffAllowEverything;
#endif
allocation.renderer_allocation.bytes_limit_when_not_visible =
client_state->bytes_allocation_when_nonvisible_;
......
......@@ -124,7 +124,6 @@ class CONTENT_EXPORT GpuMemoryManager :
// Compute the allocation for clients when visible and not visible.
void ComputeVisibleSurfacesAllocationsNonuniform();
void ComputeNonvisibleSurfacesAllocationsNonuniform();
void DistributeRemainingMemoryToVisibleSurfaces();
// Compute the budget for a client. Allow at most bytes_above_required_cap
// bytes above client_state's required level. Allow at most
......
......@@ -924,6 +924,7 @@ TEST_F(GpuMemoryManagerTestNonuniform, BackgroundMru) {
EXPECT_GE(stub3.BytesWhenVisible(), 23u);
EXPECT_LT(stub1.BytesWhenVisible(), 32u);
EXPECT_LT(stub2.BytesWhenVisible(), 32u);
EXPECT_LT(stub3.BytesWhenVisible(), 32u);
EXPECT_GE(stub1.BytesWhenNotVisible(), 6u);
EXPECT_GE(stub2.BytesWhenNotVisible(), 6u);
EXPECT_GE(stub3.BytesWhenNotVisible(), 6u);
......@@ -1040,10 +1041,10 @@ TEST_F(GpuMemoryManagerTestNonuniform, DefaultAllocation) {
FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
// Expect that a client which has not sent stats receive at
// least the default allocation.
// Expect that a client which has not sent stats receive the
// default allocation.
Manage();
EXPECT_GE(stub1.BytesWhenVisible(),
EXPECT_EQ(stub1.BytesWhenVisible(),
memmgr_.GetDefaultClientAllocation());
EXPECT_EQ(stub1.BytesWhenNotVisible(), 0u);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment