Commit fee15043 authored by ccameron@chromium.org

Revert 274326 "Lobotomize the GPU memory manager"

BUG=380666

> Lobotomize the GPU memory manager
> 
> On desktop, always set the memory limit to 256MB, and ask that the renderer
> only use that memory to draw content that is near the viewport. The effective
> limit prior to this patch was 384MB, but it would quickly get cut down as
> multiple windows were opened, though it would not fall too far below 256MB
> (e.g., 128MB on Mac).
> 
> On mobile, use all available memory for the single current renderer.
> 
> Do not take into account unmanaged (e.g., WebGL) memory usage.
> 
> BUG=377065
> 
> Review URL: https://codereview.chromium.org/308743005

TBR=ccameron@chromium.org

Review URL: https://codereview.chromium.org/313163002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@274879 0039d316-1c4b-4281-b951-d872f2087c98
parent 20e65f2f
@@ -5,9 +5,6 @@ sievers@chromium.org
 # GPU memory buffer implementations.
 per-file *gpu_memory_buffer*=reveman@chromium.org
-# GPU memory manager.
-per-file *gpu_memory_manager*=ccameron@chromium.org
 # For security review of IPC message files.
 per-file *_messages*.h=set noparent
 per-file *_messages*.h=cdn@chromium.org
This diff is collapsed.
@@ -63,8 +63,11 @@ class CONTENT_EXPORT GpuMemoryManager :
       base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
   uint64 GetClientMemoryUsage(const GpuMemoryManagerClient* client) const;
-  uint64 GetMaximumClientAllocation() const {
-    return client_hard_limit_bytes_;
+  // The maximum and minimum amount of memory that a client may be assigned.
+  uint64 GetMaximumClientAllocation() const;
+  uint64 GetMinimumClientAllocation() const {
+    return bytes_minimum_per_client_;
   }
  private:
@@ -115,9 +118,46 @@ class CONTENT_EXPORT GpuMemoryManager :
   void AssignSurfacesAllocations();
   void AssignNonSurfacesAllocations();
+
+  // Math helper function to compute the maximum value of cap such that
+  // sum_i min(bytes[i], cap) <= bytes_sum_limit
+  static uint64 ComputeCap(std::vector<uint64> bytes, uint64 bytes_sum_limit);
+
+  // Compute the allocation for clients when visible and not visible.
+  void ComputeVisibleSurfacesAllocations();
+  void DistributeRemainingMemoryToVisibleSurfaces();
+
+  // Compute the budget for a client. Allow at most bytes_above_required_cap
+  // bytes above client_state's required level. Allow at most
+  // bytes_above_minimum_cap bytes above client_state's minimum level. Allow
+  // at most bytes_overall_cap bytes total.
+  uint64 ComputeClientAllocationWhenVisible(
+      GpuMemoryManagerClientState* client_state,
+      uint64 bytes_above_required_cap,
+      uint64 bytes_above_minimum_cap,
+      uint64 bytes_overall_cap);
+
   // Update the amount of GPU memory we think we have in the system, based
   // on what the stubs' contexts report.
   void UpdateAvailableGpuMemory();
+  void UpdateUnmanagedMemoryLimits();
+
+  // The amount of video memory which is available for allocation.
+  uint64 GetAvailableGpuMemory() const;
+
+  // Minimum value of available GPU memory, no matter how little the GPU
+  // reports. This is the default value.
+  uint64 GetDefaultAvailableGpuMemory() const;
+
+  // Maximum cap on total GPU memory, no matter how much the GPU reports.
+  uint64 GetMaximumTotalGpuMemory() const;
+
+  // The default amount of memory that a client is assigned, if it has not
+  // reported any memory usage stats yet.
+  uint64 GetDefaultClientAllocation() const {
+    return bytes_default_per_client_;
+  }
+
+  static uint64 CalcAvailableFromGpuTotal(uint64 total_gpu_memory);
+
   // Send memory usage stats to the browser process.
   void SendUmaStatsToBrowser();
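
For reference, ComputeCap (declared above) answers a small optimization question: given each client's requested byte count, what is the largest per-client cap such that the capped requests still sum to no more than the budget? A minimal sketch of one way to compute this, shown only as an illustration and not as the actual Chromium implementation:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Illustrative sketch: the largest cap such that
// sum_i min(bytes[i], cap) <= bytes_sum_limit.
uint64_t ComputeCapSketch(std::vector<uint64_t> bytes, uint64_t bytes_sum_limit) {
  if (bytes.empty())
    return std::numeric_limits<uint64_t>::max();
  std::sort(bytes.begin(), bytes.end());
  uint64_t bytes_accounted = 0;  // Sum of requests already known to fit in full.
  for (size_t i = 0; i < bytes.size(); ++i) {
    // Split the remaining budget evenly over the remaining (larger) requests.
    uint64_t remaining = bytes.size() - i;
    uint64_t even_split = (bytes_sum_limit - bytes_accounted) / remaining;
    // If the even split cannot cover bytes[i], that even split is the cap.
    if (even_split < bytes[i])
      return even_split;
    bytes_accounted += bytes[i];
  }
  // Every request fits in full, so no finite cap is needed.
  return std::numeric_limits<uint64_t>::max();
}

Sorting ascending lets the loop account for the small requests exactly and split whatever budget remains evenly over the larger ones; the first even split that falls short of the next request is the answer.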
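ComputeClientAllocationWhenVisible, per its comment, bounds a visible client's budget in three ways: at most bytes_above_required_cap above the client's reported required level, at most bytes_above_minimum_cap above its minimum level, and never more than bytes_overall_cap. A hedged sketch of that clamping, with bytes_required and bytes_minimum standing in for values the real method would read from client_state:

#include <algorithm>
#include <cstdint>

// Sketch only: returns the largest budget permitted by the three caps
// described in the header comment. Overflow of the additions is not handled.
uint64_t ComputeVisibleBudgetSketch(uint64_t bytes_required,
                                    uint64_t bytes_minimum,
                                    uint64_t bytes_above_required_cap,
                                    uint64_t bytes_above_minimum_cap,
                                    uint64_t bytes_overall_cap) {
  uint64_t allocation = bytes_required + bytes_above_required_cap;
  allocation = std::min(allocation, bytes_minimum + bytes_above_minimum_cap);
  allocation = std::min(allocation, bytes_overall_cap);
  return allocation;
}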
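GetDefaultAvailableGpuMemory and GetMaximumTotalGpuMemory act as a floor and a ceiling on the figure derived from the GPU's reported total, which is also how the CalcAvailableClamped helper in the unit test below treats them. A one-line sketch of that clamping (hypothetical function name):

#include <algorithm>
#include <cstdint>

// Sketch: clamp a derived "available" figure to [default_floor, maximum_ceiling],
// mirroring the CalcAvailableClamped test helper further down.
uint64_t ClampAvailableSketch(uint64_t derived_available,
                              uint64_t default_floor,
                              uint64_t maximum_ceiling) {
  return std::min(std::max(derived_available, default_floor), maximum_ceiling);
}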
@@ -153,6 +193,22 @@ class CONTENT_EXPORT GpuMemoryManager :
   // Interfaces for testing
   void TestingDisableScheduleManage() { disable_schedule_manage_ = true; }
+  void TestingSetAvailableGpuMemory(uint64 bytes) {
+    bytes_available_gpu_memory_ = bytes;
+    bytes_available_gpu_memory_overridden_ = true;
+  }
+
+  void TestingSetMinimumClientAllocation(uint64 bytes) {
+    bytes_minimum_per_client_ = bytes;
+  }
+
+  void TestingSetDefaultClientAllocation(uint64 bytes) {
+    bytes_default_per_client_ = bytes;
+  }
+
+  void TestingSetUnmanagedLimitStep(uint64 bytes) {
+    bytes_unmanaged_limit_step_ = bytes;
+  }
+
   GpuChannelManager* channel_manager_;
@@ -169,18 +225,36 @@ class CONTENT_EXPORT GpuMemoryManager :
   base::CancelableClosure delayed_manage_callback_;
   bool manage_immediate_scheduled_;
-  bool disable_schedule_manage_;
   uint64 max_surfaces_with_frontbuffer_soft_limit_;
-  // The maximum amount of memory that may be allocated for a single client.
-  uint64 client_hard_limit_bytes_;
+  // The priority cutoff used for all renderers.
+  gpu::MemoryAllocation::PriorityCutoff priority_cutoff_;
+
+  // The maximum amount of memory that may be allocated for GPU resources
+  uint64 bytes_available_gpu_memory_;
+  bool bytes_available_gpu_memory_overridden_;
+
+  // The minimum and default allocations for a single client.
+  uint64 bytes_minimum_per_client_;
+  uint64 bytes_default_per_client_;
+
   // The current total memory usage, and historical maximum memory usage
   uint64 bytes_allocated_managed_current_;
   uint64 bytes_allocated_unmanaged_current_;
   uint64 bytes_allocated_historical_max_;
+
+  // If bytes_allocated_unmanaged_current_ leaves the interval [low_, high_),
+  // then ScheduleManage to take the change into account.
+  uint64 bytes_allocated_unmanaged_high_;
+  uint64 bytes_allocated_unmanaged_low_;
+
+  // Update bytes_allocated_unmanaged_low/high_ in intervals of step_.
+  uint64 bytes_unmanaged_limit_step_;
+
+  // Used to disable automatic changes to Manage() in testing.
+  bool disable_schedule_manage_;
+
   DISALLOW_COPY_AND_ASSIGN(GpuMemoryManager);
 };
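
The restored bytes_allocated_unmanaged_low_/high_ members and bytes_unmanaged_limit_step_ give unmanaged allocations some hysteresis: a re-Manage is only scheduled when unmanaged usage leaves the current [low, high) band, and the band is quantized to the step size so it does not move on every small allocation. The exact rounding rule is not shown in this diff; the following is an assumed sketch of how such a band might be recomputed:

#include <cstdint>

// Sketch (assumption, not the shipped code): recompute the [low, high) band
// around the current unmanaged usage, quantized to `step` (step must be > 0),
// so the band only moves when usage changes by a meaningful amount.
void UpdateUnmanagedBandSketch(uint64_t unmanaged_current,
                               uint64_t step,
                               uint64_t* low,
                               uint64_t* high) {
  // Round the current usage down to a multiple of the step size.
  uint64_t rounded_down = (unmanaged_current / step) * step;
  *low = rounded_down;
  // The band extends one full step above the rounded-down value.
  *high = rounded_down + step;
}

On each TrackMemoryAllocatedChange for unmanaged memory, the manager would then compare the new total against this band and call ScheduleManage only when it falls outside.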
@@ -20,6 +20,13 @@ GpuMemoryManagerClientState::GpuMemoryManagerClientState(
       has_surface_(has_surface),
       visible_(visible),
       list_iterator_valid_(false),
+      managed_memory_stats_received_(false),
+      bytes_nicetohave_limit_low_(0),
+      bytes_nicetohave_limit_high_(0),
+      bytes_allocation_when_visible_(0),
+      bytes_allocation_ideal_nicetohave_(0),
+      bytes_allocation_ideal_required_(0),
+      bytes_allocation_ideal_minimum_(0),
       hibernated_(false) {
 }
@@ -81,6 +81,25 @@ class CONTENT_EXPORT GpuMemoryManagerClientState {
   std::list<GpuMemoryManagerClientState*>::iterator list_iterator_;
   bool list_iterator_valid_;
+
+  // Statistics about memory usage.
+  gpu::ManagedMemoryStats managed_memory_stats_;
+  bool managed_memory_stats_received_;
+
+  // When managed_memory_stats_.bytes_nicetohave leaves the range
+  // [low_, high_], then re-adjust memory limits.
+  uint64 bytes_nicetohave_limit_low_;
+  uint64 bytes_nicetohave_limit_high_;
+
+  // The allocation for this client, used transiently during memory policy
+  // calculation.
+  uint64 bytes_allocation_when_visible_;
+
+  // The ideal allocation for this client for three performance levels, used
+  // transiently during memory policy calculation.
+  uint64 bytes_allocation_ideal_nicetohave_;
+  uint64 bytes_allocation_ideal_required_;
+  uint64 bytes_allocation_ideal_minimum_;
+
   // Set to disable allocating a frontbuffer or to disable allocations
   // for clients that don't have surfaces.
   bool hibernated_;
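
bytes_nicetohave_limit_low_ and bytes_nicetohave_limit_high_ apply the same idea per client at the stats level: newly reported managed-memory stats should only force a reallocation when the client's nicetohave value drifts outside the band computed during the last Manage. A trivial sketch of that trigger check (names invented for illustration):

#include <cstdint>

// Sketch: return true when freshly reported stats should cause the memory
// manager to recompute allocations for this client.
bool NicetohaveLeftBandSketch(uint64_t reported_nicetohave,
                              uint64_t band_low,
                              uint64_t band_high) {
  return reported_nicetohave < band_low || reported_nicetohave > band_high;
}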
@@ -190,11 +190,13 @@ class GpuMemoryManagerTest : public testing::Test {
   }
   bool IsAllocationForegroundForSurfaceNo(
       const MemoryAllocation& alloc) {
-    return alloc.bytes_limit_when_visible == 1;
+    return alloc.bytes_limit_when_visible ==
+           GetMinimumClientAllocation();
   }
   bool IsAllocationBackgroundForSurfaceNo(
       const MemoryAllocation& alloc) {
-    return alloc.bytes_limit_when_visible == 1;
+    return alloc.bytes_limit_when_visible ==
+           GetMinimumClientAllocation();
   }
   bool IsAllocationHibernatedForSurfaceNo(
       const MemoryAllocation& alloc) {
@@ -206,6 +208,28 @@ class GpuMemoryManagerTest : public testing::Test {
     memmgr_.Manage();
   }
+
+  uint64 CalcAvailableFromGpuTotal(uint64 bytes) {
+    return GpuMemoryManager::CalcAvailableFromGpuTotal(bytes);
+  }
+
+  uint64 CalcAvailableClamped(uint64 bytes) {
+    bytes = std::max(bytes, memmgr_.GetDefaultAvailableGpuMemory());
+    bytes = std::min(bytes, memmgr_.GetMaximumTotalGpuMemory());
+    return bytes;
+  }
+
+  uint64 GetAvailableGpuMemory() {
+    return memmgr_.GetAvailableGpuMemory();
+  }
+
+  uint64 GetMaximumClientAllocation() {
+    return memmgr_.GetMaximumClientAllocation();
+  }
+
+  uint64 GetMinimumClientAllocation() {
+    return memmgr_.GetMinimumClientAllocation();
+  }
+
   void SetClientStats(
       FakeClient* client,
       uint64 required,
@@ -429,4 +453,160 @@ TEST_F(GpuMemoryManagerTest, TestManageChangingImportanceShareGroup) {
   EXPECT_TRUE(IsAllocationHibernatedForSurfaceNo(stub4.allocation_));
 }
+
+// Test GpuMemoryManager::UpdateAvailableGpuMemory functionality
+TEST_F(GpuMemoryManagerTest, TestUpdateAvailableGpuMemory) {
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true),
+             stub2(&memmgr_, GenerateUniqueSurfaceId(), false),
+             stub3(&memmgr_, GenerateUniqueSurfaceId(), true),
+             stub4(&memmgr_, GenerateUniqueSurfaceId(), false);
+  // We take the lowest GPU's total memory as the limit
+  uint64 expected = 400 * 1024 * 1024;
+  stub1.SetTotalGpuMemory(expected);  // GPU Memory
+  stub2.SetTotalGpuMemory(expected - 1024 * 1024);  // Smaller but not visible.
+  stub3.SetTotalGpuMemory(expected + 1024 * 1024);  // Visible but larger.
+  stub4.SetTotalGpuMemory(expected + 1024 * 1024);  // Not visible and larger.
+  Manage();
+  uint64 bytes_expected = CalcAvailableFromGpuTotal(expected);
+  EXPECT_EQ(GetAvailableGpuMemory(), CalcAvailableClamped(bytes_expected));
+}
+
+// Test GpuMemoryManager Stub Memory Stats functionality:
+// Creates various surface/non-surface stubs and switches stub visibility and
+// tests to see that stats data structure values are correct.
+TEST_F(GpuMemoryManagerTest, StubMemoryStatsForLastManageTests) {
+  ClientAssignmentCollector::ClientMemoryStatMap stats;
+
+  Manage();
+  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
+  EXPECT_EQ(stats.size(), 0ul);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+  Manage();
+  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
+  uint64 stub1allocation1 =
+      stats[&stub1].allocation.bytes_limit_when_visible;
+
+  EXPECT_EQ(stats.size(), 1ul);
+  EXPECT_GT(stub1allocation1, 0ul);
+
+  FakeClient stub2(&memmgr_, &stub1);
+  Manage();
+  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
+  EXPECT_EQ(stats.count(&stub1), 1ul);
+  uint64 stub1allocation2 =
+      stats[&stub1].allocation.bytes_limit_when_visible;
+  EXPECT_EQ(stats.count(&stub2), 1ul);
+  uint64 stub2allocation2 =
+      stats[&stub2].allocation.bytes_limit_when_visible;
+
+  EXPECT_EQ(stats.size(), 2ul);
+  EXPECT_GT(stub1allocation2, 0ul);
+  EXPECT_GT(stub2allocation2, 0ul);
+  if (stub1allocation2 != GetMaximumClientAllocation())
+    EXPECT_LT(stub1allocation2, stub1allocation1);
+
+  FakeClient stub3(&memmgr_, GenerateUniqueSurfaceId(), true);
+  Manage();
+  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
+  uint64 stub1allocation3 =
+      stats[&stub1].allocation.bytes_limit_when_visible;
+  uint64 stub2allocation3 =
+      stats[&stub2].allocation.bytes_limit_when_visible;
+  uint64 stub3allocation3 =
+      stats[&stub3].allocation.bytes_limit_when_visible;
+
+  EXPECT_EQ(stats.size(), 3ul);
+  EXPECT_GT(stub1allocation3, 0ul);
+  EXPECT_GT(stub2allocation3, 0ul);
+  EXPECT_GT(stub3allocation3, 0ul);
+  if (stub1allocation3 != GetMaximumClientAllocation())
+    EXPECT_LT(stub1allocation3, stub1allocation2);
+
+  stub1.SetVisible(false);
+  Manage();
+  stats = ClientAssignmentCollector::GetClientStatsForLastManage();
+  uint64 stub1allocation4 =
+      stats[&stub1].allocation.bytes_limit_when_visible;
+  uint64 stub2allocation4 =
+      stats[&stub2].allocation.bytes_limit_when_visible;
+  uint64 stub3allocation4 =
+      stats[&stub3].allocation.bytes_limit_when_visible;
+
+  EXPECT_EQ(stats.size(), 3ul);
+  EXPECT_GT(stub1allocation4, 0ul);
+  EXPECT_GE(stub2allocation4, 0ul);
+  EXPECT_GT(stub3allocation4, 0ul);
+  if (stub3allocation3 != GetMaximumClientAllocation())
+    EXPECT_GT(stub3allocation4, stub3allocation3);
+}
+
+// Test tracking of unmanaged (e.g., WebGL) memory.
+TEST_F(GpuMemoryManagerTest, UnmanagedTracking) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+  memmgr_.TestingSetUnmanagedLimitStep(16);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // Expect that the one stub gets its nicetohave level.
+  SetClientStats(&stub1, 16, 32);
+  Manage();
+  EXPECT_GE(stub1.BytesWhenVisible(), 32u);
+
+  // Now allocate some unmanaged memory and make sure the amount
+  // goes down.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      0,
+      48,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_LT(stub1.BytesWhenVisible(), 24u);
+
+  // Now allocate the entire FB worth of unmanaged memory, and
+  // make sure that we stay stuck at the minimum tab allocation.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      48,
+      64,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
+
+  // Far-oversubscribe the entire FB, and make sure we stay at
+  // the minimum allocation, and don't blow up.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      64,
+      999,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+  Manage();
+  EXPECT_EQ(stub1.BytesWhenVisible(), 8u);
+
+  // Delete all tracked memory so we don't hit leak checks.
+  memmgr_.TrackMemoryAllocatedChange(
+      stub1.tracking_group_.get(),
+      999,
+      0,
+      gpu::gles2::MemoryTracker::kUnmanaged);
+}
+
+// Test the default allocation levels are used.
+TEST_F(GpuMemoryManagerTest, DefaultAllocation) {
+  // Set memory manager constants for this test
+  memmgr_.TestingSetAvailableGpuMemory(64);
+  memmgr_.TestingSetMinimumClientAllocation(8);
+  memmgr_.TestingSetDefaultClientAllocation(16);
+
+  FakeClient stub1(&memmgr_, GenerateUniqueSurfaceId(), true);
+
+  // Expect that a client which has not sent stats receives at
+  // least the default allocation.
+  Manage();
+  EXPECT_GE(stub1.BytesWhenVisible(),
+            memmgr_.GetDefaultClientAllocation());
+}
+
 }  // namespace content
@@ -8,9 +8,9 @@ from telemetry.page import page_test
 from telemetry.core.timeline import counter
 from telemetry.core.timeline import model
-MEMORY_LIMIT_MB = 192
-SINGLE_TAB_LIMIT_MB = 192
-WIGGLE_ROOM_MB = 8
+MEMORY_LIMIT_MB = 256
+SINGLE_TAB_LIMIT_MB = 128
+WIGGLE_ROOM_MB = 4
 test_harness_script = r"""
 var domAutomationController = {};