Commit c1f1214e authored by mmocny@chromium.org

GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit.
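As context for reviewers, here is a rough sketch of how the new budget arithmetic in GpuMemoryManager::Manage() plays out with the desktop constants added in gpu_memory_manager.h. The standalone program below is illustrative only; the scenario, variable names, and printout are this sketch's, not code from the tree.

#include <cstddef>
#include <cstdio>

int main() {
  // Constants from gpu_memory_manager.h (non-Android values).
  const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;
  const size_t kMaximumAllocationForTabs =
      512 * 1024 * 1024 - kMinimumAllocationForTab;  // 448 MB soft limit

  // Example scenario: 2 visible surfaces, plus 3 surfaceless contexts whose
  // share groups are foreground or background (so they still need memory).
  size_t foreground_surfaces = 2;
  size_t surfaceless_needing_memory = 3;

  size_t num_stubs_need_mem = foreground_surfaces + surfaceless_needing_memory;
  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;  // 320 MB
  size_t bonus_allocation = 0;
  if (base_allocation_size < kMaximumAllocationForTabs && foreground_surfaces > 0)
    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
                       foreground_surfaces;  // (448 - 320) / 2 = 64 MB each

  // Visible surfaces are offered the minimum plus the bonus (128 MB here);
  // surfaceless contexts that need memory get the 64 MB minimum; backgrounded
  // surfaces keep only a frontbuffer; hibernated contexts get nothing.
  printf("bonus per foreground surface: %zu MB\n", bonus_allocation / (1024 * 1024));
  return 0;
}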


BUG=123382
TEST=Manual


Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=134428

Review URL: http://codereview.chromium.org/10083056

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@134801 0039d316-1c4b-4281-b951-d872f2087c98
parent 570977f4
@@ -105,6 +105,8 @@ void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
     const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
         callback) {
   memory_allocation_changed_callback_ = callback;
+  Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
+      route_id_, !memory_allocation_changed_callback_.is_null()));
 }

 void CommandBufferProxyImpl::OnSetMemoryAllocation(
...
@@ -264,10 +264,6 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL(
     g_all_shared_contexts.Pointer()->insert(this);
   }

-  command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
-      &WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
-      weak_ptr_factory_.GetWeakPtr()));
-
   visible_ = true;
   initialized_ = true;
   return true;
@@ -680,6 +676,17 @@ void WebGraphicsContext3DCommandBufferImpl::
     setMemoryAllocationChangedCallbackCHROMIUM(
         WebGraphicsMemoryAllocationChangedCallbackCHROMIUM* callback) {
   memory_allocation_changed_callback_ = callback;
+
+  if (!command_buffer_)
+    return;
+
+  if (callback)
+    command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
+        &WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
+        weak_ptr_factory_.GetWeakPtr()));
+  else
+    command_buffer_->SetMemoryAllocationChangedCallback(
+        base::Callback<void(const GpuMemoryAllocationForRenderer&)>());
 }
...
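Note on the change above: the context now registers with CommandBufferProxyImpl only while a client callback is installed, and clears the registration by passing a default-constructed (null) base::Callback; the proxy in turn forwards only the boolean !callback.is_null() over IPC. A standalone analogy of that set/clear pattern, using std::function instead of Chromium's base::Callback (FakeProxy and the alias below are invented for this sketch):

#include <functional>
#include <iostream>
#include <utility>

using MemoryAllocationCallback = std::function<void(int /* allocation */)>;

struct FakeProxy {
  MemoryAllocationCallback callback;
  void SetMemoryAllocationChangedCallback(MemoryAllocationCallback cb) {
    callback = std::move(cb);
    // Stands in for sending
    // GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback.
    std::cout << "has_callback=" << static_cast<bool>(callback) << "\n";
  }
};

int main() {
  FakeProxy proxy;
  proxy.SetMemoryAllocationChangedCallback([](int) {});                  // prints has_callback=1
  proxy.SetMemoryAllocationChangedCallback(MemoryAllocationCallback());  // prints has_callback=0
  return 0;
}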
@@ -59,7 +59,9 @@ GpuCommandBufferStub::GpuCommandBufferStub(
       route_id_(route_id),
       software_(software),
       last_flush_count_(0),
-      allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE, true, true),
+      allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE,
+                  GpuMemoryAllocation::kHasFrontbuffer |
+                  GpuMemoryAllocation::kHasBackbuffer),
       parent_stub_for_initialization_(),
       parent_texture_for_initialization_(0),
       watchdog_(watchdog) {
@@ -131,6 +133,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
                         OnDiscardBackbuffer)
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
                         OnEnsureBackbuffer)
+    IPC_MESSAGE_HANDLER(
+        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
+        OnSetClientHasMemoryAllocationChangedCallback)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
@@ -589,6 +594,12 @@ void GpuCommandBufferStub::OnEnsureBackbuffer() {
       gfx::GLSurface::BUFFER_ALLOCATION_FRONT_AND_BACK);
 }

+void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
+    bool has_callback) {
+  client_has_memory_allocation_changed_callback_ = has_callback;
+  channel_->gpu_channel_manager()->gpu_memory_manager()->ScheduleManage();
+}
+
 void GpuCommandBufferStub::SendConsoleMessage(
     int32 id,
     const std::string& message) {
@@ -617,6 +628,11 @@ bool GpuCommandBufferStub::IsInSameContextShareGroup(
       static_cast<const GpuCommandBufferStub&>(other).context_group_;
 }

+bool GpuCommandBufferStub::
+    client_has_memory_allocation_changed_callback() const {
+  return client_has_memory_allocation_changed_callback_;
+}
+
 bool GpuCommandBufferStub::has_surface_state() const {
   return surface_state_ != NULL;
 }
...
@@ -63,6 +63,7 @@ class CONTENT_EXPORT GpuCommandBufferStubBase {
   virtual ~GpuCommandBufferStubBase() {}

   // Will not have surface state if this is an offscreen commandbuffer.
+  virtual bool client_has_memory_allocation_changed_callback() const = 0;
   virtual bool has_surface_state() const = 0;
   virtual const SurfaceState& surface_state() const = 0;
@@ -114,6 +115,7 @@ class GpuCommandBufferStub
   virtual bool Send(IPC::Message* msg) OVERRIDE;

   // GpuCommandBufferStubBase implementation:
+  virtual bool client_has_memory_allocation_changed_callback() const OVERRIDE;
   virtual bool has_surface_state() const OVERRIDE;
   virtual const GpuCommandBufferStubBase::SurfaceState& surface_state() const
       OVERRIDE;
@@ -205,6 +207,8 @@ class GpuCommandBufferStub
   void OnDiscardBackbuffer();
   void OnEnsureBackbuffer();

+  void OnSetClientHasMemoryAllocationChangedCallback(bool);
+
   void OnReschedule();

   void OnCommandProcessed();
@@ -228,6 +232,7 @@ class GpuCommandBufferStub
   gfx::GpuPreference gpu_preference_;
   int32 route_id_;
   bool software_;
+  bool client_has_memory_allocation_changed_callback_;
   uint32 last_flush_count_;
   scoped_ptr<GpuCommandBufferStubBase::SurfaceState> surface_state_;
   GpuMemoryAllocation allocation_;
...
@@ -12,7 +12,6 @@
 // and assigned to the browser and renderer context.
 // They will change over time, given memory availability, and browser state.

-// Memory Allocation which will be assigned to the renderer context.
 struct GpuMemoryAllocationForRenderer {
   enum {
@@ -68,17 +67,24 @@ struct GpuMemoryAllocationForBrowser {
 // GpuMemoryManager.
 struct GpuMemoryAllocation : public GpuMemoryAllocationForRenderer,
                              public GpuMemoryAllocationForBrowser {
+  // Bitmap
+  enum BufferAllocation {
+    kHasNoBuffers = 0,
+    kHasFrontbuffer = 1,
+    kHasBackbuffer = 2
+  };
+
   GpuMemoryAllocation()
       : GpuMemoryAllocationForRenderer(),
         GpuMemoryAllocationForBrowser() {
   }

   GpuMemoryAllocation(size_t gpu_resource_size_in_bytes,
-                      bool suggest_have_backbuffer,
-                      bool suggest_have_frontbuffer)
+                      int allocationBitmap)
       : GpuMemoryAllocationForRenderer(gpu_resource_size_in_bytes,
-                                       suggest_have_backbuffer),
-        GpuMemoryAllocationForBrowser(suggest_have_frontbuffer) {
+          (allocationBitmap & kHasBackbuffer) == kHasBackbuffer),
+        GpuMemoryAllocationForBrowser(
+          (allocationBitmap & kHasFrontbuffer) == kHasFrontbuffer) {
   }

   bool operator==(const GpuMemoryAllocation& other) const {
...
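A small self-contained sketch of the new bitmap-style constructor argument above: the enum values mirror GpuMemoryAllocation::BufferAllocation, and the bit tests are the same ones the constructor now performs (the local enum and main() exist only for illustration):

#include <cassert>

enum BufferAllocation {  // mirrors GpuMemoryAllocation::BufferAllocation
  kHasNoBuffers = 0,
  kHasFrontbuffer = 1,
  kHasBackbuffer = 2
};

int main() {
  int bitmap = kHasFrontbuffer | kHasBackbuffer;  // what a visible surface gets
  bool suggest_have_backbuffer = (bitmap & kHasBackbuffer) == kHasBackbuffer;
  bool suggest_have_frontbuffer = (bitmap & kHasFrontbuffer) == kHasFrontbuffer;
  assert(suggest_have_backbuffer && suggest_have_frontbuffer);

  bitmap = kHasFrontbuffer;  // a backgrounded surface keeps only its frontbuffer
  assert((bitmap & kHasBackbuffer) != kHasBackbuffer);
  return 0;
}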
@@ -15,15 +15,6 @@
 namespace {

-// These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
-enum {
-  kResourceSizeNonHibernatedTab = 1,
-  kResourceSizeHibernatedTab = 0
-};
-
 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
@@ -35,6 +26,14 @@ bool IsInSameContextShareGroupAsAnyOf(
   return false;
 }

+void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
+                             GpuMemoryAllocation allocation) {
+  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+      it != stubs.end(); ++it) {
+    (*it)->SetMemoryAllocation(allocation);
+  }
+}
+
 }

 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
@@ -100,14 +99,6 @@ void GpuMemoryManager::ScheduleManage() {
 // 1. Find the most visible context-with-a-surface within each
 // context-without-a-surface's share group, and inherit its visibilty.
 void GpuMemoryManager::Manage() {
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
@@ -120,6 +111,8 @@ void GpuMemoryManager::Manage() {
   for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
       it != stubs.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
+    if (!stub->client_has_memory_allocation_changed_callback())
+      continue;
     if (stub->has_surface_state())
       stubs_with_surface.push_back(stub);
     else
@@ -136,37 +129,73 @@ void GpuMemoryManager::Manage() {
       stubs_with_surface.end());

   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
+                                         stubs_with_surface_background,
+                                         stubs_with_surface_hibernated,
+                                         stubs_without_surface_foreground,
+                                         stubs_without_surface_background,
+                                         stubs_without_surface_hibernated;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible) {
-      all_buffers.push_back(stub);
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
-      front_buffers.push_back(stub);
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      no_buffers.push_back(stub);
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (stub->surface_state().visible)
+      stubs_with_surface_foreground.push_back(stub);
+    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
+      stubs_with_surface_background.push_back(stub);
+    else
+      stubs_with_surface_hibernated.push_back(stub);
   }

-  // Now, go through the stubs without surfaces and deduce visibility using the
-  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+
+    // Stubs without surfaces have deduced allocation state using the state
+    // of surface stubs which are in the same context share group.
+    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
+      stubs_without_surface_foreground.push_back(stub);
+    else if (IsInSameContextShareGroupAsAnyOf(
+        stub, stubs_with_surface_background))
+      stubs_without_surface_background.push_back(stub);
+    else
+      stubs_without_surface_hibernated.push_back(stub);
   }
+
+  // Calculate memory allocation size in bytes given to each stub, by sharing
+  // global limit equally among those that need it.
+  size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
+                              stubs_without_surface_foreground.size() +
+                              stubs_without_surface_background.size();
+  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
+  size_t bonus_allocation = 0;
+  if (base_allocation_size < kMaximumAllocationForTabs &&
+      !stubs_with_surface_foreground.empty())
+    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
+        stubs_with_surface_foreground.size();
+
+  // Now give out allocations to everyone.
+  AssignMemoryAllocations(stubs_with_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
+          GpuMemoryAllocation::kHasFrontbuffer |
+          GpuMemoryAllocation::kHasBackbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_background,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_background,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }

 #endif
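For readers skimming the rewritten Manage() above, a compressed restatement of the resulting policy as a standalone sketch. The struct, function, and parameters below are this example's own simplification (share-group deduction is folded into the foreground/background flags), not code from the tree:

#include <cassert>
#include <cstddef>

struct Suggestion { size_t bytes; bool frontbuffer; bool backbuffer; };

// What each of the six stub categories receives. "bonus" is the extra share of
// kMaximumAllocationForTabs handed only to foreground (visible) surfaces.
Suggestion SuggestFor(bool has_surface, bool foreground, bool background,
                      size_t minimum_for_tab, size_t bonus) {
  if (has_surface) {
    if (foreground) return { minimum_for_tab + bonus, true, true };
    if (background) return { 0, true, false };  // keep frontbuffer only
    return { 0, false, false };                 // hibernated
  }
  // Surfaceless stubs: foreground and background share groups still get the
  // per-tab minimum (no buffers of their own); hibernated ones get nothing.
  if (foreground || background) return { minimum_for_tab, false, false };
  return { 0, false, false };
}

int main() {
  // A visible tab with a 64 MB minimum and a 64 MB bonus ends up at 128 MB.
  Suggestion s = SuggestFor(true, true, false, 64u << 20, 64u << 20);
  assert(s.frontbuffer && s.backbuffer && s.bytes == (128u << 20));
  return 0;
}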
@@ -28,6 +28,23 @@ class CONTENT_EXPORT GpuMemoryManager {
  public:
   enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };

+  // These are predefined values (in bytes) for
+  // GpuMemoryAllocation::gpuResourceSizeInBytes.
+  // Maximum Allocation for all tabs is a soft limit that can be exceeded
+  // during the time it takes for renderers to respect new allocations,
+  // including when switching tabs or opening a new window.
+  // To alleviate some pressure, we decrease our desired limit by "one tabs'
+  // worth" of memory.
+  enum {
+#if defined(OS_ANDROID)
+    kMinimumAllocationForTab = 32 * 1024 * 1024,
+    kMaximumAllocationForTabs = 64 * 1024 * 1024,
+#else
+    kMinimumAllocationForTab = 64 * 1024 * 1024,
+    kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
+#endif
+  };
+
   GpuMemoryManager(GpuMemoryManagerClient* client,
                    size_t max_surfaces_with_frontbuffer_soft_limit);
   ~GpuMemoryManager();
...
@@ -445,6 +445,11 @@ IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_EnsureBackbuffer)
 IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetMemoryAllocation,
                     GpuMemoryAllocationForRenderer /* allocation */)

+// Sent to stub when proxy is assigned a memory allocation changed callback.
+IPC_MESSAGE_ROUTED1(
+    GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
+    bool /* has_callback */)
+
 //------------------------------------------------------------------------------
 // Accelerated Video Decoder Messages
 // These messages are sent from Renderer process to GPU process.
...