Commit c1f1214e authored by mmocny@chromium.org

GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit.


BUG=123382
TEST=Manual


Committed: http://src.chromium.org/viewvc/chrome?view=rev&revision=134428

Review URL: http://codereview.chromium.org/10083056

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@134801 0039d316-1c4b-4281-b951-d872f2087c98
parent 570977f4
......@@ -105,6 +105,8 @@ void CommandBufferProxyImpl::SetMemoryAllocationChangedCallback(
const base::Callback<void(const GpuMemoryAllocationForRenderer&)>&
callback) {
memory_allocation_changed_callback_ = callback;
Send(new GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback(
route_id_, !memory_allocation_changed_callback_.is_null()));
}
void CommandBufferProxyImpl::OnSetMemoryAllocation(
......
......@@ -264,10 +264,6 @@ bool WebGraphicsContext3DCommandBufferImpl::MaybeInitializeGL(
g_all_shared_contexts.Pointer()->insert(this);
}
command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
&WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
weak_ptr_factory_.GetWeakPtr()));
visible_ = true;
initialized_ = true;
return true;
......@@ -680,6 +676,17 @@ void WebGraphicsContext3DCommandBufferImpl::
setMemoryAllocationChangedCallbackCHROMIUM(
WebGraphicsMemoryAllocationChangedCallbackCHROMIUM* callback) {
memory_allocation_changed_callback_ = callback;
if (!command_buffer_)
return;
if (callback)
command_buffer_->SetMemoryAllocationChangedCallback(base::Bind(
&WebGraphicsContext3DCommandBufferImpl::OnMemoryAllocationChanged,
weak_ptr_factory_.GetWeakPtr()));
else
command_buffer_->SetMemoryAllocationChangedCallback(
base::Callback<void(const GpuMemoryAllocationForRenderer&)>());
}
......
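The two client-side hunks above add the registration plumbing: whenever the WebKit-facing callback is set or cleared, CommandBufferProxyImpl now tells the GPU process whether a callback exists at all, so the memory manager can skip clients that never registered. Below is a minimal standalone sketch of that propagation, not the actual Chromium classes; FakeProxy and FakeStub are hypothetical stand-ins, and the IPC send is modeled as a direct call.

// Simplified model of the "client has a memory-allocation-changed callback"
// bit travelling from the renderer-side proxy to the GPU-side stub.
#include <cstddef>
#include <functional>
#include <iostream>

struct GpuMemoryAllocationForRenderer {
  size_t gpu_resource_size_in_bytes = 0;
  bool suggest_have_backbuffer = false;
};

// Stands in for GpuCommandBufferStub on the GPU-process side.
class FakeStub {
 public:
  void OnSetClientHasMemoryAllocationChangedCallback(bool has_callback) {
    client_has_memory_allocation_changed_callback_ = has_callback;
    // The real stub also calls gpu_memory_manager()->ScheduleManage() here.
  }
  bool client_has_memory_allocation_changed_callback() const {
    return client_has_memory_allocation_changed_callback_;
  }
 private:
  bool client_has_memory_allocation_changed_callback_ = false;
};

// Stands in for CommandBufferProxyImpl on the renderer side; the message
// GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback is modeled
// as a direct call on the stub.
class FakeProxy {
 public:
  explicit FakeProxy(FakeStub* stub) : stub_(stub) {}
  void SetMemoryAllocationChangedCallback(
      std::function<void(const GpuMemoryAllocationForRenderer&)> callback) {
    memory_allocation_changed_callback_ = std::move(callback);
    stub_->OnSetClientHasMemoryAllocationChangedCallback(
        static_cast<bool>(memory_allocation_changed_callback_));
  }
 private:
  FakeStub* stub_;
  std::function<void(const GpuMemoryAllocationForRenderer&)>
      memory_allocation_changed_callback_;
};

int main() {
  FakeStub stub;
  FakeProxy proxy(&stub);
  proxy.SetMemoryAllocationChangedCallback(
      [](const GpuMemoryAllocationForRenderer&) {});
  std::cout << stub.client_has_memory_allocation_changed_callback() << "\n";  // 1
  proxy.SetMemoryAllocationChangedCallback(nullptr);
  std::cout << stub.client_has_memory_allocation_changed_callback() << "\n";  // 0
  return 0;
}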
......@@ -59,7 +59,9 @@ GpuCommandBufferStub::GpuCommandBufferStub(
route_id_(route_id),
software_(software),
last_flush_count_(0),
allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE, true, true),
allocation_(GpuMemoryAllocation::INVALID_RESOURCE_SIZE,
GpuMemoryAllocation::kHasFrontbuffer |
GpuMemoryAllocation::kHasBackbuffer),
parent_stub_for_initialization_(),
parent_texture_for_initialization_(0),
watchdog_(watchdog) {
......@@ -131,6 +133,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
OnDiscardBackbuffer)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_EnsureBackbuffer,
OnEnsureBackbuffer)
IPC_MESSAGE_HANDLER(
GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
OnSetClientHasMemoryAllocationChangedCallback)
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
......@@ -589,6 +594,12 @@ void GpuCommandBufferStub::OnEnsureBackbuffer() {
gfx::GLSurface::BUFFER_ALLOCATION_FRONT_AND_BACK);
}
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
bool has_callback) {
client_has_memory_allocation_changed_callback_ = has_callback;
channel_->gpu_channel_manager()->gpu_memory_manager()->ScheduleManage();
}
void GpuCommandBufferStub::SendConsoleMessage(
int32 id,
const std::string& message) {
......@@ -617,6 +628,11 @@ bool GpuCommandBufferStub::IsInSameContextShareGroup(
static_cast<const GpuCommandBufferStub&>(other).context_group_;
}
bool GpuCommandBufferStub::
client_has_memory_allocation_changed_callback() const {
return client_has_memory_allocation_changed_callback_;
}
bool GpuCommandBufferStub::has_surface_state() const {
return surface_state_ != NULL;
}
......
......@@ -63,6 +63,7 @@ class CONTENT_EXPORT GpuCommandBufferStubBase {
virtual ~GpuCommandBufferStubBase() {}
// Will not have surface state if this is an offscreen commandbuffer.
virtual bool client_has_memory_allocation_changed_callback() const = 0;
virtual bool has_surface_state() const = 0;
virtual const SurfaceState& surface_state() const = 0;
......@@ -114,6 +115,7 @@ class GpuCommandBufferStub
virtual bool Send(IPC::Message* msg) OVERRIDE;
// GpuCommandBufferStubBase implementation:
virtual bool client_has_memory_allocation_changed_callback() const OVERRIDE;
virtual bool has_surface_state() const OVERRIDE;
virtual const GpuCommandBufferStubBase::SurfaceState& surface_state() const
OVERRIDE;
......@@ -205,6 +207,8 @@ class GpuCommandBufferStub
void OnDiscardBackbuffer();
void OnEnsureBackbuffer();
void OnSetClientHasMemoryAllocationChangedCallback(bool);
void OnReschedule();
void OnCommandProcessed();
......@@ -228,6 +232,7 @@ class GpuCommandBufferStub
gfx::GpuPreference gpu_preference_;
int32 route_id_;
bool software_;
bool client_has_memory_allocation_changed_callback_;
uint32 last_flush_count_;
scoped_ptr<GpuCommandBufferStubBase::SurfaceState> surface_state_;
GpuMemoryAllocation allocation_;
......
......@@ -12,7 +12,6 @@
// and assigned to the browser and renderer context.
// They will change over time, given memory availability, and browser state.
// Memory Allocation which will be assigned to the renderer context.
struct GpuMemoryAllocationForRenderer {
enum {
......@@ -68,17 +67,24 @@ struct GpuMemoryAllocationForBrowser {
// GpuMemoryManager.
struct GpuMemoryAllocation : public GpuMemoryAllocationForRenderer,
public GpuMemoryAllocationForBrowser {
// Bitmap
enum BufferAllocation {
kHasNoBuffers = 0,
kHasFrontbuffer = 1,
kHasBackbuffer = 2
};
GpuMemoryAllocation()
: GpuMemoryAllocationForRenderer(),
GpuMemoryAllocationForBrowser() {
}
GpuMemoryAllocation(size_t gpu_resource_size_in_bytes,
bool suggest_have_backbuffer,
bool suggest_have_frontbuffer)
int allocationBitmap)
: GpuMemoryAllocationForRenderer(gpu_resource_size_in_bytes,
suggest_have_backbuffer),
GpuMemoryAllocationForBrowser(suggest_have_frontbuffer) {
(allocationBitmap & kHasBackbuffer) == kHasBackbuffer),
GpuMemoryAllocationForBrowser(
(allocationBitmap & kHasFrontbuffer) == kHasFrontbuffer) {
}
bool operator==(const GpuMemoryAllocation& other) const {
......
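The constructor change above replaces the old pair of booleans with a single BufferAllocation bitmap. Here is a small standalone sketch of that decoding, with simplified field and parameter names (the real structs live in the header being diffed here); the values in main() are only an illustration.

#include <cassert>
#include <cstddef>

struct GpuMemoryAllocationForRenderer {
  size_t gpu_resource_size_in_bytes;
  bool suggest_have_backbuffer;
  GpuMemoryAllocationForRenderer(size_t size, bool backbuffer)
      : gpu_resource_size_in_bytes(size), suggest_have_backbuffer(backbuffer) {}
};

struct GpuMemoryAllocationForBrowser {
  bool suggest_have_frontbuffer;
  explicit GpuMemoryAllocationForBrowser(bool frontbuffer)
      : suggest_have_frontbuffer(frontbuffer) {}
};

struct GpuMemoryAllocation : public GpuMemoryAllocationForRenderer,
                             public GpuMemoryAllocationForBrowser {
  enum BufferAllocation {
    kHasNoBuffers = 0,
    kHasFrontbuffer = 1,
    kHasBackbuffer = 2
  };
  // The bitmap is split into the two booleans the base structs still expect.
  GpuMemoryAllocation(size_t gpu_resource_size_in_bytes, int allocation_bitmap)
      : GpuMemoryAllocationForRenderer(
            gpu_resource_size_in_bytes,
            (allocation_bitmap & kHasBackbuffer) == kHasBackbuffer),
        GpuMemoryAllocationForBrowser(
            (allocation_bitmap & kHasFrontbuffer) == kHasFrontbuffer) {}
};

int main() {
  // A visible tab gets both buffers; a backgrounded one keeps only the front.
  GpuMemoryAllocation visible(64 * 1024 * 1024,
                              GpuMemoryAllocation::kHasFrontbuffer |
                                  GpuMemoryAllocation::kHasBackbuffer);
  GpuMemoryAllocation background(0, GpuMemoryAllocation::kHasFrontbuffer);
  assert(visible.suggest_have_backbuffer && visible.suggest_have_frontbuffer);
  assert(!background.suggest_have_backbuffer &&
         background.suggest_have_frontbuffer);
  return 0;
}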
......@@ -15,15 +15,6 @@
namespace {
// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
// used to check if it is 0 or non-0. In the future, these values will not
// come from constants, but rather will be distributed dynamically.
enum {
kResourceSizeNonHibernatedTab = 1,
kResourceSizeHibernatedTab = 0
};
bool IsInSameContextShareGroupAsAnyOf(
const GpuCommandBufferStubBase* stub,
const std::vector<GpuCommandBufferStubBase*>& stubs) {
......@@ -35,6 +26,14 @@ bool IsInSameContextShareGroupAsAnyOf(
return false;
}
void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
GpuMemoryAllocation allocation) {
for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
it != stubs.end(); ++it) {
(*it)->SetMemoryAllocation(allocation);
}
}
}
GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
......@@ -100,14 +99,6 @@ void GpuMemoryManager::ScheduleManage() {
// 1. Find the most visible context-with-a-surface within each
// context-without-a-surface's share group, and inherit its visibility.
void GpuMemoryManager::Manage() {
// Set up three allocation values for the three possible stub states
const GpuMemoryAllocation all_buffers_allocation(
kResourceSizeNonHibernatedTab, true, true);
const GpuMemoryAllocation front_buffers_allocation(
kResourceSizeNonHibernatedTab, false, true);
const GpuMemoryAllocation no_buffers_allocation(
kResourceSizeHibernatedTab, false, false);
manage_scheduled_ = false;
// Create stub lists by separating out the two types received from client
......@@ -120,6 +111,8 @@ void GpuMemoryManager::Manage() {
for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
it != stubs.end(); ++it) {
GpuCommandBufferStubBase* stub = *it;
if (!stub->client_has_memory_allocation_changed_callback())
continue;
if (stub->has_surface_state())
stubs_with_surface.push_back(stub);
else
......@@ -136,37 +129,73 @@ void GpuMemoryManager::Manage() {
stubs_with_surface.end());
// Separate stubs into memory allocation sets.
std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
stubs_with_surface_background,
stubs_with_surface_hibernated,
stubs_without_surface_foreground,
stubs_without_surface_background,
stubs_without_surface_hibernated;
for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
GpuCommandBufferStubBase* stub = stubs_with_surface[i];
DCHECK(stub->has_surface_state());
if (stub->surface_state().visible) {
all_buffers.push_back(stub);
stub->SetMemoryAllocation(all_buffers_allocation);
} else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
front_buffers.push_back(stub);
stub->SetMemoryAllocation(front_buffers_allocation);
} else {
no_buffers.push_back(stub);
stub->SetMemoryAllocation(no_buffers_allocation);
}
if (stub->surface_state().visible)
stubs_with_surface_foreground.push_back(stub);
else if (i < max_surfaces_with_frontbuffer_soft_limit_)
stubs_with_surface_background.push_back(stub);
else
stubs_with_surface_hibernated.push_back(stub);
}
// Now, go through the stubs without surfaces and deduce visibility using the
// visibility of stubs which are in the same context share group.
for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
GpuCommandBufferStubBase* stub = *it;
DCHECK(!stub->has_surface_state());
if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
stub->SetMemoryAllocation(all_buffers_allocation);
} else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
stub->SetMemoryAllocation(front_buffers_allocation);
} else {
stub->SetMemoryAllocation(no_buffers_allocation);
}
// Stubs without surfaces have deduced allocation state using the state
// of surface stubs which are in the same context share group.
if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
stubs_without_surface_foreground.push_back(stub);
else if (IsInSameContextShareGroupAsAnyOf(
stub, stubs_with_surface_background))
stubs_without_surface_background.push_back(stub);
else
stubs_without_surface_hibernated.push_back(stub);
}
// Calculate memory allocation size in bytes given to each stub, by sharing
// global limit equally among those that need it.
size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
stubs_without_surface_foreground.size() +
stubs_without_surface_background.size();
size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
size_t bonus_allocation = 0;
if (base_allocation_size < kMaximumAllocationForTabs &&
!stubs_with_surface_foreground.empty())
bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
stubs_with_surface_foreground.size();
// Now give out allocations to everyone.
AssignMemoryAllocations(stubs_with_surface_foreground,
GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
GpuMemoryAllocation::kHasFrontbuffer |
GpuMemoryAllocation::kHasBackbuffer));
AssignMemoryAllocations(stubs_with_surface_background,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
AssignMemoryAllocations(stubs_with_surface_hibernated,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_foreground,
GpuMemoryAllocation(kMinimumAllocationForTab,
GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_background,
GpuMemoryAllocation(kMinimumAllocationForTab,
GpuMemoryAllocation::kHasNoBuffers));
AssignMemoryAllocations(stubs_without_surface_hibernated,
GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
}
#endif
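For concreteness, here is a standalone sketch of the budget arithmetic Manage() now performs, using the desktop constants introduced in the gpu_memory_manager.h hunk below; the stub counts are invented purely for illustration and the code is not the actual Manage() implementation.

#include <cstddef>
#include <iostream>

int main() {
  const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;
  const size_t kMaximumAllocationForTabs =
      512 * 1024 * 1024 - kMinimumAllocationForTab;  // 448 MB soft limit

  // Hypothetical counts: 2 visible surface stubs, 1 foreground and 1
  // background stub without a surface.
  size_t stubs_with_surface_foreground = 2;
  size_t stubs_without_surface_foreground = 1;
  size_t stubs_without_surface_background = 1;

  size_t num_stubs_need_mem = stubs_with_surface_foreground +
                              stubs_without_surface_foreground +
                              stubs_without_surface_background;
  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
  size_t bonus_allocation = 0;
  if (base_allocation_size < kMaximumAllocationForTabs &&
      stubs_with_surface_foreground > 0) {
    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
                       stubs_with_surface_foreground;
  }
  // Each visible tab is offered its minimum plus an equal share of whatever
  // remains under the global soft limit; here that is 64 MB + 96 MB = 160 MB.
  std::cout << (kMinimumAllocationForTab + bonus_allocation) / (1024 * 1024)
            << " MB per visible tab\n";
  return 0;
}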
......@@ -28,6 +28,23 @@ class CONTENT_EXPORT GpuMemoryManager {
public:
enum { kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8 };
// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes.
// Maximum Allocation for all tabs is a soft limit that can be exceeded
// during the time it takes for renderers to respect new allocations,
// including when switching tabs or opening a new window.
// To alleviate some pressure, we decrease our desired limit by "one tab's
// worth" of memory.
enum {
#if defined(OS_ANDROID)
kMinimumAllocationForTab = 32 * 1024 * 1024,
kMaximumAllocationForTabs = 64 * 1024 * 1024,
#else
kMinimumAllocationForTab = 64 * 1024 * 1024,
kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
#endif
};
GpuMemoryManager(GpuMemoryManagerClient* client,
size_t max_surfaces_with_frontbuffer_soft_limit);
~GpuMemoryManager();
......
......@@ -445,6 +445,11 @@ IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_EnsureBackbuffer)
IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetMemoryAllocation,
GpuMemoryAllocationForRenderer /* allocation */)
// Sent to stub when proxy is assigned a memory allocation changed callback.
IPC_MESSAGE_ROUTED1(
GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
bool /* has_callback */)
//------------------------------------------------------------------------------
// Accelerated Video Decoder Messages
// These messages are sent from Renderer process to GPU process.
......