Commit be3028d7 authored by ericrk's avatar ericrk Committed by Commit bot

Avoid double-counting in GpuChannel memory tracking.

GpuChannel currently calculates memory usage by iterating over all
stubs and summing their individual memory usage. This double-counts
memory, because a stub may share its memory tracker with other stubs
in the same share group.

To avoid double counting, we now iterate over stubs to collect a set of
unique MemoryTrackers, then sum the memory usage of each unique tracker.
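
Illustrative sketch (not part of the change): the core idea is to key the
sum on unique MemoryTracker pointers instead of on stubs. The Tracker and
Stub types below are simplified stand-ins for gpu::gles2::MemoryTracker
and GpuCommandBufferStub, assumed here only to keep the example
self-contained.

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

struct Tracker {
  uint64_t size_bytes;  // Memory attributed to this tracker.
};

struct Stub {
  Tracker* tracker;  // Stubs in the same share group point at one shared tracker.
};

// Old approach: summing per stub counts a shared tracker once per stub.
uint64_t SumPerStub(const std::vector<Stub>& stubs) {
  uint64_t size = 0;
  for (const Stub& stub : stubs)
    size += stub.tracker->size_bytes;
  return size;
}

// New approach: collect the unique trackers first, then sum each one once.
uint64_t SumPerUniqueTracker(const std::vector<Stub>& stubs) {
  std::set<Tracker*> unique_trackers;
  for (const Stub& stub : stubs)
    unique_trackers.insert(stub.tracker);

  uint64_t size = 0;
  for (Tracker* tracker : unique_trackers)
    size += tracker->size_bytes;
  return size;
}

int main() {
  Tracker shared{10};  // One tracker shared by a two-stub share group.
  std::vector<Stub> stubs = {{&shared}, {&shared}};
  std::cout << SumPerStub(stubs) << "\n";           // Prints 20: double-counted.
  std::cout << SumPerUniqueTracker(stubs) << "\n";  // Prints 10: counted once.
  return 0;
}

Keying the set on tracker pointers means each share group contributes its
memory exactly once, no matter how many stubs reference the same tracker.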

BUG=512535

Review URL: https://codereview.chromium.org/1244843004

Cr-Commit-Position: refs/heads/master@{#339746}
parent cce531ce
@@ -844,11 +844,20 @@ void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
 }
 
 uint64 GpuChannel::GetMemoryUsage() {
-  uint64 size = 0;
+  // Collect the unique memory trackers in use by the |stubs_|.
+  std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
        !it.IsAtEnd(); it.Advance()) {
-    size += it.GetCurrentValue()->GetMemoryUsage();
+    unique_memory_trackers.insert(it.GetCurrentValue()->GetMemoryTracker());
   }
+
+  // Sum the memory usage for all unique memory trackers.
+  uint64 size = 0;
+  for (auto* tracker : unique_memory_trackers) {
+    size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
+        tracker);
+  }
+
   return size;
 }
@@ -1149,10 +1149,6 @@ void GpuCommandBufferStub::MarkContextLost() {
   command_buffer_->SetParseError(gpu::error::kLostContext);
 }
 
-uint64 GpuCommandBufferStub::GetMemoryUsage() const {
-  return GetMemoryManager()->GetClientMemoryUsage(this);
-}
-
 void GpuCommandBufferStub::SendSwapBuffersCompleted(
     const std::vector<ui::LatencyInfo>& latency_info,
     gfx::SwapResult result) {
@@ -148,8 +148,6 @@ class GpuCommandBufferStub
   const gpu::gles2::FeatureInfo* GetFeatureInfo() const;
 
-  uint64 GetMemoryUsage() const;
-
   void SendSwapBuffersCompleted(
       const std::vector<ui::LatencyInfo>& latency_info,
       gfx::SwapResult result);
@@ -206,10 +206,10 @@ void GpuMemoryManager::SetClientStateVisible(
   ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
 }
 
-uint64 GpuMemoryManager::GetClientMemoryUsage(
-    const GpuMemoryManagerClient* client) const {
+uint64 GpuMemoryManager::GetTrackerMemoryUsage(
+    gpu::gles2::MemoryTracker* tracker) const {
   TrackingGroupMap::const_iterator tracking_group_it =
-      tracking_groups_.find(client->GetMemoryTracker());
+      tracking_groups_.find(tracker);
   DCHECK(tracking_group_it != tracking_groups_.end());
   return tracking_group_it->second->GetSize();
 }
@@ -62,7 +62,7 @@ class CONTENT_EXPORT GpuMemoryManager :
   GpuMemoryTrackingGroup* CreateTrackingGroup(
       base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker);
 
-  uint64 GetClientMemoryUsage(const GpuMemoryManagerClient* client) const;
+  uint64 GetTrackerMemoryUsage(gpu::gles2::MemoryTracker* tracker) const;
 
   uint64 GetMaximumClientAllocation() const {
     return client_hard_limit_bytes_;
   }