Commit 37097a96 authored by erikchen, committed by Commit Bot

Remove implementation of legacy heap profiler.

This CL removes unused code. It has no intended behavior change.

The out of process heap profiler [see components/heap_profiling and
components/services/heap_profiling] has replaced the legacy heap profiler.

This CL removes the implementation and allocator hooks. Future CLs will remove
the plumbing to enable/disable legacy heap profiler.

Bug: 822843
Change-Id: I79e320c3f17bacf2414c5d0815b5feb4288de7d7
Reviewed-on: https://chromium-review.googlesource.com/1066576
Commit-Queue: Erik Chen <erikchen@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Siddhartha S <ssid@chromium.org>
Cr-Commit-Position: refs/heads/master@{#560757}
parent cbb0df9b
@@ -949,9 +949,6 @@ jumbo_component("base") {
     "trace_event/heap_profiler_allocation_context.h",
     "trace_event/heap_profiler_allocation_context_tracker.cc",
     "trace_event/heap_profiler_allocation_context_tracker.h",
-    "trace_event/heap_profiler_allocation_register.cc",
-    "trace_event/heap_profiler_allocation_register.h",
-    "trace_event/heap_profiler_allocation_register_win.cc",
     "trace_event/heap_profiler_event_filter.cc",
     "trace_event/heap_profiler_event_filter.h",
     "trace_event/heap_profiler_heap_dump_writer.cc",
@@ -988,8 +985,6 @@ jumbo_component("base") {
     "trace_event/memory_usage_estimator.h",
     "trace_event/process_memory_dump.cc",
     "trace_event/process_memory_dump.h",
-    "trace_event/sharded_allocation_register.cc",
-    "trace_event/sharded_allocation_register.h",
     "trace_event/trace_buffer.cc",
     "trace_event/trace_buffer.h",
     "trace_event/trace_category.h",
@@ -1157,7 +1152,6 @@ jumbo_component("base") {
     "threading/platform_thread_posix.cc",
     "threading/thread_local_storage_posix.cc",
     "timer/hi_res_timer_manager_posix.cc",
-    "trace_event/heap_profiler_allocation_register_posix.cc",
   ]
 }
@@ -1405,7 +1399,6 @@ jumbo_component("base") {
     "time/time_exploded_posix.cc",
     "time/time_fuchsia.cc",
     "timer/hi_res_timer_manager_posix.cc",
-    "trace_event/heap_profiler_allocation_register_posix.cc",
   ]
   # These only need to be public deps because of includes of their headers
@@ -2410,7 +2403,6 @@ test("base_unittests") {
     "trace_event/blame_context_unittest.cc",
     "trace_event/event_name_filter_unittest.cc",
     "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
-    "trace_event/heap_profiler_allocation_register_unittest.cc",
     "trace_event/heap_profiler_heap_dump_writer_unittest.cc",
     "trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc",
     "trace_event/heap_profiler_type_name_deduplicator_unittest.cc",
......
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <algorithm>
#include <limits>
namespace base {
namespace trace_event {
size_t AllocationRegister::AddressHasher::operator()(
const void* address) const {
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-world data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
// |shift|, 15 yields good results for both 2^18 and 2^19 bucket sizes.
// Microbenchmarks show that this simple scheme outperforms fancy hashes like
// Murmur3 by 20 to 40 percent.
const uintptr_t key = reinterpret_cast<uintptr_t>(address);
const uintptr_t a = 131101;
const uintptr_t shift = 15;
const uintptr_t h = (key * a) >> shift;
return h;
}
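As an aside (not part of this CL), the multiplicative scheme above can be exercised in a small standalone program; the bucket count and sample addresses below are made up purely for illustration:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same constants as AddressHasher: multiply by the first prime after 2^17,
// then drop the low 15 bits, which carry little entropy for heap addresses.
size_t MultiplicativeHash(uintptr_t key) {
  const uintptr_t a = 131101;
  const uintptr_t shift = 15;
  return static_cast<size_t>((key * a) >> shift);
}

int main() {
  const size_t kNumBuckets = size_t{1} << 18;  // 2^18 buckets.
  const uintptr_t samples[] = {0x0a401000, 0x0a401040, 0x0a402000};
  for (uintptr_t key : samples)
    std::printf("0x%08zx -> bucket %zu\n", static_cast<size_t>(key),
                MultiplicativeHash(key) % kNumBuckets);
  return 0;
}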
AllocationRegister::ConstIterator::ConstIterator(
const AllocationRegister& alloc_register,
AllocationIndex index)
: register_(alloc_register), index_(index) {}
void AllocationRegister::ConstIterator::operator++() {
index_ = register_.allocations_.Next(index_ + 1);
}
bool AllocationRegister::ConstIterator::operator!=(
const ConstIterator& other) const {
return index_ != other.index_;
}
AllocationRegister::Allocation AllocationRegister::ConstIterator::operator*()
const {
return register_.GetAllocation(index_);
}
size_t AllocationRegister::BacktraceHasher::operator()(
const Backtrace& backtrace) const {
const size_t kSampleLength = 10;
uintptr_t total_value = 0;
size_t head_end = std::min(backtrace.frame_count, kSampleLength);
for (size_t i = 0; i != head_end; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
size_t tail_start = backtrace.frame_count -
std::min(backtrace.frame_count - head_end, kSampleLength);
for (size_t i = tail_start; i != backtrace.frame_count; ++i) {
total_value += reinterpret_cast<uintptr_t>(backtrace.frames[i].value);
}
total_value += backtrace.frame_count;
// These magic constants give best results in terms of average collisions
// per backtrace. They were found by replaying real backtraces from Linux
// and Android against different hash functions.
return (total_value * 131101) >> 14;
}
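Sketched outside the CL, the same head-and-tail sampling idea can be written with plain standard-library types; the frame representation (a raw pointer) and the sample stack in main() are assumptions for illustration:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors BacktraceHasher: sum up to 10 leading and 10 trailing frame
// addresses plus the frame count, so hashing stays cheap for deep stacks,
// then apply the same multiply-and-shift finalizer.
size_t HashBacktrace(const std::vector<const void*>& frames) {
  const size_t kSampleLength = 10;
  uintptr_t total = 0;
  size_t head_end = std::min(frames.size(), kSampleLength);
  for (size_t i = 0; i < head_end; ++i)
    total += reinterpret_cast<uintptr_t>(frames[i]);
  size_t tail_start =
      frames.size() - std::min(frames.size() - head_end, kSampleLength);
  for (size_t i = tail_start; i < frames.size(); ++i)
    total += reinterpret_cast<uintptr_t>(frames[i]);
  total += frames.size();
  return (total * 131101) >> 14;
}

int main() {
  std::vector<const void*> frames(40, reinterpret_cast<const void*>(0x1000));
  std::printf("hash = %zu\n", HashBacktrace(frames));
  return 0;
}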
AllocationRegister::AllocationRegister()
: AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {}
AllocationRegister::AllocationRegister(size_t allocation_capacity,
size_t backtrace_capacity)
: allocations_(allocation_capacity), backtraces_(backtrace_capacity) {
Backtrace sentinel = {};
sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
sentinel.frame_count = 1;
// Rationale for max / 2: in theory we could just start the sentinel with a
// refcount == 0. However, using max / 2 allows short circuiting of the
// conditional in RemoveBacktrace(), keeping the sentinel logic out of the fast
// path. From a functional viewpoint, the sentinel is safe even if the refcount
// wraps, because RemoveBacktrace() never erases the entry at
// kOutOfStorageBacktraceIndex, so the sentinel stays valid regardless.
BacktraceMap::KVPair::second_type sentinel_refcount =
std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
DCHECK(index_and_flag.second);
DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex);
}
AllocationRegister::~AllocationRegister() = default;
bool AllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
DCHECK(address != nullptr);
if (size == 0) {
return false;
}
AllocationInfo info = {size, context.type_name,
InsertBacktrace(context.backtrace)};
// Try to insert the allocation.
auto index_and_flag = allocations_.Insert(address, info);
if (!index_and_flag.second &&
index_and_flag.first != AllocationMap::kInvalidKVIndex) {
// |address| is already there - overwrite the allocation info.
auto& old_info = allocations_.Get(index_and_flag.first).second;
RemoveBacktrace(old_info.backtrace_index);
old_info = info;
return true;
}
return index_and_flag.second;
}
void AllocationRegister::Remove(const void* address) {
auto index = allocations_.Find(address);
if (index == AllocationMap::kInvalidKVIndex) {
return;
}
const AllocationInfo& info = allocations_.Get(index).second;
RemoveBacktrace(info.backtrace_index);
allocations_.Remove(index);
}
bool AllocationRegister::Get(const void* address,
Allocation* out_allocation) const {
auto index = allocations_.Find(address);
if (index == AllocationMap::kInvalidKVIndex) {
return false;
}
if (out_allocation) {
*out_allocation = GetAllocation(index);
}
return true;
}
AllocationRegister::ConstIterator AllocationRegister::begin() const {
return ConstIterator(*this, allocations_.Next(0));
}
AllocationRegister::ConstIterator AllocationRegister::end() const {
return ConstIterator(*this, AllocationMap::kInvalidKVIndex);
}
size_t AllocationRegister::EstimateAllocatedMemory() const {
return sizeof(AllocationRegister);
}
size_t AllocationRegister::EstimateResidentMemory() const {
return sizeof(AllocationRegister) + allocations_.EstimateUsedMemory() +
backtraces_.EstimateUsedMemory();
}
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
const Backtrace& backtrace) {
auto index = backtraces_.Insert(backtrace, 0).first;
if (index == BacktraceMap::kInvalidKVIndex)
return kOutOfStorageBacktraceIndex;
auto& backtrace_and_count = backtraces_.Get(index);
backtrace_and_count.second++;
return index;
}
void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
auto& backtrace_and_count = backtraces_.Get(index);
if (--backtrace_and_count.second == 0 &&
index != kOutOfStorageBacktraceIndex) {
// Backtrace is not referenced anymore - remove it.
backtraces_.Remove(index);
}
}
AllocationRegister::Allocation AllocationRegister::GetAllocation(
AllocationMap::KVIndex index) const {
const auto& address_and_info = allocations_.Get(index);
const auto& backtrace_and_count =
backtraces_.Get(address_and_info.second.backtrace_index);
return {address_and_info.first, address_and_info.second.size,
AllocationContext(backtrace_and_count.first,
address_and_info.second.type_name)};
}
} // namespace trace_event
} // namespace base
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>
#include "base/bits.h"
#include "base/logging.h"
#include "base/process/process_metrics.h"
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(OS_FUCHSIA)
#include <zircon/process.h>
#include <zircon/syscalls.h>
#endif // OS_FUCHSIA
namespace base {
namespace trace_event {
namespace internal {
namespace {
size_t GetGuardSize() {
return GetPageSize();
}
}
void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
size_t map_size = size + GetGuardSize();
#if defined(OS_FUCHSIA)
// Fuchsia does not currently support PROT_NONE, see MG-546 upstream. Instead,
// create a virtual mapping and leave the guard-page-sized region after the
// block unmapped.
zx_handle_t vmo;
CHECK(zx_vmo_create(size, 0, &vmo) == ZX_OK);
zx_handle_t vmar;
uintptr_t addr_uint;
CHECK(zx_vmar_allocate(zx_vmar_root_self(), 0, map_size,
ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
ZX_VM_FLAG_CAN_MAP_SPECIFIC,
&vmar, &addr_uint) == ZX_OK);
CHECK(zx_vmar_map(
vmar, 0, vmo, 0, size,
ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
&addr_uint) == ZX_OK);
CHECK(zx_handle_close(vmar) == ZX_OK);
CHECK(zx_handle_close(vmo) == ZX_OK);
void* addr = reinterpret_cast<void*>(addr_uint);
#else
void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
PCHECK(addr != MAP_FAILED);
// Mark the last page of the allocated address space as inaccessible
// (PROT_NONE). The read/write accessible space is still at least |size|
// bytes.
void* guard_addr =
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
int result = mprotect(guard_addr, GetGuardSize(), PROT_NONE);
PCHECK(result == 0);
#endif // defined(OS_FUCHSIA)
return addr;
}
void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
#if defined(OS_FUCHSIA)
zx_status_t status = zx_vmar_unmap(
zx_vmar_root_self(), reinterpret_cast<uintptr_t>(address), size);
if (status != ZX_OK) {
DLOG(ERROR) << "zx_vmar_unmap failed, status=" << status;
}
#else
munmap(address, size);
#endif
}
} // namespace internal
} // namespace trace_event
} // namespace base
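For readers unfamiliar with the guard-page trick used above, here is a minimal standalone POSIX sketch; it is not the base/ helper, the sizes are arbitrary, and it assumes MAP_ANONYMOUS is available (Linux/macOS):

#include <cstdio>
#include <cstring>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t usable = 4 * page;      // Space the caller may touch.
  const size_t total = usable + page;  // One extra page as the guard.

  // Map the whole region read/write, then revoke access to the last page.
  void* addr = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) {
    perror("mmap");
    return 1;
  }
  char* guard = static_cast<char*>(addr) + usable;
  if (mprotect(guard, page, PROT_NONE) != 0) {
    perror("mprotect");
    return 1;
  }

  // Writes inside |usable| succeed; touching the guard page would fault,
  // which is how an overrun of the register's fixed-size tables is caught.
  std::memset(addr, 0xab, usable);
  std::printf("usable region starts at %p, guard page at %p\n", addr,
              static_cast<void*>(guard));
  munmap(addr, total);
  return 0;
}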
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_allocation_register.h"
#include <windows.h>
#include <stddef.h>
#include "base/bits.h"
#include "base/logging.h"
#include "base/process/process_metrics.h"
namespace base {
namespace trace_event {
namespace internal {
namespace {
size_t GetGuardSize() {
return GetPageSize();
}
}
void* AllocateGuardedVirtualMemory(size_t size) {
size = bits::Align(size, GetPageSize());
// Add space for a guard page at the end.
size_t map_size = size + GetGuardSize();
// Reserve the address space. This does not make the memory usable yet.
void* addr = VirtualAlloc(nullptr, map_size, MEM_RESERVE, PAGE_NOACCESS);
PCHECK(addr != nullptr);
// Commit the non-guard pages as read-write memory.
void* result = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
PCHECK(result != nullptr);
// Mark the last page of the allocated address space as guard page. (NB: The
// |PAGE_GUARD| flag is not the flag to use here, that flag can be used to
// detect and intercept access to a certain memory region. Accessing a
// |PAGE_NOACCESS| page will raise a general protection fault.) The
// read/write accessible space is still at least |size| bytes.
void* guard_addr =
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
result = VirtualAlloc(guard_addr, GetGuardSize(), MEM_COMMIT, PAGE_NOACCESS);
PCHECK(result != nullptr);
return addr;
}
void FreeGuardedVirtualMemory(void* address, size_t allocated_size) {
// For |VirtualFree|, the size passed with |MEM_RELEASE| must be 0. Windows
// automatically frees the entire region that was reserved by the
// |VirtualAlloc| with flag |MEM_RESERVE|.
VirtualFree(address, 0, MEM_RELEASE);
}
} // namespace internal
} // namespace trace_event
} // namespace base
@@ -9,13 +9,8 @@
 #include <unordered_map>
 #include "base/allocator/allocator_extension.h"
-#include "base/allocator/allocator_shim.h"
 #include "base/allocator/buildflags.h"
 #include "base/debug/profiler.h"
-#include "base/threading/thread_local_storage.h"
-#include "base/trace_event/heap_profiler_allocation_context.h"
-#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/heap_profiler_heap_dump_writer.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
@@ -33,115 +28,6 @@ namespace base {
 namespace trace_event {
 namespace {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
using allocator::AllocatorDispatch;
void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_function(next, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
void* HookZeroInitAlloc(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
return ptr;
}
void* HookAllocAligned(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->alloc_aligned_function(next, alignment, size, context);
if (ptr)
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
void* HookRealloc(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
const AllocatorDispatch* const next = self->next;
void* ptr = next->realloc_function(next, address, size, context);
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
if (size > 0) // realloc(size == 0) means free().
MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
return ptr;
}
void HookFree(const AllocatorDispatch* self, void* address, void* context) {
if (address)
MallocDumpProvider::GetInstance()->RemoveAllocation(address);
const AllocatorDispatch* const next = self->next;
next->free_function(next, address, context);
}
size_t HookGetSizeEstimate(const AllocatorDispatch* self,
void* address,
void* context) {
const AllocatorDispatch* const next = self->next;
return next->get_size_estimate_function(next, address, context);
}
unsigned HookBatchMalloc(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
const AllocatorDispatch* const next = self->next;
unsigned count =
next->batch_malloc_function(next, size, results, num_requested, context);
for (unsigned i = 0; i < count; ++i) {
MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
}
return count;
}
void HookBatchFree(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
const AllocatorDispatch* const next = self->next;
for (unsigned i = 0; i < num_to_be_freed; ++i) {
MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
}
next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
}
void HookFreeDefiniteSize(const AllocatorDispatch* self,
void* ptr,
size_t size,
void* context) {
if (ptr)
MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
const AllocatorDispatch* const next = self->next;
next->free_definite_size_function(next, ptr, size, context);
}
AllocatorDispatch g_allocator_hooks = {
&HookAlloc, /* alloc_function */
&HookZeroInitAlloc, /* alloc_zero_initialized_function */
&HookAllocAligned, /* alloc_aligned_function */
&HookRealloc, /* realloc_function */
&HookFree, /* free_function */
&HookGetSizeEstimate, /* get_size_estimate_function */
&HookBatchMalloc, /* batch_malloc_function */
&HookBatchFree, /* batch_free_function */
&HookFreeDefiniteSize, /* free_definite_size_function */
nullptr, /* next */
};
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM)
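The hooks above chain through |self->next| down to the underlying allocator. A simplified standalone sketch of that dispatch-chaining pattern follows; it is not the real base::allocator shim API, and all names are illustrative:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// A stripped-down dispatch table: each entry may wrap the next one, and the
// terminal entry calls the real allocator.
struct Dispatch {
  void* (*alloc)(const Dispatch* self, size_t size);
  void (*free)(const Dispatch* self, void* ptr);
  const Dispatch* next;
};

void* TerminalAlloc(const Dispatch*, size_t size) { return std::malloc(size); }
void TerminalFree(const Dispatch*, void* ptr) { std::free(ptr); }
const Dispatch g_terminal = {&TerminalAlloc, &TerminalFree, nullptr};

// A profiling layer: record the event, then forward via |self->next|, just as
// the removed HookAlloc/HookFree did.
void* ProfilingAlloc(const Dispatch* self, size_t size) {
  void* ptr = self->next->alloc(self->next, size);
  std::printf("alloc %zu bytes -> %p\n", size, ptr);
  return ptr;
}
void ProfilingFree(const Dispatch* self, void* ptr) {
  std::printf("free %p\n", ptr);
  self->next->free(self->next, ptr);
}
const Dispatch g_profiling = {&ProfilingAlloc, &ProfilingFree, &g_terminal};

int main() {
  void* p = g_profiling.alloc(&g_profiling, 64);
  g_profiling.free(&g_profiling, p);
  return 0;
}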
 #if defined(OS_WIN)
 // A structure containing some information about a given heap.
 struct WinHeapInfo {
@@ -184,9 +70,7 @@ MallocDumpProvider* MallocDumpProvider::GetInstance() {
                         LeakySingletonTraits<MallocDumpProvider>>::get();
 }
-MallocDumpProvider::MallocDumpProvider()
-    : tid_dumping_heap_(kInvalidThreadId) {}
+MallocDumpProvider::MallocDumpProvider() = default;
 MallocDumpProvider::~MallocDumpProvider() = default;
 // Called at trace dump point time. Creates a snapshot the memory counters for
@@ -288,93 +172,9 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                       MemoryAllocatorDump::kUnitsBytes,
                       resident_size - allocated_objects_size);
 }
// Heap profiler dumps.
if (!allocation_register_.is_enabled())
return true;
tid_dumping_heap_ = PlatformThread::CurrentId();
// At this point the Insert/RemoveAllocation hooks will ignore this thread.
// Enclosing all the temporary data structures in a scope, so that the heap
// profiler does not see unbalanced malloc/free calls from these containers.
{
TraceEventMemoryOverhead overhead;
std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
if (AllocationContextTracker::capture_mode() !=
AllocationContextTracker::CaptureMode::DISABLED) {
ShardedAllocationRegister::OutputMetrics shim_metrics =
allocation_register_.UpdateAndReturnsMetrics(metrics_by_context);
// Aggregate data for objects allocated through the shim.
inner_dump->AddScalar("shim_allocated_objects_size",
MemoryAllocatorDump::kUnitsBytes,
shim_metrics.size);
inner_dump->AddScalar("shim_allocator_object_count",
MemoryAllocatorDump::kUnitsObjects,
shim_metrics.count);
}
allocation_register_.EstimateTraceMemoryOverhead(&overhead);
pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
}
tid_dumping_heap_ = kInvalidThreadId;
   return true;
 }
void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (enabled) {
allocation_register_.SetEnabled();
allocator::InsertAllocatorDispatch(&g_allocator_hooks);
} else {
allocation_register_.SetDisabled();
}
#endif
}
void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
return;
// CurrentId() can be a slow operation (crbug.com/497226). This apparently
// redundant condition short circuits the CurrentId() calls when unnecessary.
if (tid_dumping_heap_ != kInvalidThreadId &&
tid_dumping_heap_ == PlatformThread::CurrentId())
return;
// AllocationContextTracker will return nullptr when called re-entrantly.
// This happens when GetInstanceForCurrentThread() is called for the first
// time, because constructing the tracker calls new(), which re-enters the
// heap profiler; in that case we just want to bail out early.
auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
AllocationContext context;
if (!tracker->GetContextSnapshot(&context))
return;
if (!allocation_register_.is_enabled())
return;
allocation_register_.Insert(address, size, context);
}
void MallocDumpProvider::RemoveAllocation(void* address) {
if (UNLIKELY(base::ThreadLocalStorage::HasBeenDestroyed()))
return;
// No re-entrancy is expected here, as none of the calls below should
// cause a free() (|allocation_register_| does its own heap management).
if (tid_dumping_heap_ != kInvalidThreadId &&
tid_dumping_heap_ == PlatformThread::CurrentId())
return;
if (!allocation_register_.is_enabled())
return;
allocation_register_.Remove(address);
}
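The |tid_dumping_heap_| checks above are a thread-id based re-entrancy guard. A standalone sketch of the pattern, using std::this_thread rather than PlatformThread, with illustrative names throughout:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>

// While a dump runs on one thread, allocations made by that same thread
// (e.g. by the dump's own bookkeeping containers) are ignored; other threads
// keep being recorded.
std::atomic<std::thread::id> g_dumping_thread{std::thread::id()};

void OnAllocation(void* ptr, size_t size) {
  if (g_dumping_thread.load(std::memory_order_relaxed) ==
      std::this_thread::get_id()) {
    return;  // Re-entrant call from the dumping thread: skip it.
  }
  std::printf("record %p (%zu bytes)\n", ptr, size);
}

void DumpHeap() {
  g_dumping_thread.store(std::this_thread::get_id(), std::memory_order_relaxed);
  // ... walk internal tables; any allocation made here reaches OnAllocation()
  // but is filtered out by the check above ...
  g_dumping_thread.store(std::thread::id(), std::memory_order_relaxed);
}

int main() {
  OnAllocation(reinterpret_cast<void*>(0x1234), 32);  // Recorded.
  DumpHeap();
  return 0;
}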
 void MallocDumpProvider::EnableMetrics() {
   base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
   emit_metrics_on_memory_dump_ = true;
......
@@ -8,9 +8,7 @@
 #include "base/macros.h"
 #include "base/memory/singleton.h"
 #include "base/synchronization/lock.h"
-#include "base/threading/platform_thread.h"
 #include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/sharded_allocation_register.h"
 #include "build/build_config.h"
 #if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
@@ -34,12 +32,6 @@ class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
   bool OnMemoryDump(const MemoryDumpArgs& args,
                     ProcessMemoryDump* pmd) override;
-  void OnHeapProfilingEnabled(bool enabled) override;
-  // For heap profiling.
-  void InsertAllocation(void* address, size_t size);
-  void RemoveAllocation(void* address);
   // Used by out-of-process heap-profiling. When malloc is profiled by an
   // external process, that process will be responsible for emitting metrics on
   // behalf of this one. Thus, MallocDumpProvider should not do anything.
@@ -52,14 +44,6 @@ class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
   MallocDumpProvider();
   ~MallocDumpProvider() override;
-  // For heap profiling.
-  ShardedAllocationRegister allocation_register_;
-  // When in OnMemoryDump(), this contains the current thread ID.
-  // This is to prevent re-entrancy in the heap profiler when the heap dump
-  // generation is malloc/new-ing for its own bookkeeping data structures.
-  PlatformThreadId tid_dumping_heap_;
   bool emit_metrics_on_memory_dump_ = true;
   base::Lock emit_metrics_on_memory_dump_lock_;
......
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/sharded_allocation_register.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
// This number affects the bucket and capacity counts of AllocationRegister at
// "base/trace_event/heap_profiler_allocation_register.h".
#if defined(OS_ANDROID) || defined(OS_IOS)
const size_t ShardCount = 1;
#elif defined(OS_MACOSX)
const size_t ShardCount = 64;
#else
// Using ShardCount = 64 adds about 1.6GB of committed memory, which triggers
// the sandbox's committed memory limit.
const size_t ShardCount = 16;
#endif
ShardedAllocationRegister::ShardedAllocationRegister() : enabled_(false) {}
ShardedAllocationRegister::~ShardedAllocationRegister() = default;
void ShardedAllocationRegister::SetEnabled() {
if (!allocation_registers_)
allocation_registers_.reset(new RegisterAndLock[ShardCount]);
base::subtle::Release_Store(&enabled_, 1);
}
void ShardedAllocationRegister::SetDisabled() {
base::subtle::Release_Store(&enabled_, 0);
}
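The Release_Store/Acquire_Load pair used here and in is_enabled() is the publish/consume handshake that lets the allocation fast path avoid taking a lock. Roughly the same pattern with std::atomic, as an illustrative sketch rather than the actual base API:

#include <atomic>

// The writer publishes with release semantics after the registers are
// constructed; readers load with acquire semantics before touching them, so a
// reader that observes |true| also observes the fully constructed storage.
class EnabledFlag {
 public:
  void SetEnabled() { enabled_.store(true, std::memory_order_release); }
  void SetDisabled() { enabled_.store(false, std::memory_order_release); }
  bool is_enabled() const { return enabled_.load(std::memory_order_acquire); }

 private:
  std::atomic<bool> enabled_{false};
};

int main() {
  EnabledFlag flag;
  flag.SetEnabled();
  return flag.is_enabled() ? 0 : 1;
}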
bool ShardedAllocationRegister::Insert(const void* address,
size_t size,
const AllocationContext& context) {
AllocationRegister::AddressHasher hasher;
size_t index = hasher(address) % ShardCount;
RegisterAndLock& ral = allocation_registers_[index];
AutoLock lock(ral.lock);
return ral.allocation_register.Insert(address, size, context);
}
void ShardedAllocationRegister::Remove(const void* address) {
AllocationRegister::AddressHasher hasher;
size_t index = hasher(address) % ShardCount;
RegisterAndLock& ral = allocation_registers_[index];
AutoLock lock(ral.lock);
return ral.allocation_register.Remove(address);
}
bool ShardedAllocationRegister::Get(
const void* address,
AllocationRegister::Allocation* out_allocation) const {
AllocationRegister::AddressHasher hasher;
size_t index = hasher(address) % ShardCount;
RegisterAndLock& ral = allocation_registers_[index];
AutoLock lock(ral.lock);
return ral.allocation_register.Get(address, out_allocation);
}
void ShardedAllocationRegister::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) const {
size_t allocated = 0;
size_t resident = 0;
for (size_t i = 0; i < ShardCount; ++i) {
RegisterAndLock& ral = allocation_registers_[i];
AutoLock lock(ral.lock);
allocated += ral.allocation_register.EstimateAllocatedMemory();
resident += ral.allocation_register.EstimateResidentMemory();
}
overhead->Add(TraceEventMemoryOverhead::kHeapProfilerAllocationRegister,
allocated, resident);
}
ShardedAllocationRegister::OutputMetrics
ShardedAllocationRegister::UpdateAndReturnsMetrics(MetricsMap& map) const {
OutputMetrics output_metrics;
output_metrics.size = 0;
output_metrics.count = 0;
for (size_t i = 0; i < ShardCount; ++i) {
RegisterAndLock& ral = allocation_registers_[i];
AutoLock lock(ral.lock);
for (const auto& alloc_size : ral.allocation_register) {
AllocationMetrics& metrics = map[alloc_size.context];
metrics.size += alloc_size.size;
metrics.count++;
output_metrics.size += alloc_size.size;
output_metrics.count++;
}
}
return output_metrics;
}
ShardedAllocationRegister::RegisterAndLock::RegisterAndLock() = default;
ShardedAllocationRegister::RegisterAndLock::~RegisterAndLock() = default;
} // namespace trace_event
} // namespace base
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
#define BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
#include <memory>
#include <unordered_map>
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/heap_profiler_allocation_register.h"
namespace base {
namespace trace_event {
class TraceEventMemoryOverhead;
// This container holds allocations, and the context for each allocation [in
// the form of a backtrace]. This container is thread-safe.
class BASE_EXPORT ShardedAllocationRegister {
public:
using MetricsMap = std::unordered_map<AllocationContext, AllocationMetrics>;
struct OutputMetrics {
// Total size of allocated objects.
size_t size;
// Total count of allocated objects.
size_t count;
};
ShardedAllocationRegister();
// This class must be enabled before calling Insert() or Remove(). Once the
// class is enabled, it's okay if Insert() or Remove() is called [due to
// races] after the class is disabled.
void SetEnabled();
void SetDisabled();
bool is_enabled() const { return !!base::subtle::Acquire_Load(&enabled_); }
~ShardedAllocationRegister();
// Inserts allocation details into the container. If the address was present
// already, its details are updated. |address| must not be null.
//
// Returns true if an insert occurred. Inserts may fail because the table
// is full.
bool Insert(const void* address,
size_t size,
const AllocationContext& context);
// Removes the address from the container if it is present. It is ok to call
// this with a null pointer.
void Remove(const void* address);
// Finds allocation for the address and fills |out_allocation|.
bool Get(const void* address,
AllocationRegister::Allocation* out_allocation) const;
// Estimates memory overhead including |sizeof(AllocationRegister)|.
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
// Updates |map| with all allocated objects and their statistics.
// Returns aggregate statistics.
OutputMetrics UpdateAndReturnsMetrics(MetricsMap& map) const;
private:
struct RegisterAndLock {
RegisterAndLock();
~RegisterAndLock();
AllocationRegister allocation_register;
Lock lock;
};
std::unique_ptr<RegisterAndLock[]> allocation_registers_;
// This member needs to be checked on every allocation and deallocation [fast
// path] when heap profiling is enabled. Using a lock here causes significant
// contention.
base::subtle::Atomic32 enabled_;
DISALLOW_COPY_AND_ASSIGN(ShardedAllocationRegister);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_SHARDED_ALLOCATION_REGISTER_H_
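A compact sketch of the same lock-striping idea, using standard containers in place of the fixed-size AllocationRegister; the class name, shard count, and stored data are illustrative only:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <unordered_map>

// Each shard pairs its own map with its own lock; an address always maps to
// the same shard, so per-address operations only contend within one shard.
class ShardedSizeRegister {
 public:
  static constexpr size_t kShardCount = 16;

  void Insert(const void* address, size_t size) {
    Shard& shard = ShardFor(address);
    std::lock_guard<std::mutex> lock(shard.mutex);
    shard.sizes[address] = size;
  }

  void Remove(const void* address) {
    Shard& shard = ShardFor(address);
    std::lock_guard<std::mutex> lock(shard.mutex);
    shard.sizes.erase(address);
  }

  size_t TotalSize() const {
    size_t total = 0;
    for (const Shard& shard : shards_) {
      std::lock_guard<std::mutex> lock(shard.mutex);
      for (const auto& entry : shard.sizes)
        total += entry.second;
    }
    return total;
  }

 private:
  struct Shard {
    mutable std::mutex mutex;
    std::unordered_map<const void*, size_t> sizes;
  };

  Shard& ShardFor(const void* address) {
    // Same multiplicative address hash as AllocationRegister::AddressHasher.
    const uintptr_t key = reinterpret_cast<uintptr_t>(address);
    return shards_[((key * 131101) >> 15) % kShardCount];
  }

  std::array<Shard, kShardCount> shards_;
};

int main() {
  ShardedSizeRegister reg;
  int dummy;
  reg.Insert(&dummy, sizeof(dummy));
  std::printf("tracked bytes: %zu\n", reg.TotalSize());
  reg.Remove(&dummy);
  return 0;
}

The real class additionally gates its fast path on the atomic enabled_ flag described above, rather than a lock, since the flag is checked on every allocation and deallocation.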
@@ -153,54 +153,4 @@ IN_PROC_BROWSER_TEST_F(MemoryTracingBrowserTest,
       base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND, &json_events);
 }
#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
#if defined(OS_MACOSX)
#define MAYBE_TestHeapProfilingPseudo DISABLED_TestHeapProfilingPseudo
#else
#define MAYBE_TestHeapProfilingPseudo TestHeapProfilingPseudo
#endif
IN_PROC_BROWSER_TEST_F(MemoryTracingBrowserTest,
MAYBE_TestHeapProfilingPseudo) {
should_test_memory_dump_success_ = true;
// TODO(ssid): Enable heap profiling on all processes once the
// memory_instrumentation api is available, crbug.com/757747.
base::trace_event::MemoryDumpManager::GetInstance()->EnableHeapProfiling(
base::trace_event::kHeapProfilingModePseudo);
std::string json_events;
PerformDumpMemoryTestActions(
base::trace_event::TraceConfig(
base::trace_event::TraceConfigMemoryTestUtil::
GetTraceConfig_PeriodicTriggers(100, 500)),
base::trace_event::MemoryDumpLevelOfDetail::DETAILED, &json_events);
EXPECT_NE(std::string::npos, json_events.find("stackFrames"));
// TODO(ssid): Fix mac and win to get thread names in the allocation context,
// crbug.com/764454.
EXPECT_NE(std::string::npos, json_events.find("[Thread:"));
EXPECT_NE(std::string::npos, json_events.find("MessageLoop::RunTask"));
EXPECT_NE(std::string::npos, json_events.find("typeNames"));
EXPECT_NE(std::string::npos, json_events.find("content/browser"));
EXPECT_NE(std::string::npos, json_events.find("\"malloc\":{\"entries\""));
}
IN_PROC_BROWSER_TEST_F(MemoryTracingBrowserTest, TestHeapProfilingNoStack) {
should_test_memory_dump_success_ = true;
// TODO(ssid): Enable heap profiling on all processes once the
// memory_instrumentation api is available, crbug.com/757747.
base::trace_event::MemoryDumpManager::GetInstance()->EnableHeapProfiling(
base::trace_event::kHeapProfilingModeBackground);
std::string json_events;
PerformDumpMemoryTestActions(
base::trace_event::TraceConfig(
base::trace_event::TraceConfigMemoryTestUtil::
GetTraceConfig_BackgroundTrigger(200)),
base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND, &json_events);
EXPECT_NE(std::string::npos, json_events.find("stackFrames"));
EXPECT_NE(std::string::npos, json_events.find("[Thread:"));
EXPECT_EQ(std::string::npos, json_events.find("MessageLoop::RunTask"));
EXPECT_NE(std::string::npos, json_events.find("typeNames"));
EXPECT_NE(std::string::npos, json_events.find("content/browser"));
EXPECT_NE(std::string::npos, json_events.find("\"malloc\":{\"entries\""));
}
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
 } // namespace
@@ -20,7 +20,6 @@
 #include "base/threading/thread_local.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/heap_profiler_allocation_register.h"
 #include "base/trace_event/heap_profiler_event_filter.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "build/build_config.h"
@@ -300,14 +299,23 @@ class AtomicallyConsistentSendBufferArray {
 // nullptr.
 AtomicallyConsistentSendBufferArray g_send_buffers;
+size_t HashAddress(const void* address) {
+  // The multiplicative hashing scheme from [Knuth 1998].
+  // |a| is the first prime after 2^17.
+  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
+  const uintptr_t a = 131101;
+  const uintptr_t shift = 15;
+  const uintptr_t h = (key * a) >> shift;
+  return h;
+}
 // "address" is the address in question, which is used to select which send
 // buffer to use.
 void DoSend(const void* address,
             const void* data,
             size_t size,
             SendBuffer* send_buffers) {
-  base::trace_event::AllocationRegister::AddressHasher hasher;
-  int bin_to_use = hasher(address) % kNumSendBuffers;
+  int bin_to_use = HashAddress(address) % kNumSendBuffers;
   send_buffers[bin_to_use].Send(data, size);
 }
......
@@ -6,10 +6,8 @@
 #include <unordered_map>
-#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/memory_allocator_dump.h"
 #include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/trace_event_memory_overhead.h"
 #include "third_party/blink/public/platform/platform.h"
 #include "third_party/blink/renderer/platform/heap/handle.h"
 #include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
@@ -35,14 +33,6 @@ void DumpMemoryTotals(base::trace_event::ProcessMemoryDump* memory_dump) {
       ProcessHeap::TotalMarkedObjectSize());
 }
void ReportAllocation(Address address, size_t size, const char* type_name) {
BlinkGCMemoryDumpProvider::Instance()->insert(address, size, type_name);
}
void ReportFree(Address address) {
BlinkGCMemoryDumpProvider::Instance()->Remove(address);
}
 } // namespace
 BlinkGCMemoryDumpProvider* BlinkGCMemoryDumpProvider::Instance() {
@@ -66,37 +56,12 @@ bool BlinkGCMemoryDumpProvider::OnMemoryDump(
 }
 DumpMemoryTotals(memory_dump);
if (allocation_register_.is_enabled()) {
// Overhead should always be reported, regardless of light vs. heavy.
base::trace_event::TraceEventMemoryOverhead overhead;
std::unordered_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>
metrics_by_context;
if (level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
allocation_register_.UpdateAndReturnsMetrics(metrics_by_context);
}
allocation_register_.EstimateTraceMemoryOverhead(&overhead);
memory_dump->DumpHeapUsage(metrics_by_context, overhead, "blink_gc");
}
   // Merge all dumps collected by ThreadHeap::collectGarbage.
   if (level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
     memory_dump->TakeAllDumpsFrom(current_process_memory_dump_.get());
   return true;
 }
void BlinkGCMemoryDumpProvider::OnHeapProfilingEnabled(bool enabled) {
if (enabled) {
allocation_register_.SetEnabled();
HeapAllocHooks::SetAllocationHook(ReportAllocation);
HeapAllocHooks::SetFreeHook(ReportFree);
} else {
HeapAllocHooks::SetAllocationHook(nullptr);
HeapAllocHooks::SetFreeHook(nullptr);
allocation_register_.SetDisabled();
}
}
 base::trace_event::MemoryAllocatorDump*
 BlinkGCMemoryDumpProvider::CreateMemoryAllocatorDumpForCurrentGC(
     const String& absolute_name) {
@@ -114,24 +79,4 @@ BlinkGCMemoryDumpProvider::BlinkGCMemoryDumpProvider()
           nullptr,
           {base::trace_event::MemoryDumpLevelOfDetail::DETAILED})) {}
void BlinkGCMemoryDumpProvider::insert(Address address,
size_t size,
const char* type_name) {
base::trace_event::AllocationContext context;
if (!base::trace_event::AllocationContextTracker::
GetInstanceForCurrentThread()
->GetContextSnapshot(&context))
return;
context.type_name = type_name;
if (!allocation_register_.is_enabled())
return;
allocation_register_.Insert(address, size, context);
}
void BlinkGCMemoryDumpProvider::Remove(Address address) {
if (!allocation_register_.is_enabled())
return;
allocation_register_.Remove(address);
}
 } // namespace blink
@@ -6,7 +6,6 @@
 #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_BLINK_GC_MEMORY_DUMP_PROVIDER_H_
 #include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/sharded_allocation_register.h"
 #include "third_party/blink/renderer/platform/heap/blink_gc.h"
 #include "third_party/blink/renderer/platform/platform_export.h"
 #include "third_party/blink/renderer/platform/wtf/allocator.h"
@@ -34,7 +33,6 @@ class PLATFORM_EXPORT BlinkGCMemoryDumpProvider final
   // MemoryDumpProvider implementation.
   bool OnMemoryDump(const base::trace_event::MemoryDumpArgs&,
                     base::trace_event::ProcessMemoryDump*) override;
-  void OnHeapProfilingEnabled(bool) override;
   // The returned WebMemoryAllocatorDump is owned by
   // BlinkGCMemoryDumpProvider, and should not be retained (just used to
@@ -50,13 +48,9 @@ class PLATFORM_EXPORT BlinkGCMemoryDumpProvider final
     return current_process_memory_dump_.get();
   }
-  void insert(Address, size_t, const char*);
-  void Remove(Address);
  private:
   BlinkGCMemoryDumpProvider();
-  base::trace_event::ShardedAllocationRegister allocation_register_;
   std::unique_ptr<base::trace_event::ProcessMemoryDump>
       current_process_memory_dump_;
 };
......
@@ -7,10 +7,7 @@
 #include <unordered_map>
 #include "base/strings/stringprintf.h"
-#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
-#include "base/trace_event/heap_profiler_allocation_register.h"
 #include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/trace_event_memory_overhead.h"
 #include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
 #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"
@@ -18,15 +15,6 @@ namespace blink {
 namespace {
void ReportAllocation(void* address, size_t size, const char* type_name) {
PartitionAllocMemoryDumpProvider::Instance()->insert(address, size,
type_name);
}
void ReportFree(void* address) {
PartitionAllocMemoryDumpProvider::Instance()->Remove(address);
}
 const char kPartitionAllocDumpName[] = "partition_alloc";
 const char kPartitionsDumpName[] = "partitions";
@@ -132,20 +120,6 @@ bool PartitionAllocMemoryDumpProvider::OnMemoryDump(
   using base::trace_event::MemoryDumpLevelOfDetail;
   MemoryDumpLevelOfDetail level_of_detail = args.level_of_detail;
if (allocation_register_.is_enabled()) {
// Overhead should always be reported, regardless of light vs. heavy.
base::trace_event::TraceEventMemoryOverhead overhead;
std::unordered_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>
metrics_by_context;
// Dump only the overhead estimation in non-detailed dumps.
if (level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
allocation_register_.UpdateAndReturnsMetrics(metrics_by_context);
}
allocation_register_.EstimateTraceMemoryOverhead(&overhead);
memory_dump->DumpHeapUsage(metrics_by_context, overhead, "partition_alloc");
}
   PartitionStatsDumperImpl partition_stats_dumper(memory_dump, level_of_detail);
   base::trace_event::MemoryAllocatorDump* partitions_dump =
@@ -169,43 +143,7 @@ bool PartitionAllocMemoryDumpProvider::OnMemoryDump(
   return true;
 }
// |m_allocationRegister| should be initialized only when necessary to avoid
// waste of memory.
 PartitionAllocMemoryDumpProvider::PartitionAllocMemoryDumpProvider() = default;
 PartitionAllocMemoryDumpProvider::~PartitionAllocMemoryDumpProvider() = default;
void PartitionAllocMemoryDumpProvider::OnHeapProfilingEnabled(bool enabled) {
if (enabled) {
allocation_register_.SetEnabled();
WTF::PartitionAllocHooks::SetAllocationHook(ReportAllocation);
WTF::PartitionAllocHooks::SetFreeHook(ReportFree);
} else {
WTF::PartitionAllocHooks::SetAllocationHook(nullptr);
WTF::PartitionAllocHooks::SetFreeHook(nullptr);
allocation_register_.SetDisabled();
}
}
void PartitionAllocMemoryDumpProvider::insert(void* address,
size_t size,
const char* type_name) {
base::trace_event::AllocationContext context;
if (!base::trace_event::AllocationContextTracker::
GetInstanceForCurrentThread()
->GetContextSnapshot(&context))
return;
context.type_name = type_name;
if (!allocation_register_.is_enabled())
return;
allocation_register_.Insert(address, size, context);
}
void PartitionAllocMemoryDumpProvider::Remove(void* address) {
if (!allocation_register_.is_enabled())
return;
allocation_register_.Remove(address);
}
 } // namespace blink
@@ -6,7 +6,6 @@
 #define THIRD_PARTY_BLINK_RENDERER_PLATFORM_PARTITION_ALLOC_MEMORY_DUMP_PROVIDER_H_
 #include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/sharded_allocation_register.h"
 #include "third_party/blink/public/platform/web_common.h"
 #include "third_party/blink/renderer/platform/wtf/noncopyable.h"
@@ -25,16 +24,9 @@ class BLINK_PLATFORM_EXPORT PartitionAllocMemoryDumpProvider final
   // MemoryDumpProvider implementation.
   bool OnMemoryDump(const base::trace_event::MemoryDumpArgs&,
                     base::trace_event::ProcessMemoryDump*) override;
-  void OnHeapProfilingEnabled(bool) override;
-  // These methods are called only from PartitionAllocHooks' callbacks.
-  void insert(void*, size_t, const char*);
-  void Remove(void*);
  private:
   PartitionAllocMemoryDumpProvider();
-  base::trace_event::ShardedAllocationRegister allocation_register_;
 };
 } // namespace blink
......
@@ -633,7 +633,6 @@ def write_gn_ninja(path, root_gen_dir, options, windows_x64_toolchain):
       'base/trace_event/event_name_filter.cc',
       'base/trace_event/heap_profiler_allocation_context.cc',
       'base/trace_event/heap_profiler_allocation_context_tracker.cc',
-      'base/trace_event/heap_profiler_allocation_register.cc',
       'base/trace_event/heap_profiler_event_filter.cc',
       'base/trace_event/heap_profiler_heap_dump_writer.cc',
       'base/trace_event/heap_profiler_serialization_state.cc',
@@ -709,7 +708,6 @@ def write_gn_ninja(path, root_gen_dir, options, windows_x64_toolchain):
       'base/threading/platform_thread_posix.cc',
       'base/threading/thread_local_storage_posix.cc',
       'base/time/time_conversion_posix.cc',
-      'base/trace_event/heap_profiler_allocation_register_posix.cc',
     ])
   static_libraries['libevent'] = {
     'sources': [
@@ -949,7 +947,6 @@ def write_gn_ninja(path, root_gen_dir, options, windows_x64_toolchain):
       'base/threading/thread_local_storage_win.cc',
       'base/time/time_win.cc',
       'base/timer/hi_res_timer_manager_win.cc',
-      'base/trace_event/heap_profiler_allocation_register_win.cc',
       'base/trace_event/trace_event_etw_export_win.cc',
       'base/win/core_winrt_util.cc',
       'base/win/enum_variant.cc',
......