Commit 535ef8e3 authored by erikchen, committed by Commit Bot

Use unique IDs for OOP HP heap dumps.

The heap dump v2 format requires that multiple heap dumps in the same trace
share IDs for nodes and strings. Since the JSON exporter is stateless, and isn't
aware of whether the dump will be part of a given trace, just give all nodes
and strings a unique ID.

Bug: 758739
Change-Id: I87002ce9a84c60ee7a41ac38d3702248a4aa6ae4
Reviewed-on: https://chromium-review.googlesource.com/895666
Reviewed-by: Dmitry Skiba <dskiba@chromium.org>
Commit-Queue: Erik Chen <erikchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533731}
parent 3452c6ab
......@@ -167,9 +167,10 @@ void WriteMemoryMaps(const ExportParams& params, std::ostream& out) {
}
// Inserts or retrieves the ID for a string in the string table.
size_t AddOrGetString(const std::string& str, StringTable* string_table) {
// The lowest ID should be 1. The chrome://tracing UI doesn't handle ID 0.
auto result = string_table->emplace(str, string_table->size() + 1);
size_t AddOrGetString(const std::string& str,
StringTable* string_table,
ExportParams* params) {
auto result = string_table->emplace(str, params->next_id++);
// "result.first" is an iterator into the map.
return result.first->second;
}
......@@ -178,7 +179,7 @@ size_t AddOrGetString(const std::string& str, StringTable* string_table) {
// Strings are added for each referenced context and a mapping between
// context IDs and string IDs is filled in for each.
void FillContextStrings(const UniqueAllocationMap& allocations,
const std::map<std::string, int>& context_map,
ExportParams* params,
StringTable* string_table,
std::map<int, size_t>* context_to_string_map) {
std::set<int> used_context;
......@@ -187,25 +188,24 @@ void FillContextStrings(const UniqueAllocationMap& allocations,
if (used_context.find(kUnknownTypeId) != used_context.end()) {
// Hard code a string for the unknown context type.
context_to_string_map->emplace(kUnknownTypeId,
AddOrGetString("[unknown]", string_table));
context_to_string_map->emplace(
kUnknownTypeId, AddOrGetString("[unknown]", string_table, params));
}
// The context map is backwards from what we need, so iterate through the
// whole thing and see which ones are used.
for (const auto& context : context_map) {
for (const auto& context : params->context_map) {
if (used_context.find(context.second) != used_context.end()) {
size_t string_id = AddOrGetString(context.first, string_table);
size_t string_id = AddOrGetString(context.first, string_table, params);
context_to_string_map->emplace(context.second, string_id);
}
}
}
size_t AddOrGetBacktraceNode(BacktraceNode node,
BacktraceTable* backtrace_table) {
// The lowest ID should be 1. The chrome://tracing UI doesn't handle ID 0.
auto result =
backtrace_table->emplace(std::move(node), backtrace_table->size() + 1);
BacktraceTable* backtrace_table,
ExportParams* params) {
auto result = backtrace_table->emplace(std::move(node), params->next_id++);
// "result.first" is an iterator into the map.
return result.first->second;
}
......@@ -213,18 +213,17 @@ size_t AddOrGetBacktraceNode(BacktraceNode node,
// Returns the index into nodes of the node to reference for this stack. That
// node will reference its parent node, etc. to allow the full stack to
// be represented.
size_t AppendBacktraceStrings(
const Backtrace& backtrace,
BacktraceTable* backtrace_table,
StringTable* string_table,
const std::unordered_map<uint64_t, std::string>& mapped_strings) {
size_t AppendBacktraceStrings(const Backtrace& backtrace,
BacktraceTable* backtrace_table,
StringTable* string_table,
ExportParams* params) {
int parent = -1;
// Addresses must be outputted in reverse order.
for (const Address& addr : base::Reversed(backtrace.addrs())) {
size_t sid;
auto it = mapped_strings.find(addr.value);
if (it != mapped_strings.end()) {
sid = AddOrGetString(it->second, string_table);
auto it = params->mapped_strings.find(addr.value);
if (it != params->mapped_strings.end()) {
sid = AddOrGetString(it->second, string_table, params);
} else {
static constexpr char kPcPrefix[] = "pc:";
// std::numeric_limits<>::digits gives the number of bits in the value.
......@@ -236,9 +235,10 @@ size_t AppendBacktraceStrings(
(std::numeric_limits<decltype(addr.value)>::digits / 4);
char buf[kBufSize];
snprintf(buf, kBufSize, "%s%" PRIx64, kPcPrefix, addr.value);
sid = AddOrGetString(buf, string_table);
sid = AddOrGetString(buf, string_table, params);
}
parent = AddOrGetBacktraceNode(BacktraceNode(sid, parent), backtrace_table);
parent = AddOrGetBacktraceNode(BacktraceNode(sid, parent), backtrace_table,
params);
}
return parent; // Last item is the end of this stack.
}
......@@ -379,12 +379,12 @@ void WriteAllocatorNodes(const UniqueAllocationMap& allocations,
// Defaulted out of line rather than in the header — presumably so the header
// does not need complete definitions of ExportParams' map-typed members at
// every include site; TODO confirm against the header.
ExportParams::ExportParams() = default;
ExportParams::~ExportParams() = default;
void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
void ExportMemoryMapsAndV2StackTraceToJSON(ExportParams* params,
std::ostream& out) {
// Start dictionary.
out << "{\n";
WriteMemoryMaps(params, out);
WriteMemoryMaps(*params, out);
out << ",\n";
// Write level of detail.
......@@ -395,7 +395,7 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
size_t total_size[kAllocatorCount] = {0};
size_t total_count[kAllocatorCount] = {0};
UniqueAllocationMap filtered_allocations[kAllocatorCount];
for (const auto& alloc_pair : params.allocs) {
for (const auto& alloc_pair : params->allocs) {
uint32_t allocator_index =
static_cast<uint32_t>(alloc_pair.first.allocator());
size_t alloc_count = alloc_pair.second;
......@@ -418,8 +418,8 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
for (uint32_t i = 0; i < kAllocatorCount; i++) {
for (auto alloc = filtered_allocations[i].begin();
alloc != filtered_allocations[i].end();) {
if (alloc->second.size < params.min_size_threshold &&
alloc->second.count < params.min_count_threshold) {
if (alloc->second.size < params->min_size_threshold &&
alloc->second.count < params->min_count_threshold) {
alloc = filtered_allocations[i].erase(alloc);
} else {
++alloc;
......@@ -439,8 +439,8 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
// mapping from allocation context_id to string ID.
std::map<int, size_t> context_to_string_map;
for (uint32_t i = 0; i < kAllocatorCount; i++) {
FillContextStrings(filtered_allocations[i], params.context_map,
&string_table, &context_to_string_map);
FillContextStrings(filtered_allocations[i], params, &string_table,
&context_to_string_map);
}
// Find all backtraces referenced by the set and not filtered. The backtrace
......@@ -460,8 +460,8 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
BacktraceTable nodes;
VLOG(1) << "Number of backtraces " << backtraces.size();
for (auto& bt : backtraces)
bt.second = AppendBacktraceStrings(*bt.first, &nodes, &string_table,
params.mapped_strings);
bt.second =
AppendBacktraceStrings(*bt.first, &nodes, &string_table, params);
// Maps section.
out << "\"maps\": {\n";
......
......@@ -47,11 +47,20 @@ struct ExportParams {
// anonymizes the trace, since the paths could potentially contain a username.
// However, it prevents symbolization of locally built instances of Chrome.
bool strip_path_from_mapped_files = false;
// The heaps_v2 trace format requires that ids are unique across heap dumps in
// a single trace. This class is currently stateless, and does not know
// whether a heap dump will be in a trace with other heap dumps. To work
// around this, just make all IDs unique. The parameter is an input parameter
// that tells the exporter which ID to start from. It is also an output
// parameter, and tells the caller the next unused ID.
// See https://crbug.com/808066.
size_t next_id = 1;
};
// Creates a JSON string representing a JSON dictionary that contains memory
// maps and v2 format stack traces.
void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
void ExportMemoryMapsAndV2StackTraceToJSON(ExportParams* params,
std::ostream& out);
} // namespace profiling
......
......@@ -160,7 +160,7 @@ TEST(ProfilingJsonExporterTest, Simple) {
params.allocs = AllocationEventSetToCountMap(events);
params.min_size_threshold = kNoSizeThreshold;
params.min_count_threshold = kNoCountThreshold;
ExportMemoryMapsAndV2StackTraceToJSON(params, stream);
ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
std::string json = stream.str();
// JSON should parse.
......@@ -327,7 +327,7 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
params.allocs = AllocationEventSetToCountMap(events);
params.min_size_threshold = kSizeThreshold;
params.min_count_threshold = kCountThreshold;
ExportMemoryMapsAndV2StackTraceToJSON(params, stream);
ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
std::string json = stream.str();
// JSON should parse.
......@@ -394,7 +394,7 @@ TEST(ProfilingJsonExporterTest, MemoryMaps) {
params.allocs = AllocationEventSetToCountMap(events);
params.min_size_threshold = kNoSizeThreshold;
params.min_count_threshold = kNoCountThreshold;
ExportMemoryMapsAndV2StackTraceToJSON(params, stream);
ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
std::string json = stream.str();
// JSON should parse.
......@@ -453,7 +453,7 @@ TEST(ProfilingJsonExporterTest, Context) {
params.allocs = AllocationEventSetToCountMap(events);
params.min_size_threshold = kNoSizeThreshold;
params.min_count_threshold = kNoCountThreshold;
ExportMemoryMapsAndV2StackTraceToJSON(params, stream);
ExportMemoryMapsAndV2StackTraceToJSON(&params, stream);
std::string json = stream.str();
// JSON should parse.
......
......@@ -289,11 +289,14 @@ void MemlogConnectionManager::DoDumpOneProcessForTracing(
params.min_size_threshold = keep_small_allocations ? 0 : kMinSizeThreshold;
params.min_count_threshold = keep_small_allocations ? 0 : kMinCountThreshold;
params.strip_path_from_mapped_files = strip_path_from_mapped_files;
params.next_id = next_id_;
std::ostringstream oss;
ExportMemoryMapsAndV2StackTraceToJSON(params, oss);
ExportMemoryMapsAndV2StackTraceToJSON(&params, oss);
std::string reply = oss.str();
next_id_ = params.next_id;
mojo::ScopedSharedBufferHandle buffer =
mojo::SharedBufferHandle::Create(reply.size());
if (!buffer.is_valid()) {
......
......@@ -114,6 +114,9 @@ class MemlogConnectionManager {
// to ensure barrier IDs are unique.
uint32_t next_barrier_id_ = 1;
// The next ID to use when exporting a heap dump.
size_t next_id_ = 1;
// Maps process ID to the connection information for it.
base::flat_map<base::ProcessId, std::unique_ptr<Connection>> connections_;
base::Lock connections_lock_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment