Commit e356ae50 authored by Brett Wilson's avatar Brett Wilson Committed by Commit Bot

Hook up allocator types in OOP memlog.

Writes separate sections of allocator types for the OOP memory log.
Some refactoring in support of this.

Adds tests for allocator splits and context information.

Bug: 763173
Change-Id: I81aeae226075b221d5b0b944fe74980557ffce32
Reviewed-on: https://chromium-review.googlesource.com/664083
Commit-Queue: Brett Wilson <brettw@chromium.org>
Reviewed-by: default avatarErik Chen <erikchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#501674}
parent 0f282076
...@@ -19,10 +19,12 @@ constexpr uint32_t kFreePacketType = 0xFEFEFEFE; ...@@ -19,10 +19,12 @@ constexpr uint32_t kFreePacketType = 0xFEFEFEFE;
constexpr uint32_t kMaxStackEntries = 256; constexpr uint32_t kMaxStackEntries = 256;
constexpr uint32_t kMaxContextLen = 256; constexpr uint32_t kMaxContextLen = 256;
// This should count up from 0 so it can be used to index into an array.
enum class AllocatorType : uint32_t { enum class AllocatorType : uint32_t {
kMalloc = 0, kMalloc = 0,
kPartitionAlloc = 1, kPartitionAlloc = 1,
kOilpan = 2 kOilpan = 2,
kCount // Number of allocator types.
}; };
#pragma pack(push, 1) #pragma pack(push, 1)
......
...@@ -19,4 +19,11 @@ AllocationEvent::AllocationEvent(AllocatorType allocator, ...@@ -19,4 +19,11 @@ AllocationEvent::AllocationEvent(AllocatorType allocator,
AllocationEvent::AllocationEvent(Address addr) : address_(addr) {} AllocationEvent::AllocationEvent(Address addr) : address_(addr) {}
AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set) {
AllocationCountMap map;
for (const auto& alloc : set)
map[alloc]++;
return map;
}
} // namespace profiling } // namespace profiling
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define CHROME_PROFILING_ALLOCATION_EVENT_H_ #define CHROME_PROFILING_ALLOCATION_EVENT_H_
#include <functional> #include <functional>
#include <map>
#include <set> #include <set>
#include "chrome/common/profiling/memlog_stream.h" #include "chrome/common/profiling/memlog_stream.h"
...@@ -50,6 +51,19 @@ class AllocationEvent { ...@@ -50,6 +51,19 @@ class AllocationEvent {
} }
}; };
// Implements < for AllocationEvent using everything but the address.
struct MetadataPartialLess {
bool operator()(const AllocationEvent& lhs,
const AllocationEvent& rhs) const {
// Note that we're using pointer comparisons on the backtrace objects
// since they're atoms and the actual ordering is not important.
return std::tie(lhs.size_, lhs.backtrace_, lhs.context_id_,
lhs.allocator_) < std::tie(rhs.size_, rhs.backtrace_,
rhs.context_id_,
rhs.allocator_);
}
};
// Implements == for AllocationEvents using address only. This is not a raw // Implements == for AllocationEvents using address only. This is not a raw
// operator because it only implements a comparison on the one field. // operator because it only implements a comparison on the one field.
struct AddressPartialEqual { struct AddressPartialEqual {
...@@ -59,6 +73,19 @@ class AllocationEvent { ...@@ -59,6 +73,19 @@ class AllocationEvent {
} }
}; };
// Implements == for AllocationEvent using everything but the address.
struct MetadataPartialEqual {
bool operator()(const AllocationEvent& lhs,
const AllocationEvent& rhs) const {
// Note that we're using pointer comparisons on the backtrace objects
// since they're atoms.
return std::tie(lhs.size_, lhs.backtrace_, lhs.context_id_,
lhs.allocator_) == std::tie(rhs.size_, rhs.backtrace_,
rhs.context_id_,
rhs.allocator_);
}
};
private: private:
AllocatorType allocator_ = AllocatorType::kMalloc; AllocatorType allocator_ = AllocatorType::kMalloc;
Address address_; Address address_;
...@@ -67,9 +94,20 @@ class AllocationEvent { ...@@ -67,9 +94,20 @@ class AllocationEvent {
int context_id_ = 0; int context_id_ = 0;
}; };
// Unique set based on addresses of allocations.
using AllocationEventSet = using AllocationEventSet =
std::set<AllocationEvent, AllocationEvent::AddressPartialLess>; std::set<AllocationEvent, AllocationEvent::AddressPartialLess>;
// Maps allocation metadata to allocation counts of that type. In this case,
// the address of the AllocationEvent is unused.
using AllocationCountMap =
std::map<AllocationEvent, int, AllocationEvent::MetadataPartialLess>;
// Aggregates the allocation events to a count map. The address of the
// allocation event in the returned map will be the address of the first item
// in the set with that metadata.
AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set);
} // namespace profiling } // namespace profiling
#endif // CHROME_PROFILING_ALLOCATION_EVENT_H_ #endif // CHROME_PROFILING_ALLOCATION_EVENT_H_
...@@ -58,4 +58,8 @@ void AllocationTracker::OnComplete() { ...@@ -58,4 +58,8 @@ void AllocationTracker::OnComplete() {
std::move(complete_callback_)); std::move(complete_callback_));
} }
AllocationCountMap AllocationTracker::GetCounts() const {
return AllocationEventSetToCountMap(live_allocs_);
}
} // namespace profiling } // namespace profiling
...@@ -36,11 +36,22 @@ class AllocationTracker : public MemlogReceiver { ...@@ -36,11 +36,22 @@ class AllocationTracker : public MemlogReceiver {
const AllocationEventSet& live_allocs() const { return live_allocs_; } const AllocationEventSet& live_allocs() const { return live_allocs_; }
const ContextMap& context() const { return context_; } const ContextMap& context() const { return context_; }
// Returns the aggregated allocation counts currently live.
AllocationCountMap GetCounts() const;
private: private:
CompleteCallback complete_callback_; CompleteCallback complete_callback_;
BacktraceStorage* backtrace_storage_; BacktraceStorage* backtrace_storage_;
// Need to track all live objects. Since the free information doesn't have
// the metadata, we can't keep a map of counts indexed by just the metadata
// (which is all the trace JSON needs), but need to keep an index by address.
//
// This could be a two-level index, where one set of metadata is kept and
// addresses index into that. But a full copy of the metadata is about the
// same size as the internal map node required for this second index, with
// additional complexity.
AllocationEventSet live_allocs_; AllocationEventSet live_allocs_;
// The context strings are atoms. Since there are O(100's) of these, we do // The context strings are atoms. Since there are O(100's) of these, we do
......
...@@ -22,6 +22,9 @@ namespace { ...@@ -22,6 +22,9 @@ namespace {
// Maps strings to integers for the JSON string table. // Maps strings to integers for the JSON string table.
using StringTable = std::map<std::string, size_t>; using StringTable = std::map<std::string, size_t>;
constexpr uint32_t kAllocatorCount =
static_cast<uint32_t>(AllocatorType::kCount);
struct BacktraceNode { struct BacktraceNode {
BacktraceNode(size_t sid, size_t p) : string_id(sid), parent(p) {} BacktraceNode(size_t sid, size_t p) : string_id(sid), parent(p) {}
...@@ -39,31 +42,23 @@ struct BacktraceNode { ...@@ -39,31 +42,23 @@ struct BacktraceNode {
using BacktraceTable = std::map<BacktraceNode, size_t>; using BacktraceTable = std::map<BacktraceNode, size_t>;
// Used as a map key to uniquify an allocation with a given size and stack.
// Since backtraces are uniquified, this does pointer comparisons on the
// backtrace to give a stable ordering, even if that ordering has no
// intrinsic meaning.
struct UniqueAlloc {
UniqueAlloc(AllocatorType alloc, const Backtrace* bt, size_t sz, int ctx_id)
: allocator(alloc), backtrace(bt), size(sz), context_id(ctx_id) {}
bool operator<(const UniqueAlloc& other) const {
return std::tie(allocator, backtrace, size, context_id) <
std::tie(other.allocator, other.backtrace, other.size,
other.context_id);
}
AllocatorType allocator;
const Backtrace* backtrace;
size_t size;
int context_id;
};
using UniqueAllocCount = std::map<UniqueAlloc, int>;
// The hardcoded ID for having no context for an allocation. // The hardcoded ID for having no context for an allocation.
constexpr int kUnknownTypeId = 0; constexpr int kUnknownTypeId = 0;
const char* StringForAllocatorType(uint32_t type) {
switch (static_cast<AllocatorType>(type)) {
case AllocatorType::kMalloc:
return "malloc";
case AllocatorType::kPartitionAlloc:
return "partition_alloc";
case AllocatorType::kOilpan:
return "blink_gc";
default:
NOTREACHED();
return "unknown";
}
}
// Writes a dummy process name entry given a PID. When we have more information // Writes a dummy process name entry given a PID. When we have more information
// on a process it can be filled in here. But for now the tracing tools expect // on a process it can be filled in here. But for now the tracing tools expect
// this entry since everything is associated with a PID. // this entry since everything is associated with a PID.
...@@ -82,6 +77,64 @@ void WriteProcessName(int pid, std::ostream& out) { ...@@ -82,6 +77,64 @@ void WriteProcessName(int pid, std::ostream& out) {
<< "\"tid\": 1, \"args\": {}}"; << "\"tid\": 1, \"args\": {}}";
} }
// Writes the top-level allocators section. This section is used by the tracing
// UI to show a small summary for each allocator. It's necessary as a
// placeholder to allow the stack-viewing UI to be shown.
//
// Each array should be the number of allocators long.
void WriteAllocatorsSummary(size_t total_size[],
size_t total_count[],
std::ostream& out) {
out << "\"allocators\":{\n";
for (uint32_t i = 0; i < kAllocatorCount; i++) {
const char* alloc_type = StringForAllocatorType(i);
// Overall sizes.
const char kAttrsSizeBody[] = R"(
"%s": {
"attrs": {
"virtual_size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
},
"size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
}
}
},)";
out << base::StringPrintf(kAttrsSizeBody, alloc_type, total_size[i],
total_size[i]);
// Allocated objects.
const char kAttrsObjectsBody[] = R"(
"%s/allocated_objects": {
"attrs": {
"shim_allocated_objects_count": {
"type": "scalar",
"units": "objects",
"value": "%zx"
},
"shim_allocated_objects_size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
}
}
})";
out << base::StringPrintf(kAttrsObjectsBody, alloc_type, total_count[i],
total_size[i]);
// Comma except for the last time.
if (i < kAllocatorCount - 1)
out << ',';
out << "\n";
}
out << "},\n";
}
// Writes the dictionary keys to precede a "dumps" trace argument. // Writes the dictionary keys to precede a "dumps" trace argument.
void WriteDumpsHeader(int pid, std::ostream& out) { void WriteDumpsHeader(int pid, std::ostream& out) {
out << "{ \"pid\":" << pid << ","; out << "{ \"pid\":" << pid << ",";
...@@ -127,13 +180,13 @@ size_t AddOrGetString(std::string str, StringTable* string_table) { ...@@ -127,13 +180,13 @@ size_t AddOrGetString(std::string str, StringTable* string_table) {
// Processes the context information needed for the given set of allocations. // Processes the context information needed for the given set of allocations.
// Strings are added for each referenced context and a mapping between // Strings are added for each referenced context and a mapping between
// context IDs and string IDs is filled in for each. // context IDs and string IDs is filled in for each.
void FillContextStrings(UniqueAllocCount alloc_counts, void FillContextStrings(AllocationCountMap alloc_counts,
const std::map<std::string, int>& context_map, const std::map<std::string, int>& context_map,
StringTable* string_table, StringTable* string_table,
std::map<int, size_t>* context_to_string_map) { std::map<int, size_t>* context_to_string_map) {
std::set<int> used_context; std::set<int> used_context;
for (const auto& alloc : alloc_counts) for (const auto& alloc : alloc_counts)
used_context.insert(alloc.first.context_id); used_context.insert(alloc.first.context_id());
if (used_context.find(kUnknownTypeId) != used_context.end()) { if (used_context.find(kUnknownTypeId) != used_context.end()) {
// Hard code a string for the unknown context type. // Hard code a string for the unknown context type.
...@@ -253,12 +306,12 @@ void WriteTypeNodes(const std::map<int, size_t>& type_to_string, ...@@ -253,12 +306,12 @@ void WriteTypeNodes(const std::map<int, size_t>& type_to_string,
// Writes the number of matching allocations array which looks like: // Writes the number of matching allocations array which looks like:
// "counts":[1, 1, 2] // "counts":[1, 1, 2]
void WriteCounts(const UniqueAllocCount& alloc_counts, std::ostream& out) { void WriteCounts(const AllocationCountMap& alloc_counts, std::ostream& out) {
out << "\"counts\":["; out << "\"counts\":[";
bool first_time = true; bool first_time = true;
for (const auto& cur : alloc_counts) { for (const auto& cur : alloc_counts) {
if (!first_time) if (!first_time)
out << ",\n"; out << ",";
else else
first_time = false; first_time = false;
out << cur.second; out << cur.second;
...@@ -268,31 +321,31 @@ void WriteCounts(const UniqueAllocCount& alloc_counts, std::ostream& out) { ...@@ -268,31 +321,31 @@ void WriteCounts(const UniqueAllocCount& alloc_counts, std::ostream& out) {
// Writes the sizes of each allocation which looks like: // Writes the sizes of each allocation which looks like:
// "sizes":[32, 64, 12] // "sizes":[32, 64, 12]
void WriteSizes(const UniqueAllocCount& alloc_counts, std::ostream& out) { void WriteSizes(const AllocationCountMap& alloc_counts, std::ostream& out) {
out << "\"sizes\":["; out << "\"sizes\":[";
bool first_time = true; bool first_time = true;
for (const auto& cur : alloc_counts) { for (const auto& cur : alloc_counts) {
if (!first_time) if (!first_time)
out << ",\n"; out << ",";
else else
first_time = false; first_time = false;
// Output the total size, which is size * count. // Output the total size, which is size * count.
out << cur.first.size * cur.second; out << cur.first.size() * cur.second;
} }
out << "]"; out << "]";
} }
// Writes the types array of integers which looks like: // Writes the types array of integers which looks like:
// "types":[0, 0, 1] // "types":[0, 0, 1]
void WriteTypes(const UniqueAllocCount& alloc_counts, std::ostream& out) { void WriteTypes(const AllocationCountMap& alloc_counts, std::ostream& out) {
out << "\"types\":["; out << "\"types\":[";
bool first_time = true; bool first_time = true;
for (const auto& cur : alloc_counts) { for (const auto& cur : alloc_counts) {
if (!first_time) if (!first_time)
out << ",\n"; out << ",";
else else
first_time = false; first_time = false;
out << cur.first.context_id; out << cur.first.context_id();
} }
out << "]"; out << "]";
} }
...@@ -300,17 +353,17 @@ void WriteTypes(const UniqueAllocCount& alloc_counts, std::ostream& out) { ...@@ -300,17 +353,17 @@ void WriteTypes(const UniqueAllocCount& alloc_counts, std::ostream& out) {
// Writes the nodes array which indexes for each allocation into the maps nodes // Writes the nodes array which indexes for each allocation into the maps nodes
// array written above. It looks like: // array written above. It looks like:
// "nodes":[1, 5, 10] // "nodes":[1, 5, 10]
void WriteAllocatorNodes(const UniqueAllocCount& alloc_counts, void WriteAllocatorNodes(const AllocationCountMap& alloc_counts,
const std::map<const Backtrace*, size_t>& backtraces, const std::map<const Backtrace*, size_t>& backtraces,
std::ostream& out) { std::ostream& out) {
out << "\"nodes\":["; out << "\"nodes\":[";
bool first_time = true; bool first_time = true;
for (const auto& cur : alloc_counts) { for (const auto& cur : alloc_counts) {
if (!first_time) if (!first_time)
out << ",\n"; out << ",";
else else
first_time = false; first_time = false;
auto found = backtraces.find(cur.first.backtrace); auto found = backtraces.find(cur.first.backtrace());
out << found->second; out << found->second;
} }
out << "]"; out << "]";
...@@ -318,6 +371,9 @@ void WriteAllocatorNodes(const UniqueAllocCount& alloc_counts, ...@@ -318,6 +371,9 @@ void WriteAllocatorNodes(const UniqueAllocCount& alloc_counts,
} // namespace } // namespace
ExportParams::ExportParams() = default;
ExportParams::~ExportParams() = default;
void ExportAllocationEventSetToJSON( void ExportAllocationEventSetToJSON(
int pid, int pid,
const ExportParams& params, const ExportParams& params,
...@@ -353,73 +409,26 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params, ...@@ -353,73 +409,26 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
out << R"("level_of_detail": "detailed")" out << R"("level_of_detail": "detailed")"
<< ",\n"; << ",\n";
// Aggregate allocations. Allocations with the same metadata (we don't use // Aggregate stats for each allocator type and filter irrelevant allocations.
// addresses) get grouped. size_t total_size[kAllocatorCount] = {0};
UniqueAllocCount alloc_counts; size_t total_count[kAllocatorCount] = {0};
for (const auto& alloc : *params.set) { AllocationCountMap filtered_counts[kAllocatorCount];
UniqueAlloc unique_alloc(alloc.allocator(), alloc.backtrace(), alloc.size(), for (const auto& alloc_pair : params.allocs) {
alloc.context_id()); uint32_t allocator_index =
alloc_counts[unique_alloc]++; static_cast<uint32_t>(alloc_pair.first.allocator());
} size_t alloc_count = alloc_pair.second;
size_t alloc_size = alloc_pair.first.size();
size_t total_size = 0;
size_t total_count = 0;
// Filter irrelevant allocations.
for (auto alloc = alloc_counts.begin(); alloc != alloc_counts.end();) {
size_t alloc_count = alloc->second;
size_t alloc_size = alloc->first.size;
size_t alloc_total_size = alloc_size * alloc_count; size_t alloc_total_size = alloc_size * alloc_count;
total_size += alloc_total_size; total_size[allocator_index] += alloc_total_size;
total_count += alloc_count; total_count[allocator_index] += alloc_count;
if (alloc_total_size < params.min_size_threshold && if (alloc_total_size >= params.min_size_threshold ||
alloc_count < params.min_count_threshold) { alloc_count >= params.min_count_threshold) {
alloc = alloc_counts.erase(alloc); filtered_counts[allocator_index].insert(alloc_pair);
} else {
++alloc;
} }
} }
// Write the top-level allocators section. This section is used by the tracing WriteAllocatorsSummary(total_size, total_count, out);
// UI to show a small summary for each allocator. It's necessary as a
// placeholder to allow the stack-viewing UI to be shown.
// TODO: Fill in placeholders for "value". https://crbug.com/758434.
const char* allocators_raw = R"(
"allocators": {
"malloc": {
"attrs": {
"virtual_size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
},
"size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
}
}
},
"malloc/allocated_objects": {
"attrs": {
"shim_allocated_objects_count": {
"type": "scalar",
"units": "objects",
"value": "%zx"
},
"shim_allocated_objects_size": {
"type": "scalar",
"units": "bytes",
"value": "%zx"
}
}
}
},
)";
std::string allocators = base::StringPrintf(
allocators_raw, total_size, total_size, total_count, total_size);
out << allocators;
WriteHeapsV2Header(out); WriteHeapsV2Header(out);
// Output Heaps_V2 format version. Currently "1" is the only valid value. // Output Heaps_V2 format version. Currently "1" is the only valid value.
...@@ -430,8 +439,10 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params, ...@@ -430,8 +439,10 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
// Put all required context strings in the string table and generate a // Put all required context strings in the string table and generate a
// mapping from allocation context_id to string ID. // mapping from allocation context_id to string ID.
std::map<int, size_t> context_to_string_map; std::map<int, size_t> context_to_string_map;
FillContextStrings(alloc_counts, *params.context_map, &string_table, for (uint32_t i = 0; i < kAllocatorCount; i++) {
&context_to_string_map); FillContextStrings(filtered_counts[i], *params.context_map, &string_table,
&context_to_string_map);
}
// Find all backtraces referenced by the set and not filtered. The backtrace // Find all backtraces referenced by the set and not filtered. The backtrace
// storage will contain more stacks than we want to write out (it will refer // storage will contain more stacks than we want to write out (it will refer
...@@ -440,8 +451,10 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params, ...@@ -440,8 +451,10 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
// //
// The map maps backtrace keys to node IDs (computed below). // The map maps backtrace keys to node IDs (computed below).
std::map<const Backtrace*, size_t> backtraces; std::map<const Backtrace*, size_t> backtraces;
for (const auto& alloc : alloc_counts) for (size_t i = 0; i < kAllocatorCount; i++) {
backtraces.emplace(alloc.first.backtrace, 0); for (const auto& alloc : filtered_counts[i])
backtraces.emplace(alloc.first.backtrace(), 0);
}
// Write each backtrace, converting the string for the stack entry to string // Write each backtrace, converting the string for the stack entry to string
// IDs. The backtrace -> node ID will be filled in at this time. // IDs. The backtrace -> node ID will be filled in at this time.
...@@ -460,15 +473,25 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params, ...@@ -460,15 +473,25 @@ void ExportMemoryMapsAndV2StackTraceToJSON(const ExportParams& params,
out << "},\n"; // End of maps section. out << "},\n"; // End of maps section.
// Allocators section. // Allocators section.
out << "\"allocators\":{\"malloc\":{\n"; out << "\"allocators\":{\n";
WriteCounts(alloc_counts, out); for (uint32_t i = 0; i < kAllocatorCount; i++) {
out << ",\n"; out << " \"" << StringForAllocatorType(i) << "\":{\n ";
WriteSizes(alloc_counts, out);
out << ",\n"; WriteCounts(filtered_counts[i], out);
WriteTypes(alloc_counts, out); out << ",\n ";
out << ",\n"; WriteSizes(filtered_counts[i], out);
WriteAllocatorNodes(alloc_counts, backtraces, out); out << ",\n ";
out << "}}\n"; // End of allocators section. WriteTypes(filtered_counts[i], out);
out << ",\n ";
WriteAllocatorNodes(filtered_counts[i], backtraces, out);
out << "\n }";
// Comma every time but the last.
if (i < kAllocatorCount - 1)
out << ',';
out << "\n";
}
out << "}\n"; // End of allocators section.
WriteHeapsV2Footer(out); WriteHeapsV2Footer(out);
......
...@@ -18,9 +18,15 @@ namespace profiling { ...@@ -18,9 +18,15 @@ namespace profiling {
// Configuration passed to the export functions because they take many // Configuration passed to the export functions because they take many
// arguments. All parameters must be set. The pointers are not managed by this // arguments. All parameters must be set. The pointers are not managed by this
// class and must outlive it. // class and must outlive it.
//
// Whether something is a pointer or a value is determined by what makes the
// call site nicer without introducing unnecessary copies.
struct ExportParams { struct ExportParams {
ExportParams();
~ExportParams();
// Allocation events to export. // Allocation events to export.
const AllocationEventSet* set = nullptr; AllocationCountMap allocs;
// VM map of all regions in the process. // VM map of all regions in the process.
const std::vector<memory_instrumentation::mojom::VmRegionPtr>* maps = nullptr; const std::vector<memory_instrumentation::mojom::VmRegionPtr>* maps = nullptr;
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "base/json/json_reader.h" #include "base/json/json_reader.h"
#include "base/json/json_writer.h" #include "base/json/json_writer.h"
#include "base/process/process.h" #include "base/process/process.h"
#include "base/strings/string_number_conversions.h"
#include "base/values.h" #include "base/values.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "chrome/profiling/backtrace_storage.h" #include "chrome/profiling/backtrace_storage.h"
...@@ -87,7 +88,8 @@ const base::Value* FindFirstRegionWithAnyName( ...@@ -87,7 +88,8 @@ const base::Value* FindFirstRegionWithAnyName(
return nullptr; return nullptr;
} }
int GetStringFromStringTable(const base::Value* strings, const char* text) { // Looks up a given string id from the string table. Returns -1 if not found.
int GetIdFromStringTable(const base::Value* strings, const char* text) {
for (const auto& string : strings->GetList()) { for (const auto& string : strings->GetList()) {
const base::Value* string_id = const base::Value* string_id =
string.FindKeyOfType("id", base::Value::Type::INTEGER); string.FindKeyOfType("id", base::Value::Type::INTEGER);
...@@ -100,6 +102,23 @@ int GetStringFromStringTable(const base::Value* strings, const char* text) { ...@@ -100,6 +102,23 @@ int GetStringFromStringTable(const base::Value* strings, const char* text) {
return -1; return -1;
} }
// Looks up a given string from the string table. Returns empty string if not
// found.
std::string GetStringFromStringTable(const base::Value* strings, int sid) {
for (const auto& string : strings->GetList()) {
const base::Value* string_id =
string.FindKeyOfType("id", base::Value::Type::INTEGER);
if (string_id->GetInt() == sid) {
const base::Value* string_text =
string.FindKeyOfType("string", base::Value::Type::STRING);
if (!string_text)
return std::string();
return string_text->GetString();
}
}
return std::string();
}
int GetNodeWithNameID(const base::Value* nodes, int sid) { int GetNodeWithNameID(const base::Value* nodes, int sid) {
for (const auto& node : nodes->GetList()) { for (const auto& node : nodes->GetList()) {
const base::Value* node_id = const base::Value* node_id =
...@@ -146,13 +165,11 @@ bool IsBacktraceInList(const base::Value* backtraces, int id, int parent) { ...@@ -146,13 +165,11 @@ bool IsBacktraceInList(const base::Value* backtraces, int id, int parent) {
TEST(ProfilingJsonExporterTest, TraceHeader) { TEST(ProfilingJsonExporterTest, TraceHeader) {
BacktraceStorage backtrace_storage; BacktraceStorage backtrace_storage;
AllocationEventSet events;
MemoryMap memory_map; MemoryMap memory_map;
std::map<std::string, int> context_map; std::map<std::string, int> context_map;
std::ostringstream stream; std::ostringstream stream;
ExportParams params; ExportParams params;
params.set = &events;
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_map; params.maps = &memory_map;
params.min_size_threshold = kNoSizeThreshold; params.min_size_threshold = kNoSizeThreshold;
...@@ -164,7 +181,8 @@ TEST(ProfilingJsonExporterTest, TraceHeader) { ...@@ -164,7 +181,8 @@ TEST(ProfilingJsonExporterTest, TraceHeader) {
base::JSONReader reader(base::JSON_PARSE_RFC); base::JSONReader reader(base::JSON_PARSE_RFC);
std::unique_ptr<base::Value> root = reader.ReadToValue(stream.str()); std::unique_ptr<base::Value> root = reader.ReadToValue(stream.str());
ASSERT_EQ(base::JSONReader::JSON_NO_ERROR, reader.error_code()) ASSERT_EQ(base::JSONReader::JSON_NO_ERROR, reader.error_code())
<< reader.GetErrorMessage(); << reader.GetErrorMessage() << "\n"
<< stream.str();
ASSERT_TRUE(root); ASSERT_TRUE(root);
const base::Value* process_name = FindEventWithName(*root, "process_name"); const base::Value* process_name = FindEventWithName(*root, "process_name");
...@@ -215,7 +233,7 @@ TEST(ProfilingJsonExporterTest, DumpsHeader) { ...@@ -215,7 +233,7 @@ TEST(ProfilingJsonExporterTest, DumpsHeader) {
std::ostringstream stream; std::ostringstream stream;
ExportParams params; ExportParams params;
params.set = &events; params.allocs = AllocationEventSetToCountMap(events);
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_map; params.maps = &memory_map;
params.min_size_threshold = kNoSizeThreshold; params.min_size_threshold = kNoSizeThreshold;
...@@ -277,13 +295,15 @@ TEST(ProfilingJsonExporterTest, Simple) { ...@@ -277,13 +295,15 @@ TEST(ProfilingJsonExporterTest, Simple) {
AllocationEvent(AllocatorType::kMalloc, Address(0x2), 32, bt2, 0)); AllocationEvent(AllocatorType::kMalloc, Address(0x2), 32, bt2, 0));
events.insert( events.insert(
AllocationEvent(AllocatorType::kMalloc, Address(0x3), 20, bt1, 0)); AllocationEvent(AllocatorType::kMalloc, Address(0x3), 20, bt1, 0));
events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x4),
20, bt1, 0));
std::ostringstream stream; std::ostringstream stream;
std::map<std::string, int> context_map; std::map<std::string, int> context_map;
MemoryMap memory_map; MemoryMap memory_map;
ExportParams params; ExportParams params;
params.set = &events; params.allocs = AllocationEventSetToCountMap(events);
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_map; params.maps = &memory_map;
params.min_size_threshold = kNoSizeThreshold; params.min_size_threshold = kNoSizeThreshold;
...@@ -315,11 +335,11 @@ TEST(ProfilingJsonExporterTest, Simple) { ...@@ -315,11 +335,11 @@ TEST(ProfilingJsonExporterTest, Simple) {
// Validate the strings table. // Validate the strings table.
EXPECT_EQ(5u, strings->GetList().size()); EXPECT_EQ(5u, strings->GetList().size());
int sid_unknown = GetStringFromStringTable(strings, "[unknown]"); int sid_unknown = GetIdFromStringTable(strings, "[unknown]");
int sid_1234 = GetStringFromStringTable(strings, "pc:1234"); int sid_1234 = GetIdFromStringTable(strings, "pc:1234");
int sid_5678 = GetStringFromStringTable(strings, "pc:5678"); int sid_5678 = GetIdFromStringTable(strings, "pc:5678");
int sid_9012 = GetStringFromStringTable(strings, "pc:9012"); int sid_9012 = GetIdFromStringTable(strings, "pc:9012");
int sid_9013 = GetStringFromStringTable(strings, "pc:9013"); int sid_9013 = GetIdFromStringTable(strings, "pc:9013");
EXPECT_NE(-1, sid_unknown); EXPECT_NE(-1, sid_unknown);
EXPECT_NE(-1, sid_1234); EXPECT_NE(-1, sid_1234);
EXPECT_NE(-1, sid_5678); EXPECT_NE(-1, sid_5678);
...@@ -346,7 +366,7 @@ TEST(ProfilingJsonExporterTest, Simple) { ...@@ -346,7 +366,7 @@ TEST(ProfilingJsonExporterTest, Simple) {
EXPECT_TRUE(IsBacktraceInList(nodes, id2, id0)); EXPECT_TRUE(IsBacktraceInList(nodes, id2, id0));
EXPECT_TRUE(IsBacktraceInList(nodes, id3, id2)); EXPECT_TRUE(IsBacktraceInList(nodes, id3, id2));
// Retrieve the allocations and valid their structure. // Retrieve the allocations and validate their structure.
const base::Value* counts = const base::Value* counts =
heaps_v2->FindPath({"allocators", "malloc", "counts"}); heaps_v2->FindPath({"allocators", "malloc", "counts"});
const base::Value* types = const base::Value* types =
...@@ -383,6 +403,22 @@ TEST(ProfilingJsonExporterTest, Simple) { ...@@ -383,6 +403,22 @@ TEST(ProfilingJsonExporterTest, Simple) {
EXPECT_EQ(0, types->GetList()[node3].GetInt()); EXPECT_EQ(0, types->GetList()[node3].GetInt());
EXPECT_EQ(32, sizes->GetList()[node3].GetInt()); EXPECT_EQ(32, sizes->GetList()[node3].GetInt());
EXPECT_EQ(id3, backtraces->GetList()[node3].GetInt()); EXPECT_EQ(id3, backtraces->GetList()[node3].GetInt());
// Validate that the partition alloc one got through.
counts = heaps_v2->FindPath({"allocators", "partition_alloc", "counts"});
types = heaps_v2->FindPath({"allocators", "partition_alloc", "types"});
sizes = heaps_v2->FindPath({"allocators", "partition_alloc", "sizes"});
backtraces = heaps_v2->FindPath({"allocators", "partition_alloc", "nodes"});
ASSERT_TRUE(counts);
ASSERT_TRUE(types);
ASSERT_TRUE(sizes);
ASSERT_TRUE(backtraces);
// There should just be one entry for the partition_alloc allocation.
EXPECT_EQ(1u, counts->GetList().size());
EXPECT_EQ(1u, types->GetList().size());
EXPECT_EQ(1u, sizes->GetList().size());
} }
TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) { TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
...@@ -409,9 +445,10 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) { ...@@ -409,9 +445,10 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
AllocationEvent(AllocatorType::kMalloc, Address(0x3), 1000, bt2, 0)); AllocationEvent(AllocatorType::kMalloc, Address(0x3), 1000, bt2, 0));
events.insert( events.insert(
AllocationEvent(AllocatorType::kMalloc, Address(0x4), 1000, bt2, 0)); AllocationEvent(AllocatorType::kMalloc, Address(0x4), 1000, bt2, 0));
for (size_t i = 0; i < kCountThreshold + 1; ++i) for (size_t i = 0; i < kCountThreshold + 1; ++i) {
events.insert( events.insert(
AllocationEvent(AllocatorType::kMalloc, Address(0x5 + i), 1, bt3, 0)); AllocationEvent(AllocatorType::kMalloc, Address(0x5 + i), 1, bt3, 0));
}
// Validate filtering by size and count. // Validate filtering by size and count.
std::ostringstream stream; std::ostringstream stream;
...@@ -419,7 +456,7 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) { ...@@ -419,7 +456,7 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
MemoryMap memory_map; MemoryMap memory_map;
ExportParams params; ExportParams params;
params.set = &events; params.allocs = AllocationEventSetToCountMap(events);
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_map; params.maps = &memory_map;
params.min_size_threshold = kSizeThreshold; params.min_size_threshold = kSizeThreshold;
...@@ -448,10 +485,10 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) { ...@@ -448,10 +485,10 @@ TEST(ProfilingJsonExporterTest, SimpleWithFilteredAllocations) {
// Validate the strings table. // Validate the strings table.
EXPECT_EQ(3u, strings->GetList().size()); EXPECT_EQ(3u, strings->GetList().size());
int sid_unknown = GetStringFromStringTable(strings, "[unknown]"); int sid_unknown = GetIdFromStringTable(strings, "[unknown]");
int sid_1234 = GetStringFromStringTable(strings, "pc:1234"); int sid_1234 = GetIdFromStringTable(strings, "pc:1234");
int sid_5678 = GetStringFromStringTable(strings, "pc:5678"); int sid_5678 = GetIdFromStringTable(strings, "pc:5678");
int sid_9999 = GetStringFromStringTable(strings, "pc:9999"); int sid_9999 = GetIdFromStringTable(strings, "pc:9999");
EXPECT_NE(-1, sid_unknown); EXPECT_NE(-1, sid_unknown);
EXPECT_EQ(-1, sid_1234); // Must be filtered. EXPECT_EQ(-1, sid_1234); // Must be filtered.
EXPECT_NE(-1, sid_5678); EXPECT_NE(-1, sid_5678);
...@@ -495,7 +532,7 @@ TEST(ProfilingJsonExporterTest, MemoryMaps) { ...@@ -495,7 +532,7 @@ TEST(ProfilingJsonExporterTest, MemoryMaps) {
std::map<std::string, int> context_map; std::map<std::string, int> context_map;
ExportParams params; ExportParams params;
params.set = &events; params.allocs = AllocationEventSetToCountMap(events);
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_maps; params.maps = &memory_maps;
params.min_size_threshold = kNoSizeThreshold; params.min_size_threshold = kNoSizeThreshold;
...@@ -551,7 +588,7 @@ TEST(ProfilingJsonExporterTest, Metadata) { ...@@ -551,7 +588,7 @@ TEST(ProfilingJsonExporterTest, Metadata) {
MemoryMap memory_map; MemoryMap memory_map;
ExportParams params; ExportParams params;
params.set = &events; params.allocs = AllocationEventSetToCountMap(events);
params.context_map = &context_map; params.context_map = &context_map;
params.maps = &memory_map; params.maps = &memory_map;
params.min_size_threshold = kNoSizeThreshold; params.min_size_threshold = kNoSizeThreshold;
...@@ -574,4 +611,124 @@ TEST(ProfilingJsonExporterTest, Metadata) { ...@@ -574,4 +611,124 @@ TEST(ProfilingJsonExporterTest, Metadata) {
EXPECT_EQ(metadata_dict_copy, *found_metadatas); EXPECT_EQ(metadata_dict_copy, *found_metadatas);
} }
// Verifies that allocation context is exported to the trace JSON correctly:
// allocations sharing the same context string must be coalesced into a single
// counts/types entry, while allocations with a distinct context (or none) get
// their own entries, typed via the "maps.types" -> "maps.strings" tables.
TEST(ProfilingJsonExporterTest, Context) {
  BacktraceStorage backtrace_storage;
  std::map<std::string, int> context_map;

  // All events share one backtrace so that only the context differs between
  // them; any folding observed must therefore be due to context handling.
  std::vector<Address> stack;
  stack.push_back(Address(0x1234));
  const Backtrace* bt = backtrace_storage.Insert(std::move(stack));

  std::string context_str1("Context 1");
  int context_id1 = 1;
  context_map[context_str1] = context_id1;
  std::string context_str2("Context 2");
  int context_id2 = 2;
  context_map[context_str2] = context_id2;

  // Make 4 events, all with identical metadata except context. Two share the
  // same context so should get folded, one has unique context, and one has no
  // context (context id 0).
  AllocationEventSet events;
  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x1),
                                16, bt, context_id1));
  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x2),
                                16, bt, context_id2));
  events.insert(
      AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x3), 16, bt, 0));
  events.insert(AllocationEvent(AllocatorType::kPartitionAlloc, Address(0x4),
                                16, bt, context_id1));

  std::ostringstream stream;
  MemoryMap memory_map;

  // No size/count filtering: every (backtrace, context) bucket must appear.
  ExportParams params;
  params.allocs = AllocationEventSetToCountMap(events);
  params.context_map = &context_map;
  params.maps = &memory_map;
  params.min_size_threshold = kNoSizeThreshold;
  params.min_count_threshold = kNoCountThreshold;
  ExportAllocationEventSetToJSON(1234, params, nullptr, stream);

  // The emitted trace must be well-formed JSON.
  base::JSONReader reader(base::JSON_PARSE_RFC);
  std::unique_ptr<base::Value> root = reader.ReadToValue(stream.str());
  ASSERT_EQ(base::JSONReader::JSON_NO_ERROR, reader.error_code())
      << reader.GetErrorMessage();
  ASSERT_TRUE(root);

  // Retrieve the allocations from the partition_alloc allocator section.
  const base::Value* periodic_interval = FindFirstPeriodicInterval(*root);
  ASSERT_TRUE(periodic_interval);
  const base::Value* heaps_v2 =
      periodic_interval->FindPath({"args", "dumps", "heaps_v2"});
  ASSERT_TRUE(heaps_v2);
  const base::Value* counts =
      heaps_v2->FindPath({"allocators", "partition_alloc", "counts"});
  ASSERT_TRUE(counts);
  const base::Value* types =
      heaps_v2->FindPath({"allocators", "partition_alloc", "types"});
  ASSERT_TRUE(types);
  const auto& counts_list = counts->GetList();
  const auto& types_list = types->GetList();

  // There should be three allocations, two coalesced ones, one with unique
  // context, and one with no context.
  EXPECT_EQ(3u, counts_list.size());
  EXPECT_EQ(3u, types_list.size());

  const base::Value* types_map = heaps_v2->FindPath({"maps", "types"});
  ASSERT_TRUE(types_map);
  const base::Value* strings = heaps_v2->FindPath({"maps", "strings"});
  ASSERT_TRUE(strings);

  // Reconstruct the map from type id to string by joining "maps.types"
  // (id -> name_sid) with the string table (sid -> string).
  std::map<int, std::string> type_to_string;
  for (const auto& type : types_map->GetList()) {
    const base::Value* id =
        type.FindKeyOfType("id", base::Value::Type::INTEGER);
    ASSERT_TRUE(id);
    const base::Value* name_sid =
        type.FindKeyOfType("name_sid", base::Value::Type::INTEGER);
    ASSERT_TRUE(name_sid);
    type_to_string[id->GetInt()] =
        GetStringFromStringTable(strings, name_sid->GetInt());
  }

  // Track the three entries we have down to what we expect. The order is not
  // defined so this is relatively complex to do. Each flag may be set at most
  // once (ASSERT_FALSE guards against duplicate entries for a context).
  bool found_double_context = false;  // Allocations sharing the same context.
  bool found_single_context = false;  // Allocation with unique context.
  bool found_no_context = false;      // Allocation with no context.
  for (size_t i = 0; i < types_list.size(); i++) {
    auto found = type_to_string.find(types_list[i].GetInt());
    ASSERT_NE(type_to_string.end(), found);
    if (found->second == context_str1) {
      // Context string matches the one with two allocations.
      ASSERT_FALSE(found_double_context);
      found_double_context = true;
      ASSERT_EQ(2, counts_list[i].GetInt());
    } else if (found->second == context_str2) {
      // Context string matches the one with one allocation.
      ASSERT_FALSE(found_single_context);
      found_single_context = true;
      ASSERT_EQ(1, counts_list[i].GetInt());
    } else if (found->second == "[unknown]") {
      // Context string for the one with no context.
      ASSERT_FALSE(found_no_context);
      found_no_context = true;
      ASSERT_EQ(1, counts_list[i].GetInt());
    }
  }

  // All three types of things should have been found in the loop.
  ASSERT_TRUE(found_double_context);
  ASSERT_TRUE(found_single_context);
  ASSERT_TRUE(found_no_context);
}
} // namespace profiling } // namespace profiling
...@@ -125,7 +125,7 @@ bool MemlogConnectionManager::DumpProcess( ...@@ -125,7 +125,7 @@ bool MemlogConnectionManager::DumpProcess(
std::ostringstream oss; std::ostringstream oss;
ExportParams params; ExportParams params;
params.set = &connection->tracker.live_allocs(); params.allocs = connection->tracker.GetCounts();
params.context_map = &connection->tracker.context(); params.context_map = &connection->tracker.context();
params.maps = &maps; params.maps = &maps;
params.min_size_threshold = kMinSizeThreshold; params.min_size_threshold = kMinSizeThreshold;
...@@ -179,7 +179,7 @@ void MemlogConnectionManager::DumpProcessForTracing( ...@@ -179,7 +179,7 @@ void MemlogConnectionManager::DumpProcessForTracing(
Connection* connection = it->second.get(); Connection* connection = it->second.get();
std::ostringstream oss; std::ostringstream oss;
ExportParams params; ExportParams params;
params.set = &connection->tracker.live_allocs(); params.allocs = connection->tracker.GetCounts();
params.maps = &maps; params.maps = &maps;
params.context_map = &connection->tracker.context(); params.context_map = &connection->tracker.context();
params.min_size_threshold = kMinSizeThreshold; params.min_size_threshold = kMinSizeThreshold;
......
...@@ -161,10 +161,13 @@ MemlogStreamParser::ReadStatus MemlogStreamParser::ParseAlloc() { ...@@ -161,10 +161,13 @@ MemlogStreamParser::ReadStatus MemlogStreamParser::ParseAlloc() {
if (!PeekBytes(sizeof(AllocPacket), &alloc_packet)) if (!PeekBytes(sizeof(AllocPacket), &alloc_packet))
return READ_NO_DATA; return READ_NO_DATA;
std::vector<Address> stack; // Validate data.
if (alloc_packet.stack_len > kMaxStackEntries || if (alloc_packet.stack_len > kMaxStackEntries ||
alloc_packet.context_byte_len > kMaxContextLen) alloc_packet.context_byte_len > kMaxContextLen ||
return READ_ERROR; // Prevent overflow on corrupted or malicious data. alloc_packet.allocator >= AllocatorType::kCount)
return READ_ERROR;
std::vector<Address> stack;
stack.resize(alloc_packet.stack_len); stack.resize(alloc_packet.stack_len);
size_t stack_byte_size = sizeof(Address) * alloc_packet.stack_len; size_t stack_byte_size = sizeof(Address) * alloc_packet.stack_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment