Commit e356ae50 authored by Brett Wilson's avatar Brett Wilson Committed by Commit Bot

Hook up allocator types in OOP memlog.

Writes separate sections of allocator types for the OOP memory log.
Some refactoring in support of this.

Adds tests for allocator splits and context information.

Bug: 763173
Change-Id: I81aeae226075b221d5b0b944fe74980557ffce32
Reviewed-on: https://chromium-review.googlesource.com/664083
Commit-Queue: Brett Wilson <brettw@chromium.org>
Reviewed-by: default avatarErik Chen <erikchen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#501674}
parent 0f282076
...@@ -19,10 +19,12 @@ constexpr uint32_t kFreePacketType = 0xFEFEFEFE; ...@@ -19,10 +19,12 @@ constexpr uint32_t kFreePacketType = 0xFEFEFEFE;
constexpr uint32_t kMaxStackEntries = 256; constexpr uint32_t kMaxStackEntries = 256;
constexpr uint32_t kMaxContextLen = 256; constexpr uint32_t kMaxContextLen = 256;
// This should count up from 0 so it can be used to index into an array.
enum class AllocatorType : uint32_t { enum class AllocatorType : uint32_t {
kMalloc = 0, kMalloc = 0,
kPartitionAlloc = 1, kPartitionAlloc = 1,
kOilpan = 2 kOilpan = 2,
kCount // Number of allocator types.
}; };
#pragma pack(push, 1) #pragma pack(push, 1)
......
...@@ -19,4 +19,11 @@ AllocationEvent::AllocationEvent(AllocatorType allocator, ...@@ -19,4 +19,11 @@ AllocationEvent::AllocationEvent(AllocatorType allocator,
AllocationEvent::AllocationEvent(Address addr) : address_(addr) {} AllocationEvent::AllocationEvent(Address addr) : address_(addr) {}
// Collapses an address-keyed set of live allocations into a map keyed by
// allocation metadata, where each value is the number of live allocations
// sharing that metadata. The AllocationEvent stored as each map key is the
// first element encountered in the set with that metadata.
AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set) {
  AllocationCountMap counts;
  for (AllocationEventSet::const_iterator it = set.begin(); it != set.end();
       ++it) {
    // operator[] default-constructs the count to 0 on first insertion.
    ++counts[*it];
  }
  return counts;
}
} // namespace profiling } // namespace profiling
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define CHROME_PROFILING_ALLOCATION_EVENT_H_ #define CHROME_PROFILING_ALLOCATION_EVENT_H_
#include <functional> #include <functional>
#include <map>
#include <set> #include <set>
#include "chrome/common/profiling/memlog_stream.h" #include "chrome/common/profiling/memlog_stream.h"
...@@ -50,6 +51,19 @@ class AllocationEvent { ...@@ -50,6 +51,19 @@ class AllocationEvent {
} }
}; };
// Implements < for AllocationEvent using everything but the address. Used to
// key containers (e.g. AllocationCountMap) that aggregate allocations by
// their metadata (size, backtrace, context, allocator) rather than by where
// they live in memory.
struct MetadataPartialLess {
bool operator()(const AllocationEvent& lhs,
const AllocationEvent& rhs) const {
// Note that we're using pointer comparisons on the backtrace objects
// since they're atoms (interned, so equal backtraces share one object)
// and the actual ordering is not important.
return std::tie(lhs.size_, lhs.backtrace_, lhs.context_id_,
lhs.allocator_) < std::tie(rhs.size_, rhs.backtrace_,
rhs.context_id_,
rhs.allocator_);
}
};
// Implements == for AllocationEvents using address only. This is not a raw // Implements == for AllocationEvents using address only. This is not a raw
// operator because it only implements a comparison on the one field. // operator because it only implements a comparison on the one field.
struct AddressPartialEqual { struct AddressPartialEqual {
...@@ -59,6 +73,19 @@ class AllocationEvent { ...@@ -59,6 +73,19 @@ class AllocationEvent {
} }
}; };
// Implements == for AllocationEvent using everything but the address. This is
// the equality counterpart of MetadataPartialLess: two events compare equal
// when their size, backtrace, context, and allocator all match, regardless of
// the allocation's address.
struct MetadataPartialEqual {
bool operator()(const AllocationEvent& lhs,
const AllocationEvent& rhs) const {
// Note that we're using pointer comparisons on the backtrace objects
// since they're atoms (interned, so equal backtraces share one object).
return std::tie(lhs.size_, lhs.backtrace_, lhs.context_id_,
lhs.allocator_) == std::tie(rhs.size_, rhs.backtrace_,
rhs.context_id_,
rhs.allocator_);
}
};
private: private:
AllocatorType allocator_ = AllocatorType::kMalloc; AllocatorType allocator_ = AllocatorType::kMalloc;
Address address_; Address address_;
...@@ -67,9 +94,20 @@ class AllocationEvent { ...@@ -67,9 +94,20 @@ class AllocationEvent {
int context_id_ = 0; int context_id_ = 0;
}; };
// Unique set based on addresses of allocations.
using AllocationEventSet = using AllocationEventSet =
std::set<AllocationEvent, AllocationEvent::AddressPartialLess>; std::set<AllocationEvent, AllocationEvent::AddressPartialLess>;
// Maps allocation metadata to allocation counts of that type. In this case,
// the address of the AllocationEvent is unused.
using AllocationCountMap =
std::map<AllocationEvent, int, AllocationEvent::MetadataPartialLess>;
// Aggregates the allocation events to a count map. The address of the
// allocation event in the returned map will be the address of the first item
// in the set with that metadata.
AllocationCountMap AllocationEventSetToCountMap(const AllocationEventSet& set);
} // namespace profiling } // namespace profiling
#endif // CHROME_PROFILING_ALLOCATION_EVENT_H_ #endif // CHROME_PROFILING_ALLOCATION_EVENT_H_
...@@ -58,4 +58,8 @@ void AllocationTracker::OnComplete() { ...@@ -58,4 +58,8 @@ void AllocationTracker::OnComplete() {
std::move(complete_callback_)); std::move(complete_callback_));
} }
// Returns a snapshot of the currently live allocations, aggregated into
// per-metadata counts (see AllocationEventSetToCountMap).
AllocationCountMap AllocationTracker::GetCounts() const {
  AllocationCountMap counts = AllocationEventSetToCountMap(live_allocs_);
  return counts;
}
} // namespace profiling } // namespace profiling
...@@ -36,11 +36,22 @@ class AllocationTracker : public MemlogReceiver { ...@@ -36,11 +36,22 @@ class AllocationTracker : public MemlogReceiver {
const AllocationEventSet& live_allocs() const { return live_allocs_; } const AllocationEventSet& live_allocs() const { return live_allocs_; }
const ContextMap& context() const { return context_; } const ContextMap& context() const { return context_; }
// Returns the aggregated allocation counts currently live.
AllocationCountMap GetCounts() const;
private: private:
CompleteCallback complete_callback_; CompleteCallback complete_callback_;
BacktraceStorage* backtrace_storage_; BacktraceStorage* backtrace_storage_;
// Need to track all live objects. Since the free information doesn't have
// the metadata, we can't keep a map of counts indexed by just the metadata
// (which is all the trace JSON needs), but need to keep an index by address.
//
// This could be a two-level index, where one set of metadata is kept and
// addresses index into that. But a full copy of the metadata is about the
// same size as the internal map node required for this second index, with
// additional complexity.
AllocationEventSet live_allocs_; AllocationEventSet live_allocs_;
// The context strings are atoms. Since there are O(100's) of these, we do // The context strings are atoms. Since there are O(100's) of these, we do
......
This diff is collapsed.
...@@ -18,9 +18,15 @@ namespace profiling { ...@@ -18,9 +18,15 @@ namespace profiling {
// Configuration passed to the export functions because they take many // Configuration passed to the export functions because they take many
// arguments. All parameters must be set. The pointers are not managed by this // arguments. All parameters must be set. The pointers are not managed by this
// class and must outlive it. // class and must outlive it.
//
// Whether something is a pointer or a value is determined by what makes the
// call site nicer without introducing unnecessary copies.
struct ExportParams { struct ExportParams {
ExportParams();
~ExportParams();
// Allocation events to export. // Allocation events to export.
const AllocationEventSet* set = nullptr; AllocationCountMap allocs;
// VM map of all regions in the process. // VM map of all regions in the process.
const std::vector<memory_instrumentation::mojom::VmRegionPtr>* maps = nullptr; const std::vector<memory_instrumentation::mojom::VmRegionPtr>* maps = nullptr;
......
This diff is collapsed.
...@@ -125,7 +125,7 @@ bool MemlogConnectionManager::DumpProcess( ...@@ -125,7 +125,7 @@ bool MemlogConnectionManager::DumpProcess(
std::ostringstream oss; std::ostringstream oss;
ExportParams params; ExportParams params;
params.set = &connection->tracker.live_allocs(); params.allocs = connection->tracker.GetCounts();
params.context_map = &connection->tracker.context(); params.context_map = &connection->tracker.context();
params.maps = &maps; params.maps = &maps;
params.min_size_threshold = kMinSizeThreshold; params.min_size_threshold = kMinSizeThreshold;
...@@ -179,7 +179,7 @@ void MemlogConnectionManager::DumpProcessForTracing( ...@@ -179,7 +179,7 @@ void MemlogConnectionManager::DumpProcessForTracing(
Connection* connection = it->second.get(); Connection* connection = it->second.get();
std::ostringstream oss; std::ostringstream oss;
ExportParams params; ExportParams params;
params.set = &connection->tracker.live_allocs(); params.allocs = connection->tracker.GetCounts();
params.maps = &maps; params.maps = &maps;
params.context_map = &connection->tracker.context(); params.context_map = &connection->tracker.context();
params.min_size_threshold = kMinSizeThreshold; params.min_size_threshold = kMinSizeThreshold;
......
...@@ -161,10 +161,13 @@ MemlogStreamParser::ReadStatus MemlogStreamParser::ParseAlloc() { ...@@ -161,10 +161,13 @@ MemlogStreamParser::ReadStatus MemlogStreamParser::ParseAlloc() {
if (!PeekBytes(sizeof(AllocPacket), &alloc_packet)) if (!PeekBytes(sizeof(AllocPacket), &alloc_packet))
return READ_NO_DATA; return READ_NO_DATA;
std::vector<Address> stack; // Validate data.
if (alloc_packet.stack_len > kMaxStackEntries || if (alloc_packet.stack_len > kMaxStackEntries ||
alloc_packet.context_byte_len > kMaxContextLen) alloc_packet.context_byte_len > kMaxContextLen ||
return READ_ERROR; // Prevent overflow on corrupted or malicious data. alloc_packet.allocator >= AllocatorType::kCount)
return READ_ERROR;
std::vector<Address> stack;
stack.resize(alloc_packet.stack_len); stack.resize(alloc_packet.stack_len);
size_t stack_byte_size = sizeof(Address) * alloc_packet.stack_len; size_t stack_byte_size = sizeof(Address) * alloc_packet.stack_len;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment