Commit e34d38aa authored by Benoit Lize, committed by Commit Bot

tools: Dump a process' heap, and analyze it.

Adds a tool to dump the contents of all anonymous mappings in a given process,
and one to show statistics about the content.

From a process, this shows:
- Total anonymous mappings size
- For each page, whether it's present XOR swapped
- The total number of 0 pages
- Pages with identical non-zero content
- Compressed size of non-zero pages.

The compression ratio is estimated using zlib, which is likely to overestimate
it, since zlib compresses better than ZRAM's compression algorithm.
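
A rough sketch of the per-page estimate in Python (the helper name here is
made up; zlib level 1 and the 4 KiB page size match the analysis script in
this change):

  import zlib

  PAGE_SIZE = 4096

  def page_compression_ratio(page_bytes):
    # Compressed size of one raw page divided by its size, using zlib level 1.
    return float(len(zlib.compress(page_bytes, 1))) / PAGE_SIZE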

Tested on Android and Linux.

Sample output (cnn.com on Android Go):

Total pages = 36532 (149.64MB)
Total zero pages = 15191 (41.58%)
Total present zero pages = 754 (3.09MB)
Total size of non-zero pages = 87412736 (87.41MB)
Total compressed size = 28994296 (33.17%)
Duplicated non-zero pages = 812
Max non-zero pages with the same content = 387
Swapped pages = 231 (946.18kB)
Non-present pages = 14667 (60.08MB)
Freed = 5232960 (5.23MB)

TBR: brucedawson@chromium.org

Change-Id: Iba991f8a57d01591e72359163711064297ea8658
Bug: 845459
Reviewed-on: https://chromium-review.googlesource.com/1087059
Commit-Queue: Benoit L <lizeb@chromium.org>
Reviewed-by: Benoit L <lizeb@chromium.org>
Reviewed-by: Bruce Dawson <brucedawson@chromium.org>
Reviewed-by: Egor Pasko <pasko@chromium.org>
Reviewed-by: agrieve <agrieve@chromium.org>
Cr-Commit-Position: refs/heads/master@{#567960}
parent e78aebc9
@@ -376,6 +376,7 @@ group("gn_all") {
"//third_party/breakpad:core-2-minidump",
"//third_party/breakpad:generate_test_dump",
"//third_party/breakpad:minidump-2-core",
"//tools/dump_process_memory:dump_process",
]
}
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
executable("dump_process") {
sources = [
"dump_process.cc",
]
deps = [
"//base",
]
}
group("all") {
deps = [
":dump_process",
]
}
lizeb@chromium.org
pasko@chromium.org
#!/usr/bin/python
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""From a dump generated by dump_process.cc dump, prints statistics about
its content.
"""
import array
import collections
import hashlib
import logging
import os
import struct
import sys
import zlib
PAGE_SIZE = 1 << 12
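# 4 KiB pages; dump_process.cc CHECK()s that this matches sysconf(_SC_PAGESIZE).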
def _ReadPage(f):
"""Reads a page of data from a file.
Args:
f: (file) An opened file to read from.
Returns:
An array.array() of unsigned int with the page content.
"""
result = array.array('I')
result.fromfile(f, PAGE_SIZE / result.itemsize)
return result
def _PrettyPrintSize(x):
"""Pretty print sizes in bytes, e.g. 123456 -> 123.45kB.
Args:
x: (int) size
Returns:
(str) Pretty printed version, 2 decimal places.
"""
if x < 1e3:
return str(x)
elif 1e3 <= x < 1e6:
return '%.2fkB' % (x / 1e3)
elif 1e6 <= x < 1e9:
return '%.2fMB' % (x / 1e6)
else:
return '%.2fGB' % (x / 1e9)
class MappingStats(object):
"""Statistics about a mapping, from a dump.
Slots:
filename: (str) Dump filename.
start: (int) Start address of the mapping.
end: (int) End address of the mapping.
pages: (int) Size of the mapping in pages.
is_zero: ([bool]) For each page, whether it's a zero page.
is_present: ([bool]) For each page, whether it's present.
is_swapped: ([bool]) For each page, whether it has been swapped out.
compressed_size: ([int]) If a page is not zero, its compressed size.
hashes: ([str]) If a page is not zero, its SHA1 hash.
"""
__slots__ = ('filename', 'start', 'end', 'pages', 'is_zero', 'is_present',
'is_swapped', 'compressed_size', 'hashes', 'freed')
def __init__(self, filename, start, end):
"""Init.
Args:
filename: (str) Dump filename.
start: (int) Start address.
end: (int) End address.
"""
self.filename = filename
self.start = start
self.end = end
self.pages = (end - start) / PAGE_SIZE
self.is_zero = [False for i in range(self.pages)]
self.is_present = [False for i in range(self.pages)]
self.is_swapped = [False for i in range(self.pages)]
self.compressed_size = [0 for i in range(self.pages)]
self.hashes = [None for i in range(self.pages)]
self.freed = 0
def _GetStatsFromFileDump(filename):
"""Computes per-dump statistics.
Args:
filename: (str) Path to the dump.
Returns:
MappingStats for the mapping.
"""
# These are typically only populated with DCHECK() on.
FREED_PATTERNS = (0xcccccccc, # V8
0xcdcdcdcd, # PartitionAlloc "zapped"
0xabababab, # PartitionAlloc "uninitialized"
0xdeadbeef, # V8 "zapped"
0x0baddeaf, # V8 zapped handles
0x0baffedf, # V8 zapped global handles
0x0beefdaf, # V8 zapped from space
0xbeefdeef, # V8 zapped slots
0xbadbaddb, # V8 debug zapped
0xfeed1eaf) # V8 zapped freelist
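# Each page is read as 32-bit words; every word matching one of these patterns
# counts 4 bytes towards the "Freed" total reported by PrintStats().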
# Dump integrity checks.
metadata_filename = filename + '.metadata'
pid_start_end = os.path.basename(filename)[:-len('.dump')]
(_, start, end) = [int(x, 10) for x in pid_start_end.split('-')]
file_stat = os.stat(filename)
assert start % PAGE_SIZE == 0
assert end % PAGE_SIZE == 0
assert file_stat.st_size == (end - start)
metadata_file_stat = os.stat(metadata_filename)
result = MappingStats(filename, start, end)
# Each line is [01]{2}\n, e.g. '10\n', one line per page.
assert metadata_file_stat.st_size == 3 * result.pages
with open(filename, 'r') as f, open(metadata_filename, 'r') as metadata_f:
for i in range(result.pages):
page = _ReadPage(f)
assert len(page) == 1024
result.freed += 4 * sum(x in FREED_PATTERNS for x in page)
is_zero = max(page) == 0
present, swapped = (bool(int(x)) for x in metadata_f.readline().strip())
# Not present, not swapped private anonymous == lazily initialized zero
# page.
if not present and not swapped:
assert is_zero
result.is_zero[i] = is_zero
result.is_present[i] = present
result.is_swapped[i] = swapped
if not is_zero:
sha1 = hashlib.sha1()
sha1.update(page)
page_hash = sha1.digest()
result.hashes[i] = page_hash
compressed = zlib.compress(page, 1)
result.compressed_size[i] = len(compressed)
return result
def _FindPageFromHash(mappings, page_hash):
"""Returns a page with a given hash from a list of mappings.
Args:
mappings: ([MappingStats]) List of mappings.
page_hash: (str) Page hash to look for.
Returns:
array.array(uint32_t) with the page content
"""
for mapping in mappings:
for i in range(mapping.pages):
if mapping.hashes[i] == page_hash:
with open(mapping.filename, 'r') as f:
f.seek(i * PAGE_SIZE)
page = _ReadPage(f)
sha1 = hashlib.sha1()
sha1.update(page)
assert page_hash == sha1.digest()
return page
def _PrintPage(page):
"""Prints the content of a page."""
for i, x in enumerate(page):
print '{:08x}'.format(x),
if i % 16 == 15:
print
def PrintStats(dumps):
"""Logs statistics about a process mappings dump.
Args:
dumps: ([str]) List of dumps.
"""
dump_stats = [_GetStatsFromFileDump(filename) for filename in dumps]
total_pages = sum(stats.pages for stats in dump_stats)
total_zero_pages = sum(sum(stats.is_zero) for stats in dump_stats)
total_compressed_size = sum(sum(stats.compressed_size)
for stats in dump_stats)
total_swapped_pages = sum(sum(stats.is_swapped) for stats in dump_stats)
total_not_present_pages = sum(stats.pages - sum(stats.is_present)
for stats in dump_stats)
total_present_zero_pages = sum(
sum(x == (True, True) for x in zip(stats.is_zero, stats.is_present))
for stats in dump_stats)
total_freed_space = sum(stats.freed for stats in dump_stats)
content_to_count = collections.defaultdict(int)
for stats in dump_stats:
for page_hash in stats.hashes:
if page_hash:
content_to_count[page_hash] += 1
print 'Total pages = %d (%s)' % (total_pages,
_PrettyPrintSize(total_pages * PAGE_SIZE))
print 'Total zero pages = %d (%.02f%%)' % (
total_zero_pages, (100. * total_zero_pages) / total_pages)
print 'Total present zero pages = %d (%s)' % (
total_present_zero_pages,
_PrettyPrintSize(total_present_zero_pages * PAGE_SIZE))
total_size_non_zero_pages = (total_pages - total_zero_pages) * PAGE_SIZE
print 'Total size of non-zero pages = %d (%s)' % (
total_size_non_zero_pages, _PrettyPrintSize(total_size_non_zero_pages))
print 'Total compressed size = %d (%.02f%%)' % (
total_compressed_size,
(100. * total_compressed_size) / total_size_non_zero_pages)
duplicated_pages = sum(x - 1 for x in content_to_count.values())
print 'Duplicated non-zero pages = %d' % duplicated_pages
count_and_hashes = sorted(((v, k) for k, v in content_to_count.items()),
reverse=True)
max_common_pages = count_and_hashes[0][0] - 1
print 'Max non-zero pages with the same content = %d' % max_common_pages
print 'Swapped pages = %d (%s)' % (
total_swapped_pages, _PrettyPrintSize(total_swapped_pages * PAGE_SIZE))
print 'Non-present pages = %d (%s)' % (
total_not_present_pages,
_PrettyPrintSize(total_not_present_pages * PAGE_SIZE))
print 'Freed = %d (%s)' % (
total_freed_space, _PrettyPrintSize(total_freed_space))
print 'Top Duplicated Pages:'
for i in range(10):
count, page_hash = count_and_hashes[i]
print '%d common pages' % count
page = _FindPageFromHash(dump_stats, page_hash)
_PrintPage(page)
print
def main():
logging.basicConfig(level=logging.INFO)
if len(sys.argv) != 2:
logging.error('Usage: %s <dumps_directory>', sys.argv[0])
sys.exit(1)
directory = sys.argv[1]
dumps = [os.path.join(directory, f) for f in os.listdir(directory)
if f.endswith('.dump')]
PrintStats(dumps)
if __name__ == '__main__':
main()
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <limits>
#include <string>
#include <vector>
#include "base/debug/proc_maps_linux.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
namespace {
using base::debug::MappedMemoryRegion;
constexpr size_t kPageSize = 1 << 12;
// See https://www.kernel.org/doc/Documentation/vm/pagemap.txt.
struct PageMapEntry {
uint64_t pfn_or_swap : 55;
uint64_t soft_dirty : 1;
uint64_t exclusively_mapped : 1;
uint64_t unused : 4;
uint64_t file_mapped_or_shared_anon : 1;
uint64_t swapped : 1;
uint64_t present : 1;
};
static_assert(sizeof(PageMapEntry) == sizeof(uint64_t), "Wrong bitfield size");
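// The entry for a virtual address lives at byte offset
// (address / kPageSize) * sizeof(PageMapEntry) in /proc/<pid>/pagemap, which
// is what DumpRegion() below seeks to for each page.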
// Calls ptrace() on a process, and detaches in the destructor.
class ScopedPtracer {
public:
ScopedPtracer(pid_t pid) : pid_(pid), is_attached_(false) {
// ptrace() delivers a SIGSTOP signal to one thread in the target process,
// unless it is already stopped. Since we want to stop the whole process,
// send a signal to every thread in the process group.
pid_t process_group_id = getpgid(pid);
if (killpg(process_group_id, SIGSTOP)) {
PLOG(ERROR) << "Cannot stop the process group of " << pid;
return;
}
if (ptrace(PTRACE_ATTACH, pid, nullptr, nullptr)) {
PLOG(ERROR) << "Unable to attach to " << pid;
return;
}
// ptrace(PTRACE_ATTACH) sends a SIGSTOP signal to the process; we need to
// wait for it.
int status;
pid_t ret = HANDLE_EINTR(waitpid(pid, &status, 0));
if (ret != pid) {
PLOG(ERROR) << "Waiting for the process failed";
return;
}
if (!WIFSTOPPED(status)) {
LOG(ERROR) << "The process is not stopped";
ptrace(PTRACE_DETACH, pid, 0, 0);
return;
}
is_attached_ = true;
}
~ScopedPtracer() {
if (!is_attached_)
return;
if (ptrace(PTRACE_DETACH, pid_, 0, 0)) {
PLOG(ERROR) << "Cannot detach from " << pid_;
}
pid_t process_group_id = getpgid(pid_);
if (killpg(process_group_id, SIGCONT)) {
PLOG(ERROR) << "Cannot resume the process " << pid_;
return;
}
}
bool IsAttached() const { return is_attached_; }
private:
pid_t pid_;
bool is_attached_;
};
bool ParseProcMaps(pid_t pid, std::vector<MappedMemoryRegion>* regions) {
std::string path = base::StringPrintf("/proc/%d/maps", pid);
std::string proc_maps;
bool ok = base::ReadFileToString(base::FilePath(path), &proc_maps);
if (!ok) {
LOG(ERROR) << "Cannot read " << path;
return false;
}
ok = base::debug::ParseProcMaps(proc_maps, regions);
if (!ok) {
LOG(ERROR) << "Cannot parse " << path;
return false;
}
return true;
}
// Keep anonymous rw-p regions.
bool ShouldDump(const MappedMemoryRegion& region) {
const auto rw_p = MappedMemoryRegion::READ | MappedMemoryRegion::WRITE |
MappedMemoryRegion::PRIVATE;
if (region.permissions != rw_p)
return false;
if (base::StartsWith(region.path, "/", base::CompareCase::SENSITIVE) ||
base::StartsWith(region.path, "[stack]", base::CompareCase::SENSITIVE)) {
return false;
}
return true;
}
base::File OpenProcPidFile(const char* filename, pid_t pid) {
std::string path = base::StringPrintf("/proc/%d/%s", pid, filename);
auto file = base::File(base::FilePath(path),
base::File::FLAG_OPEN | base::File::FLAG_READ);
if (!file.IsValid()) {
PLOG(ERROR) << "Cannot open " << path;
}
return file;
}
bool DumpRegion(const MappedMemoryRegion& region,
pid_t pid,
base::File* proc_mem,
base::File* proc_pagemap) {
size_t size_in_pages = (region.end - region.start) / kPageSize;
std::string output_path = base::StringPrintf("%d-%" PRIuS "-%" PRIuS ".dump",
pid, region.start, region.end);
base::File output_file(base::FilePath(output_path),
base::File::FLAG_WRITE | base::File::FLAG_CREATE);
if (!output_file.IsValid()) {
PLOG(ERROR) << "Cannot open " << output_path;
return false;
}
std::string metadata_path = output_path + std::string(".metadata");
base::File metadata_file(base::FilePath(metadata_path),
base::File::FLAG_WRITE | base::File::FLAG_CREATE);
if (!metadata_file.IsValid()) {
PLOG(ERROR) << "Cannot open " << metadata_path;
return false;
}
// Dump metadata.
// Important: Metadata must be dumped before the data, as reading from
// /proc/pid/mem will move the data back from swap, so dumping metadata
// later would not show anything in swap.
// This also means that dumping the same process twice will result in
// inaccurate metadata.
for (size_t i = 0; i < size_in_pages; ++i) {
// See https://www.kernel.org/doc/Documentation/vm/pagemap.txt
// 64 bits per page.
int64_t pagemap_offset =
((region.start / kPageSize) + i) * sizeof(PageMapEntry);
PageMapEntry entry;
proc_pagemap->Seek(base::File::FROM_BEGIN, pagemap_offset);
int size_read = proc_pagemap->ReadAtCurrentPos(
reinterpret_cast<char*>(&entry), sizeof(PageMapEntry));
if (size_read != sizeof(PageMapEntry)) {
PLOG(ERROR) << "Cannot read from /proc/pid/pagemap at offset "
<< pagemap_offset;
return false;
}
std::string metadata = base::StringPrintf(
"%c%c\n", entry.present ? '1' : '0', entry.swapped ? '1' : '0');
metadata_file.WriteAtCurrentPos(metadata.c_str(), metadata.size());
}
// Writing data page by page to avoid allocating too much memory.
std::vector<char> buffer(kPageSize);
for (size_t i = 0; i < size_in_pages; ++i) {
uint64_t address = region.start + i * kPageSize;
// Works because the upper half of the address space is reserved for the
// kernel on at least ARM64 and x86-64.
CHECK(address <= std::numeric_limits<int64_t>::max());
proc_mem->Seek(base::File::FROM_BEGIN, static_cast<int64_t>(address));
int size_read = proc_mem->ReadAtCurrentPos(&buffer[0], kPageSize);
if (size_read != kPageSize) {
PLOG(ERROR) << "Cannot read from /proc/pid/mem at offset " << address;
return false;
}
int64_t output_offset = i * kPageSize;
int size_written = output_file.Write(output_offset, &buffer[0], kPageSize);
if (size_written != kPageSize) {
PLOG(ERROR) << "Cannot write to output file";
return false;
}
}
return true;
}
// Dumps the content of all the anonymous rw-p mappings in a given process to
// disk.
bool DumpMappings(pid_t pid) {
LOG(INFO) << "Attaching to " << pid;
ScopedPtracer tracer(pid);
if (!tracer.IsAttached())
return false;
LOG(INFO) << "Reading /proc/pid/maps";
std::vector<base::debug::MappedMemoryRegion> regions;
bool ok = ParseProcMaps(pid, &regions);
if (!ok)
return false;
base::File proc_mem = OpenProcPidFile("mem", pid);
if (!proc_mem.IsValid())
return false;
base::File proc_pagemap = OpenProcPidFile("pagemap", pid);
if (!proc_pagemap.IsValid())
return false;
for (const auto& region : regions) {
if (!ShouldDump(region))
continue;
std::string message =
base::StringPrintf("%" PRIuS "-%" PRIuS " (size %" PRIuS ")",
region.start, region.end, region.end - region.start);
LOG(INFO) << "Dumping " << message;
ok = DumpRegion(region, pid, &proc_mem, &proc_pagemap);
if (!ok) {
LOG(WARNING) << "Failed to dump region";
}
}
return true;
}
} // namespace
int main(int argc, char** argv) {
CHECK(sysconf(_SC_PAGESIZE) == kPageSize);
if (argc != 2) {
LOG(ERROR) << "Usage: " << argv[0] << " <pid>";
return 1;
}
pid_t pid;
bool ok = base::StringToInt(argv[1], &pid);
if (!ok) {
LOG(ERROR) << "Cannot parse PID";
return 1;
}
ok = DumpMappings(pid);
return ok ? 0 : 1;
}