Commit 203654f1 authored by ssid, committed by Commit bot

[memory-infra] Implement peak detection logic

This CL implements peak detection in the memory dump scheduler.
More discussion at https://goo.gl/0kOU4A.

BUG=607533

Review-Url: https://codereview.chromium.org/2737153002
Cr-Commit-Position: refs/heads/master@{#456163}
parent d9ab107e
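
For readers who want the heuristic in isolation before reading the diff, here is a minimal standalone sketch of the peak-detection logic this CL adds (class and function names here are illustrative, not the CL's API): keep a rolling window of recent memory samples and flag a peak when the newest sample deviates from the window mean by more than 3.69 standard deviations, unless the window is essentially flat.

// Illustrative sketch only; the CL implements this inside
// MemoryDumpScheduler::IsCurrentSamplePeak() using PollingTriggerState.
#include <cstddef>
#include <cstdint>
#include <vector>

class PeakDetector {
 public:
  explicit PeakDetector(size_t window_size) : samples_(window_size, 0) {}

  // Feeds one memory sample (in KB) and returns true if it looks like a peak.
  bool AddSampleAndCheckPeak(uint64_t sample_kb) {
    index_ = (index_ + 1) % samples_.size();

    uint64_t mean = 0;
    for (uint64_t s : samples_) {
      if (s == 0) {
        // Window not full yet: just record the sample, no peak detection.
        samples_[index_] = sample_kb;
        return false;
      }
      mean += s;
    }
    mean /= samples_.size();

    uint64_t variance = 0;
    for (uint64_t s : samples_)
      variance += (s - mean) * (s - mean);
    variance /= samples_.size();

    samples_[index_] = sample_kb;

    // Flat window (stddev below ~0.2% of the mean): process looks inactive.
    if (variance < (mean / 500) * (mean / 500))
      return false;

    // Peak if the new sample is more than 3.69 stddevs away from the mean.
    return (sample_kb - mean) * (sample_kb - mean) > 3.69 * 3.69 * variance;
  }

 private:
  std::vector<uint64_t> samples_;
  size_t index_ = 0;
};

In the CL the window has kMaxNumMemorySamples (50) slots, lives in PollingTriggerState, and is cleared whenever a dump fires, as the diff below shows.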
diff --git a/base/trace_event/memory_dump_scheduler.cc b/base/trace_event/memory_dump_scheduler.cc
@@ -4,6 +4,7 @@
 
 #include "base/trace_event/memory_dump_scheduler.h"
 
+#include "base/process/process_metrics.h"
 #include "base/single_thread_task_runner.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "base/trace_event/memory_dump_manager.h"
@@ -15,7 +16,7 @@ namespace trace_event {
 namespace {
 // Threshold on increase in memory from last dump beyond which a new dump must
 // be triggered.
-int64_t kMemoryIncreaseThreshold = 50 * 1024 * 1024;  // 50MiB
+int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024;  // 50MiB
 const uint32_t kMemoryTotalsPollingInterval = 25;
 uint32_t g_polling_interval_ms_for_testing = 0;
 }  // namespace
@@ -85,8 +86,24 @@ void MemoryDumpScheduler::NotifyPollingSupported() {
   if (!polling_state_.is_configured || polling_state_.is_polling_enabled)
     return;
   polling_state_.is_polling_enabled = true;
+  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i)
+    polling_state_.last_memory_totals_kb[i] = 0;
+  polling_state_.last_memory_totals_kb_index = 0;
   polling_state_.num_polls_from_last_dump = 0;
   polling_state_.last_dump_memory_total = 0;
+
+  if (!polling_state_.memory_increase_threshold) {
+    polling_state_.memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+    defined(OS_ANDROID)
+    // Set threshold to 1% of total system memory.
+    SystemMemoryInfoKB meminfo;
+    bool res = GetSystemMemoryInfo(&meminfo);
+    if (res)
+      polling_state_.memory_increase_threshold = (meminfo.total / 100) * 1024;
+#endif
+  }
+
   polling_state_.polling_task_runner->PostTask(
       FROM_HERE,
       Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
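
For scale, some illustrative arithmetic (not part of the CL): SystemMemoryInfoKB reports totals in KiB, so on a machine with 4 GiB of RAM the computed threshold works out to

  meminfo.total          = 4 * 1024 * 1024 = 4194304 KiB
  meminfo.total / 100    = 41943 KiB                   (1% of total)
  (total / 100) * 1024   = 42949632 bytes              (~41 MiB)

which replaces the 50 MiB default wherever GetSystemMemoryInfo() succeeds; on other platforms the default stays in effect.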
@@ -180,14 +197,59 @@ bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
   int64_t increase_from_last_dump =
       current_memory_total - polling_state_.last_dump_memory_total;
-  should_dump |= increase_from_last_dump > kMemoryIncreaseThreshold;
+  should_dump |=
+      increase_from_last_dump > polling_state_.memory_increase_threshold;
+  should_dump |= IsCurrentSamplePeak(current_memory_total);
   if (should_dump) {
     polling_state_.last_dump_memory_total = current_memory_total;
     polling_state_.num_polls_from_last_dump = 0;
+    for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i)
+      polling_state_.last_memory_totals_kb[i] = 0;
+    polling_state_.last_memory_totals_kb_index = 0;
   }
   return should_dump;
 }
 
+bool MemoryDumpScheduler::IsCurrentSamplePeak(
+    uint64_t current_memory_total_bytes) {
+  uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
+  polling_state_.last_memory_totals_kb_index =
+      (polling_state_.last_memory_totals_kb_index + 1) %
+      PollingTriggerState::kMaxNumMemorySamples;
+  uint64_t mean = 0;
+  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+    if (polling_state_.last_memory_totals_kb[i] == 0) {
+      // Not enough samples to detect peaks.
+      polling_state_
+          .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+          current_memory_total_kb;
+      return false;
+    }
+    mean += polling_state_.last_memory_totals_kb[i];
+  }
+  mean = mean / PollingTriggerState::kMaxNumMemorySamples;
+
+  uint64_t variance = 0;
+  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
+    variance += (polling_state_.last_memory_totals_kb[i] - mean) *
+                (polling_state_.last_memory_totals_kb[i] - mean);
+  }
+  variance = variance / PollingTriggerState::kMaxNumMemorySamples;
+
+  polling_state_
+      .last_memory_totals_kb[polling_state_.last_memory_totals_kb_index] =
+      current_memory_total_kb;
+
+  // If stddev is less than 0.2% then we consider that the process is inactive.
+  bool is_stddev_low = variance < mean / 500 * mean / 500;
+  if (is_stddev_low)
+    return false;
+
+  // (mean + 3.69 * stddev) corresponds to a value that is higher than current
+  // sample with 99.99% probability.
+  return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
+         (3.69 * 3.69 * variance);
+}
+
 MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
     : is_configured(false),
       dump_count(0),
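
Spelled out, the two tests in IsCurrentSamplePeak() are equivalent (up to integer truncation) to the sqrt form below. This is an illustrative reformulation, not code from the CL; the parameters stand in for the locals above.

#include <cmath>
#include <cstdint>

bool IsInactive(uint64_t mean, uint64_t variance) {
  // variance < (mean / 500)^2  <=>  stddev < mean / 500, i.e. below 0.2% of the mean.
  return std::sqrt(static_cast<double>(variance)) < mean / 500.0;
}

bool IsPeak(uint64_t current_kb, uint64_t mean, uint64_t variance) {
  // (x - mean)^2 > (3.69)^2 * variance  <=>  |x - mean| > 3.69 * stddev.
  double stddev = std::sqrt(static_cast<double>(variance));
  return std::fabs(static_cast<double>(current_kb) -
                   static_cast<double>(mean)) > 3.69 * stddev;
}

The CL keeps the squared form, which avoids a square root on the polling thread; 3.69 standard deviations is the bound the in-code comment describes as exceeding a normally distributed sample with 99.99% probability.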
@@ -212,7 +274,9 @@ MemoryDumpScheduler::PollingTriggerState::PollingTriggerState(
                               : kMemoryTotalsPollingInterval),
       min_polls_between_dumps(0),
       num_polls_from_last_dump(0),
-      last_dump_memory_total(0) {}
+      last_dump_memory_total(0),
+      memory_increase_threshold(0),
+      last_memory_totals_kb_index(0) {}
 
 MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {
   DCHECK(!polling_task_runner);
......
diff --git a/base/trace_event/memory_dump_scheduler.h b/base/trace_event/memory_dump_scheduler.h
@@ -68,6 +68,8 @@ class BASE_EXPORT MemoryDumpScheduler {
   };
 
   struct PollingTriggerState {
+    static const uint32_t kMaxNumMemorySamples = 50;
+
     explicit PollingTriggerState(
         scoped_refptr<SingleThreadTaskRunner> polling_task_runner);
     ~PollingTriggerState();
@@ -85,6 +87,9 @@ class BASE_EXPORT MemoryDumpScheduler {
     int num_polls_from_last_dump;
     uint64_t last_dump_memory_total;
+    int64_t memory_increase_threshold;
+    uint64_t last_memory_totals_kb[kMaxNumMemorySamples];
+    uint32_t last_memory_totals_kb_index;
 
     DISALLOW_COPY_AND_ASSIGN(PollingTriggerState);
   };
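
A rough sense of the window this array covers, assuming the 25 in kMemoryTotalsPollingInterval is milliseconds (consistent with g_polling_interval_ms_for_testing):

  kMaxNumMemorySamples * polling interval = 50 * 25 ms = 1250 ms ≈ 1.25 s

so the peak detector compares each new sample against roughly the last 1.25 seconds of memory totals.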
@@ -101,6 +106,9 @@ class BASE_EXPORT MemoryDumpScheduler {
   // Returns true if peak memory value is detected.
   bool ShouldTriggerDump(uint64_t current_memory_total);
 
+  // Helper to detect peaks in memory usage.
+  bool IsCurrentSamplePeak(uint64_t current_memory_total);
+
   // Must be set before enabling tracing.
   static void SetPollingIntervalForTesting(uint32_t interval);
......