Commit b6b2b89a authored by jar@chromium.org

Support incremental-max and sample in Profiler data

I also did some cleaning and refactoring in
tracked_objects.  We had a lot of functionality that
we no longer need now that it has migrated to JS
(a lot of accessors that are supplanted by the
ToValue() methods).

I'm anticipating that we'll move to asynchronous
collection of data from the profiler, so that we can
bounce around to the various threads and more cleanly
collect samples (without risking races during
data snapshots).  Several of the refactors are
heading in that direction.

r=rtenneti
tbr=jam (for microscopic content change)
BUG=106291,106293
Review URL: http://codereview.chromium.org/8775061

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@112928 0039d316-1c4b-4281-b951-d872f2087c98
parent c6562f4b
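Note on the sampling half of the change: DeathData::RecordDeath() below keeps one representative run/queue duration per death site by overwriting the stored sample with probability 1/count_, the standard single-slot reservoir-sampling update. A minimal standalone sketch of that idea follows (illustrative only; this struct and its names are not part of the patch):

    #include <cstdint>

    // Keeps one uniformly chosen sample out of every value ever recorded.
    // Replacing the stored sample with probability 1/count on the count-th
    // call leaves each recorded value with an equal 1/count chance of being
    // the one that is kept.
    struct SampledValue {
      int count = 0;
      int32_t sample = 0;

      void Record(int32_t value, int32_t random_number) {
        ++count;
        if (random_number % count == 0)  // True with probability ~1/count.
          sample = value;
      }
    };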
@@ -33,86 +33,134 @@ static const ThreadData::Status kInitialStartupState = ThreadData::ACTIVE;
 //------------------------------------------------------------------------------
 // DeathData tallies durations when a death takes place.
 
-void DeathData::RecordDeath(DurationInt queue_duration,
-                            DurationInt run_duration) {
+DeathData::DeathData() {
+  Clear();
+}
+
+DeathData::DeathData(int count) {
+  Clear();
+  count_ = count;
+}
+
+// TODO(jar): I need to see if this macro to optimize branching is worth it.
+//
+// This macro has no branching, so it is surely fast, and is equivalent to:
+//   if (assign_it)
+//     target = source;
+// We use a macro rather than a template to force this to inline.
+// Related code for calculating max is discussed on the web.
+#define CONDITIONAL_ASSIGN(assign_it, target, source) \
+    ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it))
+
+void DeathData::RecordDeath(const DurationInt queue_duration,
+                            const DurationInt run_duration,
+                            int32 random_number) {
+  queue_duration_sum_ += queue_duration;
+  run_duration_sum_ += run_duration;
   ++count_;
-  queue_time_.AddDuration(queue_duration);
-  run_time_.AddDuration(run_duration);
+
+  // Take a uniformly distributed sample over all durations ever supplied.
+  // The probability that we (instead) use this new sample is 1/count_.  This
+  // results in a completely uniform selection of the sample.
+  // We ignore the fact that we correlated our selection of a sample of run
+  // and queue times.
+  bool take_sample = 0 == (random_number % count_);
+  CONDITIONAL_ASSIGN(take_sample, queue_duration_sample_, queue_duration);
+  CONDITIONAL_ASSIGN(take_sample, run_duration_sample_, run_duration);
+
+  CONDITIONAL_ASSIGN(queue_duration_max_ < queue_duration, queue_duration_max_,
+                     queue_duration);
+  CONDITIONAL_ASSIGN(run_duration_max_ < run_duration, run_duration_max_,
+                     run_duration);
+  // Ensure we got the macros right.
+  DCHECK_GE(queue_duration_max_, queue_duration);
+  DCHECK_GE(run_duration_max_, run_duration);
+  DCHECK(!take_sample || run_duration_sample_ == run_duration);
+  DCHECK(!take_sample || queue_duration_sample_ == queue_duration);
 }
 
-DurationInt DeathData::AverageMsRunDuration() const {
-  return run_time_.AverageMsDuration(count_);
-}
+int DeathData::count() const { return count_; }
+
+DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; }
+
+DurationInt DeathData::run_duration_max() const { return run_duration_max_; }
+
+DurationInt DeathData::run_duration_sample() const {
+  return run_duration_sample_;
+}
 
-DurationInt DeathData::AverageMsQueueDuration() const {
-  return queue_time_.AverageMsDuration(count_);
+DurationInt DeathData::queue_duration_sum() const {
+  return queue_duration_sum_;
 }
 
-void DeathData::AddDeathData(const DeathData& other) {
-  count_ += other.count_;
-  queue_time_.AddData(other.queue_time_);
-  run_time_.AddData(other.run_time_);
+DurationInt DeathData::queue_duration_max() const {
+  return queue_duration_max_;
 }
 
+DurationInt DeathData::queue_duration_sample() const {
+  return queue_duration_sample_;
+}
+
 base::DictionaryValue* DeathData::ToValue() const {
   base::DictionaryValue* dictionary = new base::DictionaryValue;
   dictionary->Set("count", base::Value::CreateIntegerValue(count_));
   dictionary->Set("run_ms",
-                  base::Value::CreateIntegerValue(run_time_.duration()));
-  dictionary->Set("queue_ms",
-                  base::Value::CreateIntegerValue(queue_time_.duration()));
+                  base::Value::CreateIntegerValue(run_duration_sum()));
   dictionary->Set("run_ms_max",
-                  base::Value::CreateIntegerValue(run_time_.max()));
+                  base::Value::CreateIntegerValue(run_duration_max()));
+  dictionary->Set("run_ms_sample",
+                  base::Value::CreateIntegerValue(run_duration_sample()));
+  dictionary->Set("queue_ms",
+                  base::Value::CreateIntegerValue(queue_duration_sum()));
   dictionary->Set("queue_ms_max",
-                  base::Value::CreateIntegerValue(queue_time_.max()));
+                  base::Value::CreateIntegerValue(queue_duration_max()));
+  dictionary->Set("queue_ms_sample",
+                  base::Value::CreateIntegerValue(queue_duration_sample()));
   return dictionary;
 }
 
-void DeathData::Clear() {
-  count_ = 0;
-  run_time_.Clear();
-  queue_time_.Clear();
-}
-
-//------------------------------------------------------------------------------
-void DeathData::Data::AddData(const Data& other) {
-  duration_ += other.duration_;
-  if (max_ > other.max_)
-    return;
-  max_ = other.max_;
+void DeathData::ResetMax() {
+  run_duration_max_ = 0;
+  queue_duration_max_ = 0;
 }
 
-void DeathData::Data::AddDuration(DurationInt duration) {
-  duration_ += duration;
-  if (max_ > duration)
-    return;
-  max_ = duration;
-}
-
-DurationInt DeathData::Data::AverageMsDuration(int count) const {
-  if (duration_ == 0 || !count)
-    return 0;
-  return (duration_ + count / 2) / count;
-}
-
-void DeathData::Data::Clear() {
-  duration_ = 0;
-  max_ = 0;
+void DeathData::Clear() {
+  count_ = 0;
+  run_duration_sum_ = 0;
+  run_duration_max_ = 0;
+  run_duration_sample_ = 0;
+  queue_duration_sum_ = 0;
+  queue_duration_max_ = 0;
+  queue_duration_sample_ = 0;
 }
 
 //------------------------------------------------------------------------------
 BirthOnThread::BirthOnThread(const Location& location,
                              const ThreadData& current)
     : location_(location),
-      birth_thread_(&current) {}
+      birth_thread_(&current) {
+}
+
+const Location BirthOnThread::location() const { return location_; }
+const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; }
 
 //------------------------------------------------------------------------------
 Births::Births(const Location& location, const ThreadData& current)
     : BirthOnThread(location, current),
       birth_count_(1) { }
 
+int Births::birth_count() const { return birth_count_; }
+
+void Births::RecordBirth() { ++birth_count_; }
+
+void Births::ForgetBirth() { --birth_count_; }
+
+void Births::Clear() { birth_count_ = 0; }
+
 //------------------------------------------------------------------------------
-// ThreadData maintains the central data for all births and deaths.
+// ThreadData maintains the central data for all births and deaths on a single
+// thread.
 // TODO(jar): We should pull all these static vars together, into a struct, and
 // optimize layout so that we benefit from locality of reference during accesses
@@ -170,6 +218,10 @@ ThreadData::ThreadData(int thread_number)
 ThreadData::~ThreadData() {}
 
 void ThreadData::PushToHeadOfList() {
+  // Toss in a hint of randomness (atop the uniniitalized value).
+  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
+  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
+
   DCHECK(!next_);
   base::AutoLock lock(*list_lock_.Pointer());
   incarnation_count_for_pool_ = incarnation_counter_;
@@ -177,6 +229,14 @@ void ThreadData::PushToHeadOfList() {
   all_thread_data_list_head_ = this;
 }
 
+// static
+ThreadData* ThreadData::first() {
+  base::AutoLock lock(*list_lock_.Pointer());
+  return all_thread_data_list_head_;
+}
+
+ThreadData* ThreadData::next() const { return next_; }
+
 // static
 void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
   if (!Initialize())  // Always initialize if needed.
@@ -252,8 +312,10 @@ void ThreadData::OnThreadTerminationCleanup() {
 }
 
 // static
-base::DictionaryValue* ThreadData::ToValue() {
+base::DictionaryValue* ThreadData::ToValue(bool reset_max) {
   DataCollector collected_data;  // Gather data.
+  // Request multiple calls to collected_data.Append() for all threads.
+  SendAllMaps(reset_max, &collected_data);
   collected_data.AddListOfLivingObjects();  // Add births that are still alive.
   base::ListValue* list = collected_data.ToValue();
   base::DictionaryValue* dictionary = new base::DictionaryValue();
@@ -279,6 +341,12 @@ Births* ThreadData::TallyABirth(const Location& location) {
 void ThreadData::TallyADeath(const Births& birth,
                              DurationInt queue_duration,
                              DurationInt run_duration) {
+  // Stir in some randomness, plus add constant in case durations are zero.
+  const DurationInt kSomePrimeNumber = 4294967279;
+  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
+  // An address is going to have some randomness to it as well ;-).
+  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));
+
   DeathMap::iterator it = death_map_.find(&birth);
   DeathData* death_data;
   if (it != death_map_.end()) {
@@ -287,7 +355,7 @@ void ThreadData::TallyADeath(const Births& birth,
     base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
     death_data = &death_map_[&birth];
   }  // Release lock ASAP.
-  death_data->RecordDeath(queue_duration, run_duration);
+  death_data->RecordDeath(queue_duration, run_duration, random_number_);
 }
 
 // static
@@ -409,26 +477,46 @@ void ThreadData::TallyRunInAScopedRegionIfTracking(
   current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
 }
 
-// static
-ThreadData* ThreadData::first() {
-  base::AutoLock lock(*list_lock_.Pointer());
-  return all_thread_data_list_head_;
-}
+const std::string ThreadData::thread_name() const { return thread_name_; }
 
 // This may be called from another thread.
-void ThreadData::SnapshotBirthMap(BirthMap *output) const {
+void ThreadData::SnapshotMaps(bool reset_max,
+                              BirthMap* birth_map,
+                              DeathMap* death_map) {
   base::AutoLock lock(map_lock_);
   for (BirthMap::const_iterator it = birth_map_.begin();
        it != birth_map_.end(); ++it)
-    (*output)[it->first] = it->second;
+    (*birth_map)[it->first] = it->second;
+  for (DeathMap::iterator it = death_map_.begin();
+       it != death_map_.end(); ++it) {
+    (*death_map)[it->first] = it->second;
+    if (reset_max)
+      it->second.ResetMax();
+  }
 }
 
-// This may be called from another thread.
-void ThreadData::SnapshotDeathMap(DeathMap *output) const {
-  base::AutoLock lock(map_lock_);
-  for (DeathMap::const_iterator it = death_map_.begin();
-       it != death_map_.end(); ++it)
-    (*output)[it->first] = it->second;
+// static
+void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) {
+  if (!kTrackAllTaskObjects)
+    return;  // Not compiled in.
+  // Get an unchanging copy of a ThreadData list.
+  ThreadData* my_list = ThreadData::first();
+
+  // Gather data serially.
+  // This hackish approach *can* get some slighly corrupt tallies, as we are
+  // grabbing values without the protection of a lock, but it has the advantage
+  // of working even with threads that don't have message loops.  If a user
+  // sees any strangeness, they can always just run their stats gathering a
+  // second time.
+  for (ThreadData* thread_data = my_list;
+       thread_data;
+       thread_data = thread_data->next()) {
+    // Get copy of data.
+    ThreadData::BirthMap birth_map;
+    ThreadData::DeathMap death_map;
+    thread_data->SnapshotMaps(reset_max, &birth_map, &death_map);
+    target->Append(*thread_data, birth_map, death_map);
+  }
 }
 
 // static
@@ -543,7 +631,7 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
   all_thread_data_list_head_ = NULL;
   ++incarnation_counter_;
   // To be clean, break apart the retired worker list (though we leak them).
-  while(first_retired_worker_) {
+  while (first_retired_worker_) {
     ThreadData* worker = first_retired_worker_;
     CHECK_GT(worker->worker_thread_number_, 0);
     first_retired_worker_ = worker->next_retired_worker_;
@@ -617,36 +705,14 @@ base::DictionaryValue* Snapshot::ToValue() const {
 //------------------------------------------------------------------------------
 // DataCollector
 
-DataCollector::DataCollector() {
-  if (!kTrackAllTaskObjects)
-    return;  // Not compiled in.
-
-  // Get an unchanging copy of a ThreadData list.
-  ThreadData* my_list = ThreadData::first();
-
-  // Gather data serially.
-  // This hackish approach *can* get some slighly corrupt tallies, as we are
-  // grabbing values without the protection of a lock, but it has the advantage
-  // of working even with threads that don't have message loops.  If a user
-  // sees any strangeness, they can always just run their stats gathering a
-  // second time.
-  for (ThreadData* thread_data = my_list;
-       thread_data;
-       thread_data = thread_data->next()) {
-    Append(*thread_data);
-  }
-}
+DataCollector::DataCollector() {}
 
 DataCollector::~DataCollector() {
 }
 
-void DataCollector::Append(const ThreadData& thread_data) {
-  // Get copy of data.
-  ThreadData::BirthMap birth_map;
-  thread_data.SnapshotBirthMap(&birth_map);
-  ThreadData::DeathMap death_map;
-  thread_data.SnapshotDeathMap(&death_map);
+void DataCollector::Append(const ThreadData &thread_data,
+                           const ThreadData::BirthMap &birth_map,
+                           const ThreadData::DeathMap &death_map) {
   for (ThreadData::DeathMap::const_iterator it = death_map.begin();
        it != death_map.end(); ++it) {
     collection_.push_back(Snapshot(*it->first, thread_data, it->second));
...
@@ -12,6 +12,7 @@
 #include <vector>
 
 #include "base/base_export.h"
+#include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/profiler/tracked_time.h"
@@ -197,8 +198,8 @@ class BASE_EXPORT BirthOnThread {
  public:
   BirthOnThread(const Location& location, const ThreadData& current);
 
-  const Location location() const { return location_; }
-  const ThreadData* birth_thread() const { return birth_thread_; }
+  const Location location() const;
+  const ThreadData* birth_thread() const;
 
  private:
   // File/lineno of birth.  This defines the essence of the task, as the context
@@ -220,17 +221,17 @@ class BASE_EXPORT Births: public BirthOnThread {
  public:
   Births(const Location& location, const ThreadData& current);
 
-  int birth_count() const { return birth_count_; }
+  int birth_count() const;
 
   // When we have a birth we update the count for this BirhPLace.
-  void RecordBirth() { ++birth_count_; }
+  void RecordBirth();
 
   // When a birthplace is changed (updated), we need to decrement the counter
   // for the old instance.
-  void ForgetBirth() { --birth_count_; }  // We corrected a birth place.
+  void ForgetBirth();
 
   // Hack to quickly reset all counts to zero.
-  void Clear() { birth_count_ = 0; }
+  void Clear();
 
  private:
   // The number of births on this thread for our location_.
@@ -247,70 +248,49 @@ class BASE_EXPORT Births: public BirthOnThread {
 class BASE_EXPORT DeathData {
  public:
   // Default initializer.
-  DeathData() : count_(0) {}
+  DeathData();
 
   // When deaths have not yet taken place, and we gather data from all the
   // threads, we create DeathData stats that tally the number of births without
-  // a corrosponding death.
-  explicit DeathData(int count)
-      : count_(count) {}
+  // a corresponding death.
+  explicit DeathData(int count);
 
   // Update stats for a task destruction (death) that had a Run() time of
   // |duration|, and has had a queueing delay of |queue_duration|.
-  void RecordDeath(DurationInt queue_duration,
-                   DurationInt run_duration);
-
-  // Metrics accessors.
-  int count() const { return count_; }
-  DurationInt run_duration() const { return run_time_.duration(); }
-  DurationInt AverageMsRunDuration() const;
-  DurationInt run_duration_max() const { return run_time_.max(); }
-  DurationInt queue_duration() const { return queue_time_.duration(); }
-  DurationInt AverageMsQueueDuration() const;
-  DurationInt queue_duration_max() const { return queue_time_.max(); }
-
-  // Accumulate metrics from other into this.  This method is never used on
-  // realtime statistics, and only used in snapshots and aggregatinos.
-  void AddDeathData(const DeathData& other);
+  void RecordDeath(const DurationInt queue_duration,
+                   const DurationInt run_duration,
+                   int random_number);
+
+  // Metrics accessors, used only in tests.
+  int count() const;
+  DurationInt run_duration_sum() const;
+  DurationInt run_duration_max() const;
+  DurationInt run_duration_sample() const;
+  DurationInt queue_duration_sum() const;
+  DurationInt queue_duration_max() const;
+  DurationInt queue_duration_sample() const;
 
   // Construct a DictionaryValue instance containing all our stats.  The caller
   // assumes ownership of the returned instance.
   base::DictionaryValue* ToValue() const;
 
+  // Reset the max values to zero.
+  void ResetMax();
+
   // Reset all tallies to zero. This is used as a hack on realtime data.
   void Clear();
 
  private:
-  // DeathData::Data is a helper class, useful when different metrics need to be
-  // aggregated, such as queueing times, or run times.
-  class Data {
-   public:
-    Data() : duration_(0), max_(0) {}
-    ~Data() {}
-
-    DurationInt duration() const { return duration_; }
-    DurationInt max() const { return max_; }
-
-    // Agggegate data into our state.
-    void AddData(const Data& other);
-    void AddDuration(DurationInt duration);
-
-    // Central helper function for calculating averages (correctly, in only one
-    // place).
-    DurationInt AverageMsDuration(int count) const;
-
-    // Resets all members to zero.
-    void Clear();
-
-   private:
-    DurationInt duration_;  // Sum of all durations seen.
-    DurationInt max_;       // Largest singular duration seen.
-  };
-
-  int count_;             // Number of deaths seen.
-  Data run_time_;         // Data about run time durations.
-  Data queue_time_;       // Data about queueing times durations.
+  // Number of runs seen.
+  int count_;
+  // Data about run time durations.
+  DurationInt run_duration_sum_;
+  DurationInt run_duration_max_;
+  DurationInt run_duration_sample_;
+  // Data about queueing times durations.
+  DurationInt queue_duration_sum_;
+  DurationInt queue_duration_max_;
+  DurationInt queue_duration_sample_;
 };
 
 //------------------------------------------------------------------------------
@@ -329,29 +309,9 @@ class BASE_EXPORT Snapshot {
   // When snapshotting a birth, with no death yet, use this:
   Snapshot(const BirthOnThread& birth_on_thread, int count);
 
-  const ThreadData* birth_thread() const { return birth_->birth_thread(); }
-  const Location location() const { return birth_->location(); }
-  const BirthOnThread& birth() const { return *birth_; }
-  const ThreadData* death_thread() const {return death_thread_; }
-  const DeathData& death_data() const { return death_data_; }
+  // Accessor, that provides default value when there is no death thread.
   const std::string DeathThreadName() const;
 
-  int count() const { return death_data_.count(); }
-  DurationInt run_duration() const { return death_data_.run_duration(); }
-  DurationInt AverageMsRunDuration() const {
-    return death_data_.AverageMsRunDuration();
-  }
-  DurationInt run_duration_max() const {
-    return death_data_.run_duration_max();
-  }
-  DurationInt queue_duration() const { return death_data_.queue_duration(); }
-  DurationInt AverageMsQueueDuration() const {
-    return death_data_.AverageMsQueueDuration();
-  }
-  DurationInt queue_duration_max() const {
-    return death_data_.queue_duration_max();
-  }
-
   // Construct a DictionaryValue instance containing all our data recursively.
   // The caller assumes ownership of the memory in the returned instance.
   base::DictionaryValue* ToValue() const;
@@ -362,53 +322,6 @@ class BASE_EXPORT Snapshot {
   DeathData death_data_;
 };
 
-//------------------------------------------------------------------------------
-// DataCollector is a container class for Snapshot and BirthOnThread count
-// items.
-
-class BASE_EXPORT DataCollector {
- public:
-  typedef std::vector<Snapshot> Collection;
-
-  // Construct with a list of how many threads should contribute.  This helps us
-  // determine (in the async case) when we are done with all contributions.
-  DataCollector();
-  ~DataCollector();
-
-  // Adds all stats from the indicated thread into our arrays.  This function
-  // uses locks at the lowest level (when accessing the underlying maps which
-  // could change when not locked), and can be called from any threads.
-  void Append(const ThreadData& thread_data);
-
-  // After the accumulation phase, the following accessor is used to process the
-  // data (i.e., sort it, filter it, etc.).
-  Collection* collection();
-
-  // Adds entries for all the remaining living objects (objects that have
-  // tallied a birth, but have not yet tallied a matching death, and hence must
-  // be either running, queued up, or being held in limbo for future posting).
-  // This should be called after all known ThreadData instances have been
-  // processed using Append().
-  void AddListOfLivingObjects();
-
-  // Generates a ListValue representation of the vector of snapshots. The caller
-  // assumes ownership of the memory in the returned instance.
-  base::ListValue* ToValue() const;
-
- private:
-  typedef std::map<const BirthOnThread*, int> BirthCount;
-
-  // The array that we collect data into.
-  Collection collection_;
-
-  // The total number of births recorded at each location for which we have not
-  // seen a death count.  This map changes as we do Append() calls, and is later
-  // used by AddListOfLivingObjects() to gather up unaccounted for births.
-  BirthCount global_birth_count_;
-
-  DISALLOW_COPY_AND_ASSIGN(DataCollector);
-};
-
 //------------------------------------------------------------------------------
 // For each thread, we have a ThreadData that stores all tracking info generated
 // on this thread.  This prevents the need for locking as data accumulates.
@@ -443,8 +356,9 @@ class BASE_EXPORT ThreadData {
   // Constructs a DictionaryValue instance containing all recursive results in
   // our process.  The caller assumes ownership of the memory in the returned
-  // instance.
-  static base::DictionaryValue* ToValue();
+  // instance.  During the scavenging, if |reset_max| is true, then the
+  // DeathData instances max-values are reset to zero during this scan.
+  static base::DictionaryValue* ToValue(bool reset_max);
 
   // Finds (or creates) a place to count births from the given location in this
   // thread, and increment that tally.
@@ -484,24 +398,13 @@ class BASE_EXPORT ThreadData {
                                               const TrackedTime& start_of_run,
                                               const TrackedTime& end_of_run);
 
-  const std::string thread_name() const { return thread_name_; }
-
-  // ---------------------
-  // TODO(jar):
-  // The following functions should all be private, and are only public because
-  // the collection is done externally.  We need to relocate that code from the
-  // collection class into this class, and then all these methods can be made
-  // private.
-
-  // (Thread safe) Get start of list of all ThreadData instances.
-  static ThreadData* first();
-  // Iterate through the null terminated list of ThreadData instances.
-  ThreadData* next() const { return next_; }
-  // Using our lock, make a copy of the specified maps.  These calls may arrive
-  // from non-local threads, and are used to quickly scan data from all threads
-  // in order to build JSON for about:profiler.
-  void SnapshotBirthMap(BirthMap *output) const;
-  void SnapshotDeathMap(DeathMap *output) const;
-  // -------- end of should be private methods.
+  const std::string thread_name() const;
+
+  // Snapshot (under a lock) copies of the maps in each ThreadData instance.  For
+  // each set of maps (BirthMap and DeathMap) call the Append() method of the
+  // |target| DataCollector.  If |reset_max| is true, then the max values in
+  // each DeathData instance should be reset during the scan.
+  static void SendAllMaps(bool reset_max, class DataCollector* target);
 
   // Hack: asynchronously clear all birth counts and death tallies data values
   // in all ThreadData instances.  The numerical (zeroing) part is done without
@@ -540,7 +443,12 @@ class BASE_EXPORT ThreadData {
  private:
   // Allow only tests to call ShutdownSingleThreadedCleanup.  We NEVER call it
   // in production code.
+  // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
+  // better change of optimizing (inlining? etc.) private methods (knowing that
+  // there will be no need for an external entry point).
   friend class TrackedObjectsTest;
+  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
+  FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
 
   // Worker thread construction creates a name since there is none.
   explicit ThreadData(int thread_number);
@@ -555,6 +463,13 @@ class BASE_EXPORT ThreadData {
   // the instance permanently on that list.
   void PushToHeadOfList();
 
+  // (Thread safe) Get start of list of all ThreadData instances using the lock.
+  static ThreadData* first();
+
+  // Iterate through the null terminated list of ThreadData instances.
+  ThreadData* next() const;
+
   // In this thread's data, record a new birth.
   Births* TallyABirth(const Location& location);
 
@@ -563,6 +478,15 @@ class BASE_EXPORT ThreadData {
                     DurationInt queue_duration,
                     DurationInt duration);
 
+  // Using our lock, make a copy of the specified maps.  This call may be made
+  // on non-local threads, which necessitate the use of the lock to prevent
+  // the map(s) from being reallocaed while they are copied.  If |reset_max| is
+  // true, then, just after we copy the DeathMap, we will set the max values to
+  // zero in the active DeathMap (not the snapshot).
+  void SnapshotMaps(bool reset_max,
+                    BirthMap* birth_map,
+                    DeathMap* death_map);
+
   // Using our lock to protect the iteration, Clear all birth and death data.
   void Reset();
@@ -669,9 +593,64 @@ class BASE_EXPORT ThreadData {
   // writing is only done from this thread.
   mutable base::Lock map_lock_;
 
+  // A random number that we used to select decide which sample to keep as a
+  // representative sample in each DeathData instance.  We can't start off with
+  // much randomness (because we can't call RandInt() on all our threads), so
+  // we stir in more and more as we go.
+  int32 random_number_;
+
   DISALLOW_COPY_AND_ASSIGN(ThreadData);
 };
 
+//------------------------------------------------------------------------------
+// DataCollector is a container class for Snapshot and BirthOnThread count
+// items.
+
+class BASE_EXPORT DataCollector {
+ public:
+  typedef std::vector<Snapshot> Collection;
+
+  // Construct with a list of how many threads should contribute.  This helps us
+  // determine (in the async case) when we are done with all contributions.
+  DataCollector();
+  ~DataCollector();
+
+  // Adds all stats from the indicated thread into our arrays.  Accepts copies
+  // of the birth_map and death_map, so that the data will not change during the
+  // iterations and processing.
+  void Append(const ThreadData &thread_data,
+              const ThreadData::BirthMap &birth_map,
+              const ThreadData::DeathMap &death_map);
+
+  // After the accumulation phase, the following accessor is used to process the
+  // data (i.e., sort it, filter it, etc.).
+  Collection* collection();
+
+  // Adds entries for all the remaining living objects (objects that have
+  // tallied a birth, but have not yet tallied a matching death, and hence must
+  // be either running, queued up, or being held in limbo for future posting).
+  // This should be called after all known ThreadData instances have been
+  // processed using Append().
+  void AddListOfLivingObjects();
+
+  // Generates a ListValue representation of the vector of snapshots. The caller
+  // assumes ownership of the memory in the returned instance.
+  base::ListValue* ToValue() const;
+
+ private:
+  typedef std::map<const BirthOnThread*, int> BirthCount;
+
+  // The array that we collect data into.
+  Collection collection_;
+
+  // The total number of births recorded at each location for which we have not
+  // seen a death count.  This map changes as we do Append() calls, and is later
+  // used by AddListOfLivingObjects() to gather up unaccounted for births.
+  BirthCount global_birth_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(DataCollector);
+};
+
 //------------------------------------------------------------------------------
 // Provide simple way to to start global tracking, and to tear down tracking
 // when done.  The design has evolved to *not* do any teardown (and just leak
...
@@ -45,10 +45,9 @@ TEST_F(TrackedObjectsTest, MinimalStartupShutdown) {
   EXPECT_TRUE(!data->next());
   EXPECT_EQ(data, ThreadData::Get());
   ThreadData::BirthMap birth_map;
-  data->SnapshotBirthMap(&birth_map);
-  EXPECT_EQ(0u, birth_map.size());
   ThreadData::DeathMap death_map;
-  data->SnapshotDeathMap(&death_map);
+  data->SnapshotMaps(false, &birth_map, &death_map);
+  EXPECT_EQ(0u, birth_map.size());
   EXPECT_EQ(0u, death_map.size());
   // Cleanup with no leaking.
   ShutdownSingleThreadedCleanup(false);
@@ -62,10 +61,9 @@ TEST_F(TrackedObjectsTest, MinimalStartupShutdown) {
   EXPECT_TRUE(!data->next());
   EXPECT_EQ(data, ThreadData::Get());
   birth_map.clear();
-  data->SnapshotBirthMap(&birth_map);
-  EXPECT_EQ(0u, birth_map.size());
   death_map.clear();
-  data->SnapshotDeathMap(&death_map);
+  data->SnapshotMaps(false, &birth_map, &death_map);
+  EXPECT_EQ(0u, birth_map.size());
   EXPECT_EQ(0u, death_map.size());
 }
 
@@ -77,16 +75,15 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
   const Location& location = FROM_HERE;
   ThreadData::TallyABirthIfActive(location);
 
-  const ThreadData* data = ThreadData::first();
+  ThreadData* data = ThreadData::first();
   ASSERT_TRUE(data);
   EXPECT_TRUE(!data->next());
   EXPECT_EQ(data, ThreadData::Get());
   ThreadData::BirthMap birth_map;
-  data->SnapshotBirthMap(&birth_map);
+  ThreadData::DeathMap death_map;
+  data->SnapshotMaps(false, &birth_map, &death_map);
   EXPECT_EQ(1u, birth_map.size());                         // 1 birth location.
   EXPECT_EQ(1, birth_map.begin()->second->birth_count());  // 1 birth.
-  ThreadData::DeathMap death_map;
-  data->SnapshotDeathMap(&death_map);
   EXPECT_EQ(0u, death_map.size());                         // No deaths.
 
@@ -100,11 +97,10 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
                                               kBogusEndRunTime);
 
   birth_map.clear();
-  data->SnapshotBirthMap(&birth_map);
+  death_map.clear();
+  data->SnapshotMaps(false, &birth_map, &death_map);
   EXPECT_EQ(1u, birth_map.size());                         // 1 birth location.
   EXPECT_EQ(2, birth_map.begin()->second->birth_count());  // 2 births.
-  death_map.clear();
-  data->SnapshotDeathMap(&death_map);
   EXPECT_EQ(1u, death_map.size());                         // 1 location.
   EXPECT_EQ(1, death_map.begin()->second.count());         // 1 death.
@@ -118,35 +114,40 @@ TEST_F(TrackedObjectsTest, DeathDataTest) {
   scoped_ptr<DeathData> data(new DeathData());
   ASSERT_NE(data, reinterpret_cast<DeathData*>(NULL));
-  EXPECT_EQ(data->run_duration(), 0);
-  EXPECT_EQ(data->queue_duration(), 0);
-  EXPECT_EQ(data->AverageMsRunDuration(), 0);
-  EXPECT_EQ(data->AverageMsQueueDuration(), 0);
+  EXPECT_EQ(data->run_duration_sum(), 0);
+  EXPECT_EQ(data->run_duration_sample(), 0);
+  EXPECT_EQ(data->queue_duration_sum(), 0);
+  EXPECT_EQ(data->queue_duration_sample(), 0);
   EXPECT_EQ(data->count(), 0);
 
   DurationInt run_ms = 42;
   DurationInt queue_ms = 8;
 
-  data->RecordDeath(queue_ms, run_ms);
-  EXPECT_EQ(data->run_duration(), run_ms);
-  EXPECT_EQ(data->queue_duration(), queue_ms);
-  EXPECT_EQ(data->AverageMsRunDuration(), run_ms);
-  EXPECT_EQ(data->AverageMsQueueDuration(), queue_ms);
+  const int kUnrandomInt = 0;  // Fake random int that ensure we sample data.
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+  EXPECT_EQ(data->run_duration_sum(), run_ms);
+  EXPECT_EQ(data->run_duration_sample(), run_ms);
+  EXPECT_EQ(data->queue_duration_sum(), queue_ms);
+  EXPECT_EQ(data->queue_duration_sample(), queue_ms);
   EXPECT_EQ(data->count(), 1);
 
-  data->RecordDeath(queue_ms, run_ms);
-  EXPECT_EQ(data->run_duration(), run_ms + run_ms);
-  EXPECT_EQ(data->queue_duration(), queue_ms + queue_ms);
-  EXPECT_EQ(data->AverageMsRunDuration(), run_ms);
-  EXPECT_EQ(data->AverageMsQueueDuration(), queue_ms);
+  data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+  EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
+  EXPECT_EQ(data->run_duration_sample(), run_ms);
+  EXPECT_EQ(data->queue_duration_sum(), queue_ms + queue_ms);
+  EXPECT_EQ(data->queue_duration_sample(), queue_ms);
   EXPECT_EQ(data->count(), 2);
 
   scoped_ptr<base::DictionaryValue> dictionary(data->ToValue());
   int integer;
   EXPECT_TRUE(dictionary->GetInteger("run_ms", &integer));
   EXPECT_EQ(integer, 2 * run_ms);
+  EXPECT_TRUE(dictionary->GetInteger("run_ms_sample", &integer));
+  EXPECT_EQ(integer, run_ms);
   EXPECT_TRUE(dictionary->GetInteger("queue_ms", &integer));
   EXPECT_EQ(integer, 2 * queue_ms);
+  EXPECT_TRUE(dictionary->GetInteger("queue_ms_sample", &integer));
+  EXPECT_EQ(integer, queue_ms);
   EXPECT_TRUE(dictionary->GetInteger("count", &integer));
   EXPECT_EQ(integer, 2);
 
@@ -157,8 +158,10 @@ TEST_F(TrackedObjectsTest, DeathDataTest) {
       "\"count\":2,"
       "\"queue_ms\":16,"
      "\"queue_ms_max\":8,"
+      "\"queue_ms_sample\":8,"
       "\"run_ms\":84,"
-      "\"run_ms_max\":42"
+      "\"run_ms_max\":42,"
+      "\"run_ms_sample\":42"
       "}";
   EXPECT_EQ(birth_only_result, json);
 }
@@ -177,7 +180,7 @@ TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToValueWorkerThread) {
   // We should now see a NULL birth record.
   EXPECT_EQ(birth, reinterpret_cast<Births*>(NULL));
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string birth_only_result = "{"
@@ -203,7 +206,7 @@ TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToValueMainThread) {
   // We expect to not get a birth record.
   EXPECT_EQ(birth, reinterpret_cast<Births*>(NULL));
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string birth_only_result = "{"
@@ -225,7 +228,7 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueWorkerThread) {
   Births* birth = ThreadData::TallyABirthIfActive(location);
   EXPECT_NE(birth, reinterpret_cast<Births*>(NULL));
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string birth_only_result = "{"
@@ -236,8 +239,10 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueWorkerThread) {
       "\"count\":1,"
       "\"queue_ms\":0,"
       "\"queue_ms_max\":0,"
+      "\"queue_ms_sample\":0,"
       "\"run_ms\":0,"
-      "\"run_ms_max\":0"
+      "\"run_ms_max\":0,"
+      "\"run_ms_sample\":0"
       "},"
       "\"death_thread\":\"Still_Alive\","
       "\"location\":{"
@@ -265,7 +270,7 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueMainThread) {
   Births* birth = ThreadData::TallyABirthIfActive(location);
   EXPECT_NE(birth, reinterpret_cast<Births*>(NULL));
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string birth_only_result = "{"
@@ -276,8 +281,10 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueMainThread) {
       "\"count\":1,"
       "\"queue_ms\":0,"
       "\"queue_ms_max\":0,"
+      "\"queue_ms_sample\":0,"
       "\"run_ms\":0,"
-      "\"run_ms_max\":0"
+      "\"run_ms_max\":0,"
+      "\"run_ms_sample\":0"
       "},"
       "\"death_thread\":\"Still_Alive\","
       "\"location\":{"
@@ -318,7 +325,7 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueMainThread) {
   ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
                                               kStartOfRun, kEndOfRun);
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -329,8 +336,10 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueMainThread) {
      "\"count\":1,"
       "\"queue_ms\":4,"
       "\"queue_ms_max\":4,"
+      "\"queue_ms_sample\":4,"
       "\"run_ms\":2,"
-      "\"run_ms_max\":2"
+      "\"run_ms_max\":2,"
+      "\"run_ms_sample\":2"
       "},"
       "\"death_thread\":\"SomeMainThreadName\","
       "\"location\":{"
@@ -378,7 +387,7 @@ TEST_F(TrackedObjectsTest, LifeCycleMidDeactivatedToValueMainThread) {
   ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
                                               kStartOfRun, kEndOfRun);
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -389,8 +398,10 @@ TEST_F(TrackedObjectsTest, LifeCycleMidDeactivatedToValueMainThread) {
       "\"count\":1,"
       "\"queue_ms\":4,"
       "\"queue_ms_max\":4,"
+      "\"queue_ms_sample\":4,"
       "\"run_ms\":2,"
-      "\"run_ms_max\":2"
+      "\"run_ms_max\":2,"
+      "\"run_ms_sample\":2"
       "},"
       "\"death_thread\":\"SomeMainThreadName\","
       "\"location\":{"
@@ -433,7 +444,7 @@ TEST_F(TrackedObjectsTest, LifeCyclePreDeactivatedToValueMainThread) {
   ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
                                               kStartOfRun, kEndOfRun);
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -465,7 +476,8 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
   ThreadData::TallyRunOnWorkerThreadIfTracking(birth, kTimePosted,
                                                kStartOfRun, kEndOfRun);
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  // Call for the ToValue, but tell it to not reset the maxes after scanning.
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -476,8 +488,10 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
       "\"count\":1,"
       "\"queue_ms\":4,"
       "\"queue_ms_max\":4,"
+      "\"queue_ms_sample\":4,"
       "\"run_ms\":2,"
-      "\"run_ms_max\":2"
+      "\"run_ms_max\":2,"
+      "\"run_ms_sample\":2"
       "},"
       "\"death_thread\":\"WorkerThread-1\","
       "\"location\":{"
@@ -489,6 +503,42 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
       "]"
       "}";
   EXPECT_EQ(one_line_result, json);
+
+  // Call for the ToValue, but tell it to reset the maxes after scanning.
+  // We'll still get the same values, but the data will be reset (which we'll
+  // see in a moment).
+  value.reset(ThreadData::ToValue(true));
+  base::JSONWriter::Write(value.get(), false, &json);
+  // Result should be unchanged.
+  EXPECT_EQ(one_line_result, json);
+
+  // Call for the ToValue, and now we'll see the result of the last translation,
+  // as the max will have been pushed back to zero.
+  value.reset(ThreadData::ToValue(false));
+  base::JSONWriter::Write(value.get(), false, &json);
+  std::string one_line_result_with_zeros = "{"
+      "\"list\":["
+        "{"
+          "\"birth_thread\":\"WorkerThread-1\","
+          "\"death_data\":{"
+            "\"count\":1,"
+            "\"queue_ms\":4,"
+            "\"queue_ms_max\":0,"  // Note zero here.
+            "\"queue_ms_sample\":4,"
+            "\"run_ms\":2,"
+            "\"run_ms_max\":0,"  // Note zero here.
+            "\"run_ms_sample\":2"
+          "},"
+          "\"death_thread\":\"WorkerThread-1\","
+          "\"location\":{"
+            "\"file_name\":\"FixedFileName\","
+            "\"function_name\":\"LifeCycleToValueWorkerThread\","
+            "\"line_number\":236"
+          "}"
+        "}"
+      "]"
+      "}";
+  EXPECT_EQ(one_line_result_with_zeros, json);
 }
 
 TEST_F(TrackedObjectsTest, TwoLives) {
@@ -526,7 +576,7 @@ TEST_F(TrackedObjectsTest, TwoLives) {
   ThreadData::TallyRunOnNamedThreadIfTracking(pending_task2,
                                               kStartOfRun, kEndOfRun);
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -537,8 +587,10 @@ TEST_F(TrackedObjectsTest, TwoLives) {
       "\"count\":2,"
       "\"queue_ms\":8,"
       "\"queue_ms_max\":4,"
+      "\"queue_ms_sample\":4,"
       "\"run_ms\":4,"
-      "\"run_ms_max\":2"
+      "\"run_ms_max\":2,"
+      "\"run_ms_sample\":2"
       "},"
       "\"death_thread\":\"SomeFileThreadName\","
       "\"location\":{"
@@ -583,7 +635,7 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
   base::TrackingInfo pending_task2(second_location, kDelayedStartTime);
   pending_task2.time_posted = kTimePosted;  // Overwrite implied Now().
 
-  scoped_ptr<base::Value> value(ThreadData::ToValue());
+  scoped_ptr<base::Value> value(ThreadData::ToValue(false));
   std::string json;
   base::JSONWriter::Write(value.get(), false, &json);
   std::string one_line_result = "{"
@@ -594,8 +646,10 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
       "\"count\":1,"
      "\"queue_ms\":4,"
       "\"queue_ms_max\":4,"
+      "\"queue_ms_sample\":4,"
       "\"run_ms\":2,"
-      "\"run_ms_max\":2"
+      "\"run_ms_max\":2,"
+      "\"run_ms_sample\":2"
       "},"
      "\"death_thread\":\"SomeFileThreadName\","
      "\"location\":{"
@@ -610,8 +664,10 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
       "\"count\":1,"
       "\"queue_ms\":0,"
       "\"queue_ms_max\":0,"
+      "\"queue_ms_sample\":0,"
       "\"run_ms\":0,"
-      "\"run_ms_max\":0"
+      "\"run_ms_max\":0,"
+      "\"run_ms_sample\":0"
       "},"
       "\"death_thread\":\"Still_Alive\","
       "\"location\":{"
...
@@ -245,7 +245,7 @@ int TrackingSynchronizer::RegisterAndNotifyAllProcesses(
   content::ProfilerController::GetInstance()->GetProfilerData(sequence_number);
 
   // Send profiler_data from browser process.
-  base::DictionaryValue* value = tracked_objects::ThreadData::ToValue();
+  base::DictionaryValue* value = tracked_objects::ThreadData::ToValue(false);
   const std::string process_type =
       content::GetProcessTypeNameInEnglish(content::PROCESS_TYPE_BROWSER);
   value->SetString("process_type", process_type);
...
@@ -232,7 +232,7 @@ void ChildThread::OnGetChildProfilerData(
     int sequence_number,
     const std::string& process_type) {
   scoped_ptr<base::DictionaryValue> value(
-      tracked_objects::ThreadData::ToValue());
+      tracked_objects::ThreadData::ToValue(false));
   value->SetString("process_type", process_type);
   value->SetInteger("process_id", base::GetCurrentProcId());
...