Commit b7d08200 authored by erg@google.com

Properly order the .cc files based on the .h files in base/.

BUG=68682
TEST=compiles

Review URL: http://codereview.chromium.org/6385003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@72505 0039d316-1c4b-4281-b951-d872f2087c98
parent 5f59be20
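
The change below is purely mechanical: in each .cc file, function definitions are reordered so they appear in the same order as their declarations in the corresponding .h file. As a rough illustration of the convention (a hypothetical Foo class, not code from this commit):

// foo.h (hypothetical): the declaration order is the source of truth.
class Foo {
 public:
  Foo();
  ~Foo();
  int count() const;
  void Reset();
 private:
  int count_;
};

// foo.cc (hypothetical): definitions follow the same order as foo.h --
// constructor, destructor, count(), Reset(). A definition that drifts out of
// that order (e.g. the destructor defined after Reset()) is the kind of thing
// this commit cleans up.
Foo::Foo() : count_(0) {}
Foo::~Foo() {}
int Foo::count() const { return count_; }
void Foo::Reset() { count_ = 0; }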
@@ -114,6 +114,8 @@ void FieldTrial::EnableBenchmarking() {
enable_benchmarking_ = true;
}
FieldTrial::~FieldTrial() {}
// static
Time FieldTrial::GetBuildTime() {
Time integral_build_time;
@@ -124,8 +126,6 @@ Time FieldTrial::GetBuildTime() {
return integral_build_time;
}
FieldTrial::~FieldTrial() {}
//------------------------------------------------------------------------------
// FieldTrialList methods and members.
...
@@ -53,48 +53,6 @@ scoped_refptr<Histogram> Histogram::FactoryTimeGet(const std::string& name,
bucket_count, flags);
}
Histogram::Histogram(const std::string& name, Sample minimum,
Sample maximum, size_t bucket_count)
: histogram_name_(name),
declared_min_(minimum),
declared_max_(maximum),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
sample_() {
Initialize();
}
Histogram::Histogram(const std::string& name, TimeDelta minimum,
TimeDelta maximum, size_t bucket_count)
: histogram_name_(name),
declared_min_(static_cast<int> (minimum.InMilliseconds())),
declared_max_(static_cast<int> (maximum.InMilliseconds())),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
sample_() {
Initialize();
}
Histogram::~Histogram() {
if (StatisticsRecorder::dump_on_exit()) {
std::string output;
WriteAscii(true, "\n", &output);
LOG(INFO) << output;
}
// Just to make sure most derived class did this properly...
DCHECK(ValidateBucketRanges());
DCHECK(HasValidRangeChecksum());
}
bool Histogram::PrintEmptyBucket(size_t index) const {
return true;
}
void Histogram::Add(int value) {
if (value > kSampleType_MAX - 1)
value = kSampleType_MAX - 1;
@@ -190,31 +148,223 @@ void Histogram::WriteAscii(bool graph_it, const std::string& newline,
DCHECK_EQ(sample_count, past);
}
bool Histogram::ValidateBucketRanges() const {
// Standard assertions that all bucket ranges should satisfy.
DCHECK_EQ(bucket_count_ + 1, ranges_.size());
DCHECK_EQ(0, ranges_[0]);
DCHECK_EQ(declared_min(), ranges_[1]);
DCHECK_EQ(declared_max(), ranges_[bucket_count_ - 1]);
DCHECK_EQ(kSampleType_MAX, ranges_[bucket_count_]);
return true;
}
void Histogram::Initialize() {
sample_.Resize(*this);
if (declared_min_ < 1)
declared_min_ = 1;
if (declared_max_ > kSampleType_MAX - 1)
declared_max_ = kSampleType_MAX - 1;
DCHECK_LE(declared_min_, declared_max_);
DCHECK_GT(bucket_count_, 1u);
size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
DCHECK_LE(bucket_count_, maximal_bucket_count);
DCHECK_EQ(0, ranges_[0]);
ranges_[bucket_count_] = kSampleType_MAX;
InitializeBucketRange();
DCHECK(ValidateBucketRanges());
StatisticsRecorder::Register(this);
}
// static
std::string Histogram::SerializeHistogramInfo(const Histogram& histogram,
const SampleSet& snapshot) {
DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type());
Pickle pickle;
pickle.WriteString(histogram.histogram_name());
pickle.WriteInt(histogram.declared_min());
pickle.WriteInt(histogram.declared_max());
pickle.WriteSize(histogram.bucket_count());
pickle.WriteInt(histogram.range_checksum());
pickle.WriteInt(histogram.histogram_type());
pickle.WriteInt(histogram.flags());
snapshot.Serialize(&pickle);
return std::string(static_cast<const char*>(pickle.data()), pickle.size());
}
// static
bool Histogram::DeserializeHistogramInfo(const std::string& histogram_info) {
if (histogram_info.empty()) {
return false;
}
Pickle pickle(histogram_info.data(),
static_cast<int>(histogram_info.size()));
std::string histogram_name;
int declared_min;
int declared_max;
size_t bucket_count;
int range_checksum;
int histogram_type;
int pickle_flags;
SampleSet sample;
void* iter = NULL;
if (!pickle.ReadString(&iter, &histogram_name) ||
!pickle.ReadInt(&iter, &declared_min) ||
!pickle.ReadInt(&iter, &declared_max) ||
!pickle.ReadSize(&iter, &bucket_count) ||
!pickle.ReadInt(&iter, &range_checksum) ||
!pickle.ReadInt(&iter, &histogram_type) ||
!pickle.ReadInt(&iter, &pickle_flags) ||
!sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
LOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
return false;
}
DCHECK(pickle_flags & kIPCSerializationSourceFlag);
// Since these fields may have come from an untrusted renderer, do additional
// checks above and beyond those in Histogram::Initialize()
if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min ||
INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) {
LOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
return false;
}
Flags flags = static_cast<Flags>(pickle_flags & ~kIPCSerializationSourceFlag);
DCHECK_NE(NOT_VALID_IN_RENDERER, histogram_type);
scoped_refptr<Histogram> render_histogram(NULL);
if (histogram_type == HISTOGRAM) {
render_histogram = Histogram::FactoryGet(
histogram_name, declared_min, declared_max, bucket_count, flags);
} else if (histogram_type == LINEAR_HISTOGRAM) {
render_histogram = LinearHistogram::FactoryGet(
histogram_name, declared_min, declared_max, bucket_count, flags);
} else if (histogram_type == BOOLEAN_HISTOGRAM) {
render_histogram = BooleanHistogram::FactoryGet(histogram_name, flags);
} else {
LOG(ERROR) << "Error Deserializing Histogram Unknown histogram_type: "
<< histogram_type;
return false;
}
DCHECK_EQ(render_histogram->declared_min(), declared_min);
DCHECK_EQ(render_histogram->declared_max(), declared_max);
DCHECK_EQ(render_histogram->bucket_count(), bucket_count);
DCHECK_EQ(render_histogram->range_checksum(), range_checksum);
DCHECK_EQ(render_histogram->histogram_type(), histogram_type);
if (render_histogram->flags() & kIPCSerializationSourceFlag) {
DVLOG(1) << "Single process mode, histogram observed and not copied: "
<< histogram_name;
} else {
DCHECK_EQ(flags & render_histogram->flags(), flags);
render_histogram->AddSampleSet(sample);
}
return true;
}
//------------------------------------------------------------------------------
// Methods for the validating a sample and a related histogram.
//------------------------------------------------------------------------------
Histogram::Inconsistencies Histogram::FindCorruption(
const SampleSet& snapshot) const {
int inconsistencies = NO_INCONSISTENCIES;
Sample previous_range = -1; // Bottom range is always 0.
Sample checksum = 0;
int64 count = 0;
for (size_t index = 0; index < bucket_count(); ++index) {
count += snapshot.counts(index);
int new_range = ranges(index);
checksum += new_range;
if (previous_range >= new_range)
inconsistencies |= BUCKET_ORDER_ERROR;
previous_range = new_range;
}
if (checksum != range_checksum_)
inconsistencies |= RANGE_CHECKSUM_ERROR;
int64 delta64 = snapshot.redundant_count() - count;
if (delta64 != 0) {
int delta = static_cast<int>(delta64);
if (delta != delta64)
delta = INT_MAX; // Flag all giant errors as INT_MAX.
// Since snapshots of histograms are taken asynchronously relative to
// sampling (and snapped from different threads), it is pretty likely that
// we'll catch a redundant count that doesn't match the sample count. We
// allow for a certain amount of slop before flagging this as an
// inconsistency. Even with an inconsistency, we'll snapshot it again (for
// UMA in about a half hour, so we'll eventually get the data, if it was
// not the result of a corruption. If histograms show that 1 is "too tight"
// then we may try to use 2 or 3 for this slop value.
const int kCommonRaceBasedCountMismatch = 1;
if (delta > 0) {
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
if (delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_HIGH_ERROR;
} else {
DCHECK_GT(0, delta);
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
if (-delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_LOW_ERROR;
}
}
return static_cast<Inconsistencies>(inconsistencies);
}
Histogram::ClassType Histogram::histogram_type() const {
return HISTOGRAM;
}
Histogram::Sample Histogram::ranges(size_t i) const {
return ranges_[i];
}
size_t Histogram::bucket_count() const {
return bucket_count_;
}
// Do a safe atomic snapshot of sample data.
// This implementation assumes we are on a safe single thread.
void Histogram::SnapshotSample(SampleSet* sample) const {
// Note locking not done in this version!!!
*sample = sample_;
}
bool Histogram::HasConstructorArguments(Sample minimum,
Sample maximum,
size_t bucket_count) {
return ((minimum == declared_min_) && (maximum == declared_max_) &&
(bucket_count == bucket_count_));
}
bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count) {
return ((minimum.InMilliseconds() == declared_min_) &&
(maximum.InMilliseconds() == declared_max_) &&
(bucket_count == bucket_count_));
}
Histogram::Histogram(const std::string& name, Sample minimum,
Sample maximum, size_t bucket_count)
: histogram_name_(name),
declared_min_(minimum),
declared_max_(maximum),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
sample_() {
Initialize();
}
Histogram::Histogram(const std::string& name, TimeDelta minimum,
TimeDelta maximum, size_t bucket_count)
: histogram_name_(name),
declared_min_(static_cast<int> (minimum.InMilliseconds())),
declared_max_(static_cast<int> (maximum.InMilliseconds())),
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
range_checksum_(0),
sample_() {
Initialize();
}
Histogram::~Histogram() {
if (StatisticsRecorder::dump_on_exit()) {
std::string output;
WriteAscii(true, "\n", &output);
LOG(INFO) << output;
}
// Just to make sure most derived class did this properly...
DCHECK(ValidateBucketRanges());
DCHECK(HasValidRangeChecksum());
}
bool Histogram::PrintEmptyBucket(size_t index) const {
return true;
}
// Calculate what range of values are held in each bucket.
@@ -295,6 +445,53 @@ void Histogram::ResetRangeChecksum() {
range_checksum_ = CalculateRangeChecksum();
}
const std::string Histogram::GetAsciiBucketRange(size_t i) const {
std::string result;
if (kHexRangePrintingFlag & flags_)
StringAppendF(&result, "%#x", ranges(i));
else
StringAppendF(&result, "%d", ranges(i));
return result;
}
// Update histogram data with new sample.
void Histogram::Accumulate(Sample value, Count count, size_t index) {
// Note locking not done in this version!!!
sample_.Accumulate(value, count, index);
}
void Histogram::SetBucketRange(size_t i, Sample value) {
DCHECK_GT(bucket_count_, i);
ranges_[i] = value;
}
bool Histogram::ValidateBucketRanges() const {
// Standard assertions that all bucket ranges should satisfy.
DCHECK_EQ(bucket_count_ + 1, ranges_.size());
DCHECK_EQ(0, ranges_[0]);
DCHECK_EQ(declared_min(), ranges_[1]);
DCHECK_EQ(declared_max(), ranges_[bucket_count_ - 1]);
DCHECK_EQ(kSampleType_MAX, ranges_[bucket_count_]);
return true;
}
void Histogram::Initialize() {
sample_.Resize(*this);
if (declared_min_ < 1)
declared_min_ = 1;
if (declared_max_ > kSampleType_MAX - 1)
declared_max_ = kSampleType_MAX - 1;
DCHECK_LE(declared_min_, declared_max_);
DCHECK_GT(bucket_count_, 1u);
size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
DCHECK_LE(bucket_count_, maximal_bucket_count);
DCHECK_EQ(0, ranges_[0]);
ranges_[bucket_count_] = kSampleType_MAX;
InitializeBucketRange();
DCHECK(ValidateBucketRanges());
StatisticsRecorder::Register(this);
}
bool Histogram::HasValidRangeChecksum() const {
return CalculateRangeChecksum() == range_checksum_;
}
@@ -308,49 +505,6 @@ Histogram::Sample Histogram::CalculateRangeChecksum() const {
return checksum;
}
//------------------------------------------------------------------------------
// The following two methods can be overridden to provide a thread safe
// version of this class. The cost of locking is low... but an error in each
// of these methods has minimal impact. For now, I'll leave this unlocked,
// and I don't believe I can loose more than a count or two.
// The vectors are NOT reallocated, so there is no risk of them moving around.
// Update histogram data with new sample.
void Histogram::Accumulate(Sample value, Count count, size_t index) {
// Note locking not done in this version!!!
sample_.Accumulate(value, count, index);
}
// Do a safe atomic snapshot of sample data.
// This implementation assumes we are on a safe single thread.
void Histogram::SnapshotSample(SampleSet* sample) const {
// Note locking not done in this version!!!
*sample = sample_;
}
bool Histogram::HasConstructorArguments(Sample minimum,
Sample maximum,
size_t bucket_count) {
return ((minimum == declared_min_) && (maximum == declared_max_) &&
(bucket_count == bucket_count_));
}
bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
TimeDelta maximum,
size_t bucket_count) {
return ((minimum.InMilliseconds() == declared_min_) &&
(maximum.InMilliseconds() == declared_max_) &&
(bucket_count == bucket_count_));
}
//------------------------------------------------------------------------------
// Accessor methods
void Histogram::SetBucketRange(size_t i, Sample value) {
DCHECK_GT(bucket_count_, i);
ranges_[i] = value;
}
//------------------------------------------------------------------------------
// Private methods
@@ -400,15 +554,6 @@ void Histogram::WriteAsciiBucketContext(const int64 past,
}
}
const std::string Histogram::GetAsciiBucketRange(size_t i) const {
std::string result;
if (kHexRangePrintingFlag & flags_)
StringAppendF(&result, "%#x", ranges(i));
else
StringAppendF(&result, "%d", ranges(i));
return result;
}
void Histogram::WriteAsciiBucketValue(Count current, double scaled_sum,
std::string* output) const {
StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
@@ -428,161 +573,6 @@ void Histogram::WriteAsciiBucketGraph(double current_size, double max_size,
output->append(" ");
}
// static
std::string Histogram::SerializeHistogramInfo(const Histogram& histogram,
const SampleSet& snapshot) {
DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type());
Pickle pickle;
pickle.WriteString(histogram.histogram_name());
pickle.WriteInt(histogram.declared_min());
pickle.WriteInt(histogram.declared_max());
pickle.WriteSize(histogram.bucket_count());
pickle.WriteInt(histogram.range_checksum());
pickle.WriteInt(histogram.histogram_type());
pickle.WriteInt(histogram.flags());
snapshot.Serialize(&pickle);
return std::string(static_cast<const char*>(pickle.data()), pickle.size());
}
// static
bool Histogram::DeserializeHistogramInfo(const std::string& histogram_info) {
if (histogram_info.empty()) {
return false;
}
Pickle pickle(histogram_info.data(),
static_cast<int>(histogram_info.size()));
std::string histogram_name;
int declared_min;
int declared_max;
size_t bucket_count;
int range_checksum;
int histogram_type;
int pickle_flags;
SampleSet sample;
void* iter = NULL;
if (!pickle.ReadString(&iter, &histogram_name) ||
!pickle.ReadInt(&iter, &declared_min) ||
!pickle.ReadInt(&iter, &declared_max) ||
!pickle.ReadSize(&iter, &bucket_count) ||
!pickle.ReadInt(&iter, &range_checksum) ||
!pickle.ReadInt(&iter, &histogram_type) ||
!pickle.ReadInt(&iter, &pickle_flags) ||
!sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
LOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
return false;
}
DCHECK(pickle_flags & kIPCSerializationSourceFlag);
// Since these fields may have come from an untrusted renderer, do additional
// checks above and beyond those in Histogram::Initialize()
if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min ||
INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) {
LOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
return false;
}
Flags flags = static_cast<Flags>(pickle_flags & ~kIPCSerializationSourceFlag);
DCHECK_NE(NOT_VALID_IN_RENDERER, histogram_type);
scoped_refptr<Histogram> render_histogram(NULL);
if (histogram_type == HISTOGRAM) {
render_histogram = Histogram::FactoryGet(
histogram_name, declared_min, declared_max, bucket_count, flags);
} else if (histogram_type == LINEAR_HISTOGRAM) {
render_histogram = LinearHistogram::FactoryGet(
histogram_name, declared_min, declared_max, bucket_count, flags);
} else if (histogram_type == BOOLEAN_HISTOGRAM) {
render_histogram = BooleanHistogram::FactoryGet(histogram_name, flags);
} else {
LOG(ERROR) << "Error Deserializing Histogram Unknown histogram_type: "
<< histogram_type;
return false;
}
DCHECK_EQ(render_histogram->declared_min(), declared_min);
DCHECK_EQ(render_histogram->declared_max(), declared_max);
DCHECK_EQ(render_histogram->bucket_count(), bucket_count);
DCHECK_EQ(render_histogram->range_checksum(), range_checksum);
DCHECK_EQ(render_histogram->histogram_type(), histogram_type);
if (render_histogram->flags() & kIPCSerializationSourceFlag) {
DVLOG(1) << "Single process mode, histogram observed and not copied: "
<< histogram_name;
} else {
DCHECK_EQ(flags & render_histogram->flags(), flags);
render_histogram->AddSampleSet(sample);
}
return true;
}
//------------------------------------------------------------------------------
// Methods for the validating a sample and a related histogram.
//------------------------------------------------------------------------------
Histogram::Inconsistencies Histogram::FindCorruption(
const SampleSet& snapshot) const {
int inconsistencies = NO_INCONSISTENCIES;
Sample previous_range = -1; // Bottom range is always 0.
Sample checksum = 0;
int64 count = 0;
for (size_t index = 0; index < bucket_count(); ++index) {
count += snapshot.counts(index);
int new_range = ranges(index);
checksum += new_range;
if (previous_range >= new_range)
inconsistencies |= BUCKET_ORDER_ERROR;
previous_range = new_range;
}
if (checksum != range_checksum_)
inconsistencies |= RANGE_CHECKSUM_ERROR;
int64 delta64 = snapshot.redundant_count() - count;
if (delta64 != 0) {
int delta = static_cast<int>(delta64);
if (delta != delta64)
delta = INT_MAX; // Flag all giant errors as INT_MAX.
// Since snapshots of histograms are taken asynchronously relative to
// sampling (and snapped from different threads), it is pretty likely that
// we'll catch a redundant count that doesn't match the sample count. We
// allow for a certain amount of slop before flagging this as an
// inconsistency. Even with an inconsistency, we'll snapshot it again (for
// UMA in about a half hour, so we'll eventually get the data, if it was
// not the result of a corruption. If histograms show that 1 is "too tight"
// then we may try to use 2 or 3 for this slop value.
const int kCommonRaceBasedCountMismatch = 1;
if (delta > 0) {
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
if (delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_HIGH_ERROR;
} else {
DCHECK_GT(0, delta);
UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
if (-delta > kCommonRaceBasedCountMismatch)
inconsistencies |= COUNT_LOW_ERROR;
}
}
return static_cast<Inconsistencies>(inconsistencies);
}
Histogram::ClassType Histogram::histogram_type() const {
return HISTOGRAM;
}
Histogram::Sample Histogram::ranges(size_t i) const {
return ranges_[i];
}
size_t Histogram::bucket_count() const {
return bucket_count_;
}
//------------------------------------------------------------------------------
// Methods for the Histogram::SampleSet class
//------------------------------------------------------------------------------
@@ -700,6 +690,9 @@ bool Histogram::SampleSet::Deserialize(void** iter, const Pickle& pickle) {
// buckets.
//------------------------------------------------------------------------------
LinearHistogram::~LinearHistogram() {
}
scoped_refptr<Histogram> LinearHistogram::FactoryGet(const std::string& name,
Sample minimum,
Sample maximum,
@@ -733,7 +726,15 @@ scoped_refptr<Histogram> LinearHistogram::FactoryTimeGet(
bucket_count, flags);
}
LinearHistogram::~LinearHistogram() {
}
Histogram::ClassType LinearHistogram::histogram_type() const {
return LINEAR_HISTOGRAM;
}
void LinearHistogram::SetRangeDescriptions(
const DescriptionPair descriptions[]) {
for (int i =0; descriptions[i].description; ++i) {
bucket_description_[descriptions[i].sample] = descriptions[i].description;
}
}
LinearHistogram::LinearHistogram(const std::string& name,
@@ -757,30 +758,6 @@ LinearHistogram::LinearHistogram(const std::string& name,
DCHECK(ValidateBucketRanges());
}
Histogram::ClassType LinearHistogram::histogram_type() const {
return LINEAR_HISTOGRAM;
}
void LinearHistogram::SetRangeDescriptions(
const DescriptionPair descriptions[]) {
for (int i =0; descriptions[i].description; ++i) {
bucket_description_[descriptions[i].sample] = descriptions[i].description;
}
}
const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
int range = ranges(i);
BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
if (it == bucket_description_.end())
return Histogram::GetAsciiBucketRange(i);
return it->second;
}
bool LinearHistogram::PrintEmptyBucket(size_t index) const {
return bucket_description_.find(ranges(index)) == bucket_description_.end();
}
void LinearHistogram::InitializeBucketRange() {
DCHECK_GT(declared_min(), 0); // 0 is the underflow bucket here.
double min = declared_min();
@@ -802,6 +779,19 @@ double LinearHistogram::GetBucketSize(Count current, size_t i) const {
return current/denominator;
}
const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
int range = ranges(i);
BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
if (it == bucket_description_.end())
return Histogram::GetAsciiBucketRange(i);
return it->second;
}
bool LinearHistogram::PrintEmptyBucket(size_t index) const {
return bucket_description_.find(ranges(index)) == bucket_description_.end();
}
//------------------------------------------------------------------------------
// This section provides implementation for BooleanHistogram.
//------------------------------------------------------------------------------
...
@@ -13,6 +13,22 @@
namespace base {
// static
Process Process::Current() {
return Process(GetCurrentProcessHandle());
}
ProcessId Process::pid() const {
if (process_ == 0)
return 0;
return GetProcId(process_);
}
bool Process::is_current() const {
return process_ == GetCurrentProcessHandle();
}
void Process::Close() {
process_ = 0;
// if the process wasn't terminated (so we waited) or the state
@@ -43,22 +59,6 @@ bool Process::SetProcessBackgrounded(bool value) {
}
#endif
ProcessId Process::pid() const {
if (process_ == 0)
return 0;
return GetProcId(process_);
}
bool Process::is_current() const {
return process_ == GetCurrentProcessHandle();
}
// static
Process Process::Current() {
return Process(GetCurrentProcessHandle());
}
int Process::GetPriority() const {
DCHECK(process_);
return getpriority(PRIO_PROCESS, process_);
...
@@ -44,10 +44,6 @@ const ProcessEntry* ProcessIterator::NextProcessEntry() {
return NULL;
}
bool ProcessIterator::IncludeEntry() {
return !filter_ || filter_->Includes(entry_);
}
ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
ProcessEntries found;
while (const ProcessEntry* process_entry = NextProcessEntry()) {
@@ -56,6 +52,10 @@ ProcessIterator::ProcessEntries ProcessIterator::Snapshot() {
return found;
}
bool ProcessIterator::IncludeEntry() {
return !filter_ || filter_->Includes(entry_);
}
NamedProcessIterator::NamedProcessIterator(
const FilePath::StringType& executable_name,
const ProcessFilter* filter) : ProcessIterator(filter),
...
@@ -68,6 +68,39 @@ bool GetProcCmdline(pid_t pid, std::vector<std::string>* proc_cmd_line_args) {
return true;
}
// Get the total CPU of a single process. Return value is number of jiffies
// on success or -1 on error.
int GetProcessCPU(pid_t pid) {
// Synchronously reading files in /proc is safe.
base::ThreadRestrictions::ScopedAllowIO allow_io;
// Use /proc/<pid>/task to find all threads and parse their /stat file.
FilePath path = FilePath(StringPrintf("/proc/%d/task/", pid));
DIR* dir = opendir(path.value().c_str());
if (!dir) {
PLOG(ERROR) << "opendir(" << path.value() << ")";
return -1;
}
int total_cpu = 0;
while (struct dirent* ent = readdir(dir)) {
if (ent->d_name[0] == '.')
continue;
FilePath stat_path = path.AppendASCII(ent->d_name).AppendASCII("stat");
std::string stat;
if (file_util::ReadFileToString(stat_path, &stat)) {
int cpu = base::ParseProcStatCPU(stat);
if (cpu > 0)
total_cpu += cpu;
}
}
closedir(dir);
return total_cpu;
}
} // namespace
namespace base {
@@ -226,14 +259,6 @@ bool NamedProcessIterator::IncludeEntry() {
}
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
last_time_(0),
last_system_time_(0),
last_cpu_(0) {
processor_count_ = base::SysInfo::NumberOfProcessors();
}
// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
return new ProcessMetrics(process);
@@ -399,6 +424,49 @@ bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
return true;
}
double ProcessMetrics::GetCPUUsage() {
// This queries the /proc-specific scaling factor which is
// conceptually the system hertz. To dump this value on another
// system, try
// od -t dL /proc/self/auxv
// and look for the number after 17 in the output; mine is
// 0000040 17 100 3 134512692
// which means the answer is 100.
// It may be the case that this value is always 100.
static const int kHertz = sysconf(_SC_CLK_TCK);
struct timeval now;
int retval = gettimeofday(&now, NULL);
if (retval)
return 0;
int64 time = TimeValToMicroseconds(now);
if (last_time_ == 0) {
// First call, just set the last values.
last_time_ = time;
last_cpu_ = GetProcessCPU(process_);
return 0;
}
int64 time_delta = time - last_time_;
DCHECK_NE(time_delta, 0);
if (time_delta == 0)
return 0;
int cpu = GetProcessCPU(process_);
// We have the number of jiffies in the time period. Convert to percentage.
// Note this means we will go *over* 100 in the case where multiple threads
// are together adding to more than one CPU's worth.
int percentage = 100 * (cpu - last_cpu_) /
(kHertz * TimeDelta::FromMicroseconds(time_delta).InSecondsF());
last_time_ = time;
last_cpu_ = cpu;
return percentage;
}
// To have /proc/self/io file you must enable CONFIG_TASK_IO_ACCOUNTING
// in your kernel configuration.
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
@@ -446,6 +514,14 @@ bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
return true;
}
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
last_time_(0),
last_system_time_(0),
last_cpu_(0) {
processor_count_ = base::SysInfo::NumberOfProcessors();
}
// Exposed for testing.
int ParseProcStatCPU(const std::string& input) {
@@ -469,82 +545,6 @@ int ParseProcStatCPU(const std::string& input) {
return fields11 + fields12;
}
// Get the total CPU of a single process. Return value is number of jiffies
// on success or -1 on error.
static int GetProcessCPU(pid_t pid) {
// Synchronously reading files in /proc is safe.
base::ThreadRestrictions::ScopedAllowIO allow_io;
// Use /proc/<pid>/task to find all threads and parse their /stat file.
FilePath path = FilePath(StringPrintf("/proc/%d/task/", pid));
DIR* dir = opendir(path.value().c_str());
if (!dir) {
PLOG(ERROR) << "opendir(" << path.value() << ")";
return -1;
}
int total_cpu = 0;
while (struct dirent* ent = readdir(dir)) {
if (ent->d_name[0] == '.')
continue;
FilePath stat_path = path.AppendASCII(ent->d_name).AppendASCII("stat");
std::string stat;
if (file_util::ReadFileToString(stat_path, &stat)) {
int cpu = ParseProcStatCPU(stat);
if (cpu > 0)
total_cpu += cpu;
}
}
closedir(dir);
return total_cpu;
}
double ProcessMetrics::GetCPUUsage() {
// This queries the /proc-specific scaling factor which is
// conceptually the system hertz. To dump this value on another
// system, try
// od -t dL /proc/self/auxv
// and look for the number after 17 in the output; mine is
// 0000040 17 100 3 134512692
// which means the answer is 100.
// It may be the case that this value is always 100.
static const int kHertz = sysconf(_SC_CLK_TCK);
struct timeval now;
int retval = gettimeofday(&now, NULL);
if (retval)
return 0;
int64 time = TimeValToMicroseconds(now);
if (last_time_ == 0) {
// First call, just set the last values.
last_time_ = time;
last_cpu_ = GetProcessCPU(process_);
return 0;
}
int64 time_delta = time - last_time_;
DCHECK_NE(time_delta, 0);
if (time_delta == 0)
return 0;
int cpu = GetProcessCPU(process_);
// We have the number of jiffies in the time period. Convert to percentage.
// Note this means we will go *over* 100 in the case where multiple threads
// are together adding to more than one CPU's worth.
int percentage = 100 * (cpu - last_cpu_) /
(kHertz * TimeDelta::FromMicroseconds(time_delta).InSecondsF());
last_time_ = time;
last_cpu_ = cpu;
return percentage;
}
namespace {
// The format of /proc/meminfo is:
...
@@ -61,10 +61,10 @@ void WeakReferenceOwner::Invalidate() {
WeakPtrBase::WeakPtrBase() {
}
WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
}
WeakPtrBase::~WeakPtrBase() {
}
WeakPtrBase::~WeakPtrBase() {
}
WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
}
} // namespace internal
...