Commit d6b55394 authored by jar@chromium.org's avatar jar@chromium.org

Remove obsolete use of CLIPPED_HISTOGRAMS

Histograms that "clip" data (discard samples
over a given point) make it hard or impossible
to contrast results (such as via trimmed means).
As a result, a long-standing note asked that this
histogram form no longer be used.
This CL removes all remaining uses, and discards
some histograms that are no longer significant (to
SDCH experimentation).  We transition to
CUSTOM_HISTOGRAMS for all cases where the data
appears to be useful (and we provide new names
for the histograms).

r=rtenneti,wtc

Review URL: http://codereview.chromium.org/7553025

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@95570 0039d316-1c4b-4281-b951-d872f2087c98
parent 732b0e5f
...@@ -964,16 +964,16 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() { ...@@ -964,16 +964,16 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() {
base::TimeDelta total_duration = response_.response_time - start_time_; base::TimeDelta total_duration = response_.response_time - start_time_;
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.Transaction_Connected_Under_10", "Net.Transaction_Connected",
total_duration, total_duration,
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
100); 100);
bool reused_socket = stream_->IsConnectionReused(); bool reused_socket = stream_->IsConnectionReused();
if (!reused_socket) { if (!reused_socket) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.Transaction_Connected_New", "Net.Transaction_Connected_New_b",
total_duration, total_duration,
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
100); 100);
...@@ -981,8 +981,8 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() { ...@@ -981,8 +981,8 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() {
static const bool use_conn_impact_histogram = static const bool use_conn_impact_histogram =
base::FieldTrialList::TrialExists("ConnCountImpact"); base::FieldTrialList::TrialExists("ConnCountImpact");
if (use_conn_impact_histogram) { if (use_conn_impact_histogram) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
base::FieldTrial::MakeName("Net.Transaction_Connected_New", base::FieldTrial::MakeName("Net.Transaction_Connected_New_b",
"ConnCountImpact"), "ConnCountImpact"),
total_duration, total_duration,
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
...@@ -993,15 +993,15 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() { ...@@ -993,15 +993,15 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() {
static const bool use_spdy_histogram = static const bool use_spdy_histogram =
base::FieldTrialList::TrialExists("SpdyImpact"); base::FieldTrialList::TrialExists("SpdyImpact");
if (use_spdy_histogram && response_.was_npn_negotiated) { if (use_spdy_histogram && response_.was_npn_negotiated) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
base::FieldTrial::MakeName("Net.Transaction_Connected_Under_10", base::FieldTrial::MakeName("Net.Transaction_Connected",
"SpdyImpact"), "SpdyImpact"),
total_duration, base::TimeDelta::FromMilliseconds(1), total_duration, base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMinutes(10), 100); base::TimeDelta::FromMinutes(10), 100);
if (!reused_socket) { if (!reused_socket) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
base::FieldTrial::MakeName("Net.Transaction_Connected_New", base::FieldTrial::MakeName("Net.Transaction_Connected_New_b",
"SpdyImpact"), "SpdyImpact"),
total_duration, base::TimeDelta::FromMilliseconds(1), total_duration, base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMinutes(10), 100); base::TimeDelta::FromMinutes(10), 100);
...@@ -1012,14 +1012,14 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() { ...@@ -1012,14 +1012,14 @@ void HttpNetworkTransaction::LogTransactionConnectedMetrics() {
// types. This will change when we also prioritize certain subresources like // types. This will change when we also prioritize certain subresources like
// css, js, etc. // css, js, etc.
if (request_->priority) { if (request_->priority) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.Priority_High_Latency", "Net.Priority_High_Latency_b",
total_duration, total_duration,
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
100); 100);
} else { } else {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.Priority_Low_Latency", "Net.Priority_Low_Latency_b",
total_duration, total_duration,
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
100); 100);
...@@ -1034,18 +1034,17 @@ void HttpNetworkTransaction::LogTransactionMetrics() const { ...@@ -1034,18 +1034,17 @@ void HttpNetworkTransaction::LogTransactionMetrics() const {
base::TimeDelta total_duration = base::Time::Now() - start_time_; base::TimeDelta total_duration = base::Time::Now() - start_time_;
UMA_HISTOGRAM_LONG_TIMES("Net.Transaction_Latency", duration); UMA_HISTOGRAM_CUSTOM_TIMES("Net.Transaction_Latency_b", duration,
UMA_HISTOGRAM_CLIPPED_TIMES("Net.Transaction_Latency_Under_10", duration, base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10),
base::TimeDelta::FromMinutes(10), 100);
100); UMA_HISTOGRAM_CUSTOM_TIMES("Net.Transaction_Latency_Total",
UMA_HISTOGRAM_CLIPPED_TIMES("Net.Transaction_Latency_Total_Under_10", total_duration,
total_duration, base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromMinutes(10), 100);
base::TimeDelta::FromMinutes(10), 100);
if (!stream_->IsConnectionReused()) { if (!stream_->IsConnectionReused()) {
UMA_HISTOGRAM_CLIPPED_TIMES( UMA_HISTOGRAM_CUSTOM_TIMES(
"Net.Transaction_Latency_Total_New_Connection_Under_10", "Net.Transaction_Latency_Total_New_Connection",
total_duration, base::TimeDelta::FromMilliseconds(1), total_duration, base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMinutes(10), 100); base::TimeDelta::FromMinutes(10), 100);
} }
......
...@@ -266,10 +266,8 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request) ...@@ -266,10 +266,8 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
packet_timing_enabled_(false), packet_timing_enabled_(false),
done_(false), done_(false),
bytes_observed_in_packets_(0), bytes_observed_in_packets_(0),
packet_times_(),
request_time_snapshot_(), request_time_snapshot_(),
final_packet_time_(), final_packet_time_(),
observed_packet_count_(0),
ALLOW_THIS_IN_INITIALIZER_LIST( ALLOW_THIS_IN_INITIALIZER_LIST(
filter_context_(new HttpFilterContext(this))), filter_context_(new HttpFilterContext(this))),
ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)), ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)),
...@@ -1299,22 +1297,10 @@ void URLRequestHttpJob::UpdatePacketReadTimes() { ...@@ -1299,22 +1297,10 @@ void URLRequestHttpJob::UpdatePacketReadTimes() {
return; // No new bytes have arrived. return; // No new bytes have arrived.
} }
final_packet_time_ = base::Time::Now();
if (!bytes_observed_in_packets_) if (!bytes_observed_in_packets_)
request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
final_packet_time_ = base::Time::Now();
const size_t kTypicalPacketSize = 1430;
while (filter_input_byte_count() > bytes_observed_in_packets_) {
++observed_packet_count_;
if (packet_times_.size() < kSdchPacketHistogramCount) {
packet_times_.push_back(final_packet_time_);
DCHECK_EQ(static_cast<size_t>(observed_packet_count_),
packet_times_.size());
}
bytes_observed_in_packets_ += kTypicalPacketSize;
}
// Since packets may not be full, we'll remember the number of bytes we've
// accounted for in packets thus far.
bytes_observed_in_packets_ = filter_input_byte_count(); bytes_observed_in_packets_ = filter_input_byte_count();
} }
...@@ -1326,116 +1312,28 @@ void URLRequestHttpJob::RecordPacketStats( ...@@ -1326,116 +1312,28 @@ void URLRequestHttpJob::RecordPacketStats(
base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
switch (statistic) { switch (statistic) {
case FilterContext::SDCH_DECODE: { case FilterContext::SDCH_DECODE: {
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
static_cast<int>(observed_packet_count_));
UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
if (packet_times_.empty())
return;
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
final_packet_time_ - packet_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
DCHECK_GT(kSdchPacketHistogramCount, 4u);
if (packet_times_.size() <= 4)
return;
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
packet_times_[1] - packet_times_[0],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
packet_times_[2] - packet_times_[1],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
packet_times_[3] - packet_times_[2],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
packet_times_[4] - packet_times_[3],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
return; return;
} }
case FilterContext::SDCH_PASSTHROUGH: { case FilterContext::SDCH_PASSTHROUGH: {
// Despite advertising a dictionary, we handled non-sdch compressed // Despite advertising a dictionary, we handled non-sdch compressed
// content. // content.
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
duration,
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
observed_packet_count_);
if (packet_times_.empty())
return;
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
final_packet_time_ - packet_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
DCHECK_GT(kSdchPacketHistogramCount, 4u);
if (packet_times_.size() <= 4)
return;
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
packet_times_[1] - packet_times_[0],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
packet_times_[2] - packet_times_[1],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
packet_times_[3] - packet_times_[2],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
packet_times_[4] - packet_times_[3],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
return; return;
} }
case FilterContext::SDCH_EXPERIMENT_DECODE: { case FilterContext::SDCH_EXPERIMENT_DECODE: {
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode", UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
duration, duration,
base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100); base::TimeDelta::FromMinutes(10), 100);
// We already provided interpacket histograms above in the SDCH_DECODE
// case, so we don't need them here.
return; return;
} }
case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback", UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
duration, duration,
base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100); base::TimeDelta::FromMinutes(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
final_packet_time_ - packet_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
DCHECK_GT(kSdchPacketHistogramCount, 4u);
if (packet_times_.size() <= 4)
return;
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
packet_times_[1] - packet_times_[0],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
packet_times_[2] - packet_times_[1],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
packet_times_[3] - packet_times_[2],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
packet_times_[4] - packet_times_[3],
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromSeconds(10), 100);
return; return;
} }
default: default:
......
...@@ -198,9 +198,6 @@ class URLRequestHttpJob : public URLRequestJob { ...@@ -198,9 +198,6 @@ class URLRequestHttpJob : public URLRequestJob {
// those packets may possibly have had their time of arrival recorded). // those packets may possibly have had their time of arrival recorded).
int64 bytes_observed_in_packets_; int64 bytes_observed_in_packets_;
// Arrival times for some of the first few packets.
std::vector<base::Time> packet_times_;
// The request time may not be available when we are being destroyed, so we // The request time may not be available when we are being destroyed, so we
// snapshot it early on. // snapshot it early on.
base::Time request_time_snapshot_; base::Time request_time_snapshot_;
...@@ -212,10 +209,6 @@ class URLRequestHttpJob : public URLRequestJob { ...@@ -212,10 +209,6 @@ class URLRequestHttpJob : public URLRequestJob {
// The start time for the job, ignoring re-starts. // The start time for the job, ignoring re-starts.
base::TimeTicks start_time_; base::TimeTicks start_time_;
// The count of the number of packets, some of which may not have been timed.
// We're ignoring overflow, as 1430 x 2^31 is a LOT of bytes.
int observed_packet_count_;
scoped_ptr<HttpFilterContext> filter_context_; scoped_ptr<HttpFilterContext> filter_context_;
ScopedRunnableMethodFactory<URLRequestHttpJob> method_factory_; ScopedRunnableMethodFactory<URLRequestHttpJob> method_factory_;
base::WeakPtrFactory<URLRequestHttpJob> weak_ptr_factory_; base::WeakPtrFactory<URLRequestHttpJob> weak_ptr_factory_;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment