Commit ca442712 authored by Shivani Sharma, committed by Commit Bot

HttpCache Parallel Writing Metrics enum description fixed.

The HttpCache.ParallelWritingPattern histogram's 0 enum value is actually
a valid value, not undefined. This CL fixes the description and adds
tests covering the 0 value.

Bug: 472740
Change-Id: Iac4cdb38b6141221e901003e3540c60228626400
Reviewed-on: https://chromium-review.googlesource.com/820191
Reviewed-by: Josh Karlin <jkarlin@chromium.org>
Commit-Queue: Shivani Sharma <shivanisha@chromium.org>
Cr-Commit-Position: refs/heads/master@{#523526}
parent a131ab14
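
For readers unfamiliar with Chromium's histogram testing, the sketch below shows the pattern the new tests in this CL rely on: an enum-backed sample is recorded and then asserted on with base::HistogramTester. This is a minimal, self-contained sketch and not the CL's own code; it records the sample directly with UMA_HISTOGRAM_ENUMERATION, whereas the real value is set inside HttpCache::Transaction via MaybeSetParallelWritingPatternForMetrics, and the test name here is made up.

#include "base/metrics/histogram_macros.h"
#include "base/test/histogram_tester.h"
#include "net/http/http_cache.h"
#include "testing/gtest/include/gtest/gtest.h"

// Sketch only: demonstrates that bucket 0 (PARALLEL_WRITING_NONE) is a real,
// assertable value rather than an "undefined" placeholder.
TEST(HttpCacheParallelWritingSketch, ZeroIsAValidBucket) {
  base::HistogramTester histograms;

  // Record bucket 0 directly; in production this happens inside
  // HttpCache::Transaction when the transaction never leaves the headers
  // phase (an assumption based on the CL description).
  UMA_HISTOGRAM_ENUMERATION("HttpCache.ParallelWritingPattern",
                            net::HttpCache::PARALLEL_WRITING_NONE,
                            net::HttpCache::PARALLEL_WRITING_MAX);

  histograms.ExpectBucketCount(
      "HttpCache.ParallelWritingPattern",
      static_cast<int>(net::HttpCache::PARALLEL_WRITING_NONE), 1);
  histograms.ExpectTotalCount("HttpCache.ParallelWritingPattern", 1);
}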
@@ -1105,10 +1105,14 @@ void HttpCache::ProcessDoneHeadersQueue(ActiveEntry* entry) {
       transaction->WriteModeTransactionAboutToBecomeReader();
       auto return_val = entry->readers.insert(transaction);
       DCHECK(return_val.second);
+      transaction->MaybeSetParallelWritingPatternForMetrics(
+          PARALLEL_WRITING_NONE_CACHE_READ);
     }
   } else {  // mode READ
     auto return_val = entry->readers.insert(transaction);
     DCHECK(return_val.second);
+    transaction->MaybeSetParallelWritingPatternForMetrics(
+        PARALLEL_WRITING_NONE_CACHE_READ);
   }
 }
...
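
Both new call sites above funnel through MaybeSetParallelWritingPatternForMetrics, which this CL does not show. Judging by the "Maybe" prefix, it likely records only the first meaningful pattern per transaction. The standalone sketch below illustrates that set-once shape under that assumption; the class, member name, and behavior are inferred for illustration and are not taken from the actual helper in http_cache_transaction.cc.

#include "net/http/http_cache.h"

// Illustrative set-once recorder, inferred from the call sites above; the
// real logic lives on HttpCache::Transaction and may differ. Keeping the
// first non-default pattern ensures a later state change (e.g. becoming a
// reader) does not overwrite the value that eventually gets logged.
class ParallelWritingPatternRecorder {
 public:
  void MaybeSet(net::HttpCache::ParallelWritingPattern pattern) {
    if (pattern_ != net::HttpCache::PARALLEL_WRITING_NONE)
      return;  // A more specific pattern was already recorded earlier.
    pattern_ = pattern;
  }
  net::HttpCache::ParallelWritingPattern pattern() const { return pattern_; }

 private:
  net::HttpCache::ParallelWritingPattern pattern_ =
      net::HttpCache::PARALLEL_WRITING_NONE;
};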
@@ -115,9 +115,8 @@ class NET_EXPORT HttpCache : public HttpTransactionFactory {
   // This is also used to log metrics so should be consistent with the values in
   // enums.xml and should only be appended to.
   enum ParallelWritingPattern {
-    // Used as the default value till the transaction reaches the response body
-    // phase. Also used when a transaction is waiting for Writers to do a
-    // cleanup. This value is not logged in the histogram.
+    // Used as the default value till the transaction is in initial headers
+    // phase.
     PARALLEL_WRITING_NONE,
     // The transaction creates a writers object. This is only logged for
     // transactions that did not fail to join existing writers earlier.
@@ -133,6 +132,9 @@ class NET_EXPORT HttpCache : public HttpTransactionFactory {
     // The transaction cannot join existing writers since it does not have cache
     // write privileges.
     PARALLEL_WRITING_NOT_JOIN_READ_ONLY,
+    // Writers does not exist and the transaction does not need to create one
+    // since it is going to read from the cache.
+    PARALLEL_WRITING_NONE_CACHE_READ,
     // On adding a value here, make sure to add in enums.xml as well.
     PARALLEL_WRITING_MAX
   };
...
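
Because the histogram stores the numeric enum values, the ordinals are effectively part of the recorded data: PARALLEL_WRITING_NONE must stay 0, and new values must be appended just before PARALLEL_WRITING_MAX, exactly as the comments above require. The guard below is purely illustrative (Chromium does not necessarily carry such asserts) and assumes the values documented in the enums.xml hunk at the end of this CL.

#include "net/http/http_cache.h"

// Illustrative only: pin the ordinals that enums.xml documents so an
// accidental reordering fails to compile instead of silently re-mapping
// historical histogram data.
static_assert(net::HttpCache::PARALLEL_WRITING_NONE == 0,
              "bucket 0 is 'Initial value, headers phase'");
static_assert(net::HttpCache::PARALLEL_WRITING_NONE_CACHE_READ == 6,
              "bucket 6 is 'Cache read, no Writers'");
static_assert(net::HttpCache::PARALLEL_WRITING_MAX == 7,
              "append new values just before PARALLEL_WRITING_MAX");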
@@ -797,6 +797,8 @@ TEST(HttpCache, ReleaseBuffer) {
 TEST(HttpCache, SimpleGETWithDiskFailures) {
   MockHttpCache cache;
+  base::HistogramTester histograms;
+  const std::string histogram_name = "HttpCache.ParallelWritingPattern";
 
   cache.disk_cache()->set_soft_failures(true);
@@ -813,6 +815,11 @@ TEST(HttpCache, SimpleGETWithDiskFailures) {
   EXPECT_EQ(2, cache.network_layer()->transaction_count());
   EXPECT_EQ(0, cache.disk_cache()->open_count());
   EXPECT_EQ(2, cache.disk_cache()->create_count());
+  // Since the transactions were in headers phase when failed,
+  // PARALLEL_WRITING_NONE should be logged.
+  histograms.ExpectBucketCount(
+      histogram_name, static_cast<int>(HttpCache::PARALLEL_WRITING_NONE), 2);
 }
 
 // Tests that disk failures after the transaction has started don't cause the
@@ -989,6 +996,8 @@ TEST(HttpCache, SimpleGET_LoadOnlyFromCache_Miss) {
 TEST(HttpCache, SimpleGET_LoadPreferringCache_Hit) {
   MockHttpCache cache;
+  base::HistogramTester histograms;
+  const std::string histogram_name = "HttpCache.ParallelWritingPattern";
 
   // write to the cache
   RunTransactionTest(cache.http_cache(), kSimpleGET_Transaction);
@@ -1002,6 +1011,12 @@ TEST(HttpCache, SimpleGET_LoadPreferringCache_Hit) {
   EXPECT_EQ(1, cache.network_layer()->transaction_count());
   EXPECT_EQ(1, cache.disk_cache()->open_count());
   EXPECT_EQ(1, cache.disk_cache()->create_count());
+  histograms.ExpectBucketCount(
+      histogram_name, static_cast<int>(HttpCache::PARALLEL_WRITING_CREATE), 1);
+  histograms.ExpectBucketCount(
+      histogram_name,
+      static_cast<int>(HttpCache::PARALLEL_WRITING_NONE_CACHE_READ), 1);
 }
 
 TEST(HttpCache, SimpleGET_LoadPreferringCache_Miss) {
@@ -5035,6 +5050,8 @@ TEST(HttpCache, SimplePOST_LoadOnlyFromCache_Miss) {
 TEST(HttpCache, SimplePOST_LoadOnlyFromCache_Hit) {
   MockHttpCache cache;
+  base::HistogramTester histograms;
+  const std::string histogram_name = "HttpCache.ParallelWritingPattern";
 
   // Test that we hit the cache for POST requests.
@@ -5064,6 +5081,10 @@ TEST(HttpCache, SimplePOST_LoadOnlyFromCache_Hit) {
   EXPECT_EQ(1, cache.network_layer()->transaction_count());
   EXPECT_EQ(1, cache.disk_cache()->open_count());
   EXPECT_EQ(1, cache.disk_cache()->create_count());
+  histograms.ExpectBucketCount(
+      histogram_name,
+      static_cast<int>(HttpCache::PARALLEL_WRITING_NONE_CACHE_READ), 1);
 }
 
 // Test that we don't hit the cache for POST requests if there is a byte range.
...
@@ -21262,12 +21262,13 @@ Called by update_gpu_driver_bug_workaround_entries.py.-->
 </enum>
 
 <enum name="HttpCacheParallelWritingPattern">
-  <int value="0" label="Undefined"/>
-  <int value="1" label="Create Writers"/>
-  <int value="2" label="Joined Writing"/>
-  <int value="3" label="Not joined Range Request"/>
-  <int value="4" label="Not joined not GET"/>
-  <int value="5" label="Not joined read-only"/>
+  <int value="0" label="Initial value, headers phase"/>
+  <int value="1" label="Created Writers"/>
+  <int value="2" label="Joined Writers"/>
+  <int value="3" label="Not joined, range request"/>
+  <int value="4" label="Not joined, not GET"/>
+  <int value="5" label="Not joined, read-only"/>
+  <int value="6" label="Cache read, no Writers"/>
 </enum>
 
 <enum name="HttpCachePattern">