Commit 2417f3ec authored by Maks Orlovich, committed by Commit Bot

SimpleCache: remove metrics on parallelizability

They are complex to compute, and the results show there is basically no
parallelism to exploit:
1) There is usually only one read ongoing (>99% of the time, except I
   broke this bucket in M63...)
2) For writes, 90+% of the time we have only a single write to an entry,
   too. Most of the rest is a second write following an optimistic write,
   which can't easily be parallelized since HttpCache writes with
   truncate = true. This isn't entirely impossible, though (see the sketch
   below).
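
For context on point 2: the ConflictsWith() check deleted below treats a write
with truncate = true as covering everything from its offset onward, which is
why a follow-up write is considered to conflict with a preceding optimistic
truncating write. A minimal standalone sketch of that range rule (the Op struct
and RangesConflict() are illustrative stand-ins, not the real
SimpleEntryOperation API):

    #include <climits>
    #include <cstdio>

    // Simplified mirror of the removed SimpleEntryOperation::ConflictsWith()
    // range check: a truncating write conceptually extends to INT_MAX, so any
    // operation queued behind it at or past its offset counts as conflicting.
    struct Op {
      bool is_write;
      bool truncate;
      int offset;
      int length;
    };

    bool RangesConflict(const Op& a, const Op& b) {
      int a_end = (a.is_write && a.truncate) ? INT_MAX : a.offset + a.length;
      int b_end = (b.is_write && b.truncate) ? INT_MAX : b.offset + b.length;
      return a.offset < b_end && b.offset < a_end;
    }

    int main() {
      Op optimistic_write = {/*is_write=*/true, /*truncate=*/true, 0, 512};
      Op followup_write = {/*is_write=*/true, /*truncate=*/false, 512, 128};
      // Prints "conflict: 1": the truncating write owns [0, INT_MAX), so the
      // follow-up write cannot safely run in parallel with it.
      std::printf("conflict: %d\n",
                  RangesConflict(optimistic_write, followup_write));
      return 0;
    }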

Bug: 757682
Change-Id: I7c65988e14a6d413b95122e9c7c39c922f77524f
Reviewed-on: https://chromium-review.googlesource.com/1122706
Reviewed-by: Steven Holte <holte@chromium.org>
Reviewed-by: Josh Karlin <jkarlin@chromium.org>
Commit-Queue: Maks Orlovich <morlovich@chromium.org>
Cr-Commit-Position: refs/heads/master@{#575515}
parent bb7663f3
@@ -391,8 +391,10 @@ int SimpleEntryImpl::ReadData(int stream_index,
     return net::ERR_INVALID_ARGUMENT;
   }
-  // TODO(felipeg): Optimization: Add support for truly parallel read
-  // operations.
+  // If this is the only operation, bypass the queue, and also see if there is
+  // in-memory data to handle it synchronously. In principle, multiple reads can
+  // be parallelized, but past studies have shown that parallelizable ones
+  // happen <1% of the time, so it's probably not worth the effort.
   bool alone_in_queue =
       pending_operations_.size() == 0 && state_ == STATE_READY;
@@ -402,8 +404,7 @@ int SimpleEntryImpl::ReadData(int stream_index,
   }
   pending_operations_.push(SimpleEntryOperation::ReadOperation(
-      this, stream_index, offset, buf_len, buf, std::move(callback),
-      alone_in_queue));
+      this, stream_index, offset, buf_len, buf, std::move(callback)));
   RunNextOperationIfNeeded();
   return net::ERR_IO_PENDING;
 }
@@ -453,7 +454,8 @@ int SimpleEntryImpl::WriteData(int stream_index,
   // actually run the write operation that sets the stream size. It also
   // prevents from previous possibly-conflicting writes that could be stacked
   // in the |pending_operations_|. We could optimize this for when we have
-  // only read operations enqueued.
+  // only read operations enqueued, but past studies have shown that such
+  // parallelizable cases are very rare.
   const bool optimistic =
       (use_optimistic_operations_ && state_ == STATE_READY &&
        pending_operations_.size() == 0);
@@ -567,7 +569,6 @@ size_t SimpleEntryImpl::EstimateMemoryUsage() const {
   // measured, but the ownership of SimpleSynchronousEntry isn't straightforward
   return sizeof(SimpleSynchronousEntry) +
          base::trace_event::EstimateMemoryUsage(pending_operations_) +
-         base::trace_event::EstimateMemoryUsage(executing_operation_) +
          (stream_0_data_ ? stream_0_data_->capacity() : 0) +
          (stream_1_prefetch_data_ ? stream_1_prefetch_data_->capacity() : 0);
 }
@@ -665,13 +666,11 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
         CloseInternal();
         break;
       case SimpleEntryOperation::TYPE_READ:
-        RecordReadIsParallelizable(*operation);
        ReadDataInternal(/* sync_possible= */ false, operation->index(),
                          operation->offset(), operation->buf(),
                          operation->length(), operation->ReleaseCallback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
-        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(), operation->offset(),
                          operation->buf(), operation->length(),
                          operation->ReleaseCallback(), operation->truncate());
@@ -697,10 +696,6 @@ void SimpleEntryImpl::RunNextOperationIfNeeded() {
      default:
        NOTREACHED();
    }
-    // The operation is kept for histograms. Makes sure it does not leak
-    // resources.
-    executing_operation_.swap(operation);
-    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
 }
@@ -1538,77 +1533,6 @@ int64_t SimpleEntryImpl::GetDiskUsage() const {
   return file_size;
 }
-void SimpleEntryImpl::RecordReadIsParallelizable(
-    const SimpleEntryOperation& operation) const {
-  if (!executing_operation_)
-    return;
-  // Used in histograms, please only add entries at the end.
-  enum ReadDependencyType {
-    // READ_STANDALONE = 0, Deprecated.
-    READ_FOLLOWS_READ = 1,
-    READ_FOLLOWS_CONFLICTING_WRITE = 2,
-    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
-    READ_FOLLOWS_OTHER = 4,
-    READ_ALONE_IN_QUEUE = 5,
-    READ_DEPENDENCY_TYPE_MAX = 6,
-  };
-  ReadDependencyType type = READ_FOLLOWS_OTHER;
-  if (operation.alone_in_queue()) {
-    type = READ_ALONE_IN_QUEUE;
-  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
-    type = READ_FOLLOWS_READ;
-  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
-    if (executing_operation_->ConflictsWith(operation))
-      type = READ_FOLLOWS_CONFLICTING_WRITE;
-    else
-      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
-  }
-  SIMPLE_CACHE_UMA(ENUMERATION,
-                   "ReadIsParallelizable", cache_type_,
-                   type, READ_DEPENDENCY_TYPE_MAX);
-}
-void SimpleEntryImpl::RecordWriteDependencyType(
-    const SimpleEntryOperation& operation) const {
-  if (!executing_operation_)
-    return;
-  // Used in histograms, please only add entries at the end.
-  enum WriteDependencyType {
-    WRITE_OPTIMISTIC = 0,
-    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
-    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
-    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
-    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
-    WRITE_FOLLOWS_CONFLICTING_READ = 5,
-    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
-    WRITE_FOLLOWS_OTHER = 7,
-    WRITE_DEPENDENCY_TYPE_MAX = 8,
-  };
-  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
-  if (operation.optimistic()) {
-    type = WRITE_OPTIMISTIC;
-  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
-             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
-    bool conflicting = executing_operation_->ConflictsWith(operation);
-    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
-      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
-                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
-    } else if (executing_operation_->optimistic()) {
-      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
-                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
-    } else {
-      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
-                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
-    }
-  }
-  SIMPLE_CACHE_UMA(ENUMERATION,
-                   "WriteDependencyType", cache_type_,
-                   type, WRITE_DEPENDENCY_TYPE_MAX);
-}
 int SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
                                     int offset,
                                     int buf_len,
...
@@ -330,10 +330,6 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
   int64_t GetDiskUsage() const;
-  // Used to report histograms.
-  void RecordReadIsParallelizable(const SimpleEntryOperation& operation) const;
-  void RecordWriteDependencyType(const SimpleEntryOperation& operation) const;
   // Completes a read from the stream data kept in memory, logging metrics
   // and updating metadata. Returns the # of bytes read successfully.
   // This asumes the caller has already range-checked offset and buf_len
@@ -424,8 +420,6 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
   net::NetLogWithSource net_log_;
-  std::unique_ptr<SimpleEntryOperation> executing_operation_;
   // Unlike other streams, stream 0 data is read from the disk when the entry is
   // opened, and then kept in memory. All read/write operations on stream 0
   // affect the |stream_0_data_| buffer. When the entry is closed,
...
@@ -13,27 +13,6 @@
 namespace disk_cache {
-namespace {
-bool IsReadWriteType(unsigned int type) {
-  return type == SimpleEntryOperation::TYPE_READ ||
-         type == SimpleEntryOperation::TYPE_WRITE ||
-         type == SimpleEntryOperation::TYPE_READ_SPARSE ||
-         type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
-}
-bool IsReadType(unsigned type) {
-  return type == SimpleEntryOperation::TYPE_READ ||
-         type == SimpleEntryOperation::TYPE_READ_SPARSE;
-}
-bool IsSparseType(unsigned type) {
-  return type == SimpleEntryOperation::TYPE_READ_SPARSE ||
-         type == SimpleEntryOperation::TYPE_WRITE_SPARSE;
-}
-}  // anonymous namespace
 SimpleEntryOperation::SimpleEntryOperation(SimpleEntryOperation&& other)
     : entry_(std::move(other.entry_)),
       buf_(std::move(other.buf_)),
@@ -47,8 +26,7 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryOperation&& other)
       have_index_(other.have_index_),
      index_(other.index_),
      truncate_(other.truncate_),
-      optimistic_(other.optimistic_),
-      alone_in_queue_(other.alone_in_queue_) {}
+      optimistic_(other.optimistic_) {}
 SimpleEntryOperation::~SimpleEntryOperation() = default;
@@ -59,8 +37,7 @@ SimpleEntryOperation SimpleEntryOperation::OpenOperation(
     net::CompletionOnceCallback callback,
     Entry** out_entry) {
   return SimpleEntryOperation(entry, NULL, std::move(callback), out_entry, 0, 0,
-                              0, NULL, TYPE_OPEN, have_index, 0, false, false,
-                              false);
+                              0, NULL, TYPE_OPEN, have_index, 0, false, false);
 }
 // static
@@ -70,7 +47,7 @@ SimpleEntryOperation SimpleEntryOperation::CreateOperation(
     net::CompletionOnceCallback callback,
     Entry** out_entry) {
   return SimpleEntryOperation(entry, NULL, std::move(callback), out_entry, 0, 0,
-                              0, NULL, TYPE_CREATE, have_index, 0, false, false,
+                              0, NULL, TYPE_CREATE, have_index, 0, false,
                               false);
 }
@@ -78,8 +55,7 @@ SimpleEntryOperation SimpleEntryOperation::CreateOperation(
 SimpleEntryOperation SimpleEntryOperation::CloseOperation(
     SimpleEntryImpl* entry) {
   return SimpleEntryOperation(entry, NULL, CompletionOnceCallback(), NULL, 0, 0,
-                              0, NULL, TYPE_CLOSE, false, 0, false, false,
-                              false);
+                              0, NULL, TYPE_CLOSE, false, 0, false, false);
 }
 // static
@@ -89,11 +65,10 @@ SimpleEntryOperation SimpleEntryOperation::ReadOperation(
     int offset,
     int length,
     net::IOBuffer* buf,
-    CompletionOnceCallback callback,
-    bool alone_in_queue) {
+    CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), NULL, offset, 0,
                               length, NULL, TYPE_READ, false, index, false,
-                              false, alone_in_queue);
+                              false);
 }
 // static
@@ -108,7 +83,7 @@ SimpleEntryOperation SimpleEntryOperation::WriteOperation(
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), NULL, offset, 0,
                               length, NULL, TYPE_WRITE, false, index, truncate,
-                              optimistic, false);
+                              optimistic);
 }
 // static
@@ -120,7 +95,7 @@ SimpleEntryOperation SimpleEntryOperation::ReadSparseOperation(
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), NULL, 0,
                               sparse_offset, length, NULL, TYPE_READ_SPARSE,
-                              false, 0, false, false, false);
+                              false, 0, false, false);
 }
 // static
@@ -132,7 +107,7 @@ SimpleEntryOperation SimpleEntryOperation::WriteSparseOperation(
     CompletionOnceCallback callback) {
   return SimpleEntryOperation(entry, buf, std::move(callback), NULL, 0,
                               sparse_offset, length, NULL, TYPE_WRITE_SPARSE,
-                              false, 0, false, false, false);
+                              false, 0, false, false);
 }
 // static
@@ -142,9 +117,9 @@ SimpleEntryOperation SimpleEntryOperation::GetAvailableRangeOperation(
     int length,
     int64_t* out_start,
     CompletionOnceCallback callback) {
-  return SimpleEntryOperation(
-      entry, NULL, std::move(callback), NULL, 0, sparse_offset, length,
-      out_start, TYPE_GET_AVAILABLE_RANGE, false, 0, false, false, false);
+  return SimpleEntryOperation(entry, NULL, std::move(callback), NULL, 0,
+                              sparse_offset, length, out_start,
+                              TYPE_GET_AVAILABLE_RANGE, false, 0, false, false);
 }
 // static
@@ -161,53 +136,9 @@ SimpleEntryOperation SimpleEntryOperation::DoomOperation(
   const int index = 0;
   const bool truncate = false;
   const bool optimistic = false;
-  const bool alone_in_queue = false;
-  return SimpleEntryOperation(entry, buf, std::move(callback), out_entry,
-                              offset, sparse_offset, length, out_start,
-                              TYPE_DOOM, have_index, index, truncate,
-                              optimistic, alone_in_queue);
-}
-bool SimpleEntryOperation::ConflictsWith(
-    const SimpleEntryOperation& other_op) const {
-  EntryOperationType other_type = other_op.type();
-  // Non-read/write operations conflict with everything.
-  if (!IsReadWriteType(type_) || !IsReadWriteType(other_type))
-    return true;
-  // Reads (sparse or otherwise) conflict with nothing.
-  if (IsReadType(type_) && IsReadType(other_type))
-    return false;
-  // Sparse and non-sparse operations do not conflict with each other.
-  if (IsSparseType(type_) != IsSparseType(other_type)) {
-    return false;
-  }
-  // There must be two read/write operations, at least one must be a write, and
-  // they must be either both non-sparse or both sparse. Compare the streams
-  // and offsets to see whether they overlap.
-  if (IsSparseType(type_)) {
-    int64_t end = sparse_offset_ + length_;
-    int64_t other_op_end = other_op.sparse_offset() + other_op.length();
-    return sparse_offset_ < other_op_end && other_op.sparse_offset() < end;
-  }
-  if (index_ != other_op.index_)
-    return false;
-  int end = (type_ == TYPE_WRITE && truncate_) ? INT_MAX : offset_ + length_;
-  int other_op_end = (other_op.type() == TYPE_WRITE && other_op.truncate())
-                         ? INT_MAX
-                         : other_op.offset() + other_op.length();
-  return offset_ < other_op_end && other_op.offset() < end;
-}
-void SimpleEntryOperation::ReleaseReferences() {
-  callback_ = CompletionOnceCallback();
-  buf_ = NULL;
-  entry_ = NULL;
+  return SimpleEntryOperation(
+      entry, buf, std::move(callback), out_entry, offset, sparse_offset, length,
+      out_start, TYPE_DOOM, have_index, index, truncate, optimistic);
 }
 SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
@@ -222,8 +153,7 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
                                            bool have_index,
                                            int index,
                                            bool truncate,
-                                           bool optimistic,
-                                           bool alone_in_queue)
+                                           bool optimistic)
     : entry_(entry),
       buf_(buf),
       callback_(std::move(callback)),
@@ -236,7 +166,6 @@ SimpleEntryOperation::SimpleEntryOperation(SimpleEntryImpl* entry,
       have_index_(have_index),
      index_(index),
      truncate_(truncate),
-      optimistic_(optimistic),
-      alone_in_queue_(alone_in_queue) {}
+      optimistic_(optimistic) {}
 }  // namespace disk_cache
@@ -55,8 +55,7 @@ class SimpleEntryOperation {
                                             int offset,
                                             int length,
                                             net::IOBuffer* buf,
-                                            CompletionOnceCallback callback,
-                                            bool alone_in_queue);
+                                            CompletionOnceCallback callback);
   static SimpleEntryOperation WriteOperation(SimpleEntryImpl* entry,
                                              int index,
                                              int offset,
@@ -86,11 +85,6 @@ class SimpleEntryOperation {
   static SimpleEntryOperation DoomOperation(SimpleEntryImpl* entry,
                                             CompletionOnceCallback callback);
-  bool ConflictsWith(const SimpleEntryOperation& other_op) const;
-  // Releases all references. After calling this operation, SimpleEntryOperation
-  // will only hold POD members.
-  void ReleaseReferences();
   EntryOperationType type() const {
     return static_cast<EntryOperationType>(type_);
   }
@@ -106,7 +100,6 @@ class SimpleEntryOperation {
   net::IOBuffer* buf() { return buf_.get(); }
   bool truncate() const { return truncate_; }
   bool optimistic() const { return optimistic_; }
-  bool alone_in_queue() const { return alone_in_queue_; }
  private:
   SimpleEntryOperation(SimpleEntryImpl* entry,
@@ -121,8 +114,7 @@ class SimpleEntryOperation {
                        bool have_index,
                        int index,
                        bool truncate,
-                       bool optimistic,
-                       bool alone_in_queue);
+                       bool optimistic);
   // This ensures entry will not be deleted until the operation has ran.
   scoped_refptr<SimpleEntryImpl> entry_;
@@ -148,8 +140,6 @@ class SimpleEntryOperation {
   // Used only in write operations.
   const bool truncate_;
   const bool optimistic_;
-  // Used only in SimpleCache.ReadIsParallelizable histogram.
-  const bool alone_in_queue_;
 };
 }  // namespace disk_cache
...
@@ -93812,6 +93812,9 @@ uploading your change for review.
 <histogram base="true" name="SimpleCache.ReadIsParallelizable"
     enum="SimpleCacheReadParallelizable">
+  <obsolete>
+    Removed 2018-07-02. See https://crrev.com/c/1122706
+  </obsolete>
   <owner>morlovich@chromium.org</owner>
   <summary>
     For each Read operation, whether it could have been issued in parallel of a
@@ -93961,6 +93964,9 @@ uploading your change for review.
 <histogram base="true" name="SimpleCache.WriteDependencyType"
     enum="SimpleCacheWriteDependencyType">
+  <obsolete>
+    Removed 2018-07-02. See https://crrev.com/c/1122706
+  </obsolete>
   <owner>morlovich@chromium.org</owner>
   <summary>
     Shows whether a write operation depends on the previous operation in queue