Commit 41e71f90 authored by Bill Budge, committed by Commit Bot

[code cache] Refactor GeneratedCodeCache to split reads and writes

- Splits Write into two parts, response time and data. Uses stream 0
  for response time, stream 1 for data, to avoid a slowdown.
- Splits Read similarly.
- Eliminates the extra data copy when writing caused by passing a
  span from CodeCacheHostImpl rather than the Mojo BigBuffer received
  from the renderer.
- Changes the keyed PendingOperation queues to keep the active op at
  the head. This simplifies the helper method and callback signatures.
- PendingOperations now own the IOBuffers for Write and Fetch.
- Using stream 0 now will make deduplication of the code caches (not
  storing a copy of the code for each origin) easier in the future.

Behavior changes:
- IssuePendingOperations now runs PendingOperations through the queues
  rather than all at once. This probably didn't happen in the real
  world, but seemed incorrect.
- Reads where one or more parts fail now doom the entry. We should
  help the cache eliminate inaccessible data.
- Response time is now stored in stream 0. This means any data in the
  cache from previous Chromes where response time was a prefix on
  stream 1 will cause a cache miss, since the time stamp read will fail.
  This CL then makes the data read fail, since returning data prefixed
  with a timestamp could be confusing now. Invalidating the caches
  shouldn't be a problem, since we expect a new version of Chrome/V8
  to cause the code caches to become invalid since their version/feature
  header won't match, see:
  https://cs.chromium.org/chromium/src/v8/src/snapshot/code-serializer.cc?rcl=6c89d2ffb531b3c79181532b5de04adaf7206049&l=387

Bug: chromium:992991
Change-Id: If3c5e8a83bca811fcb809a8a89c6d22942456f13
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1814685
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Maks Orlovich <morlovich@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#700307}
parent 047b6d71
...@@ -69,9 +69,6 @@ std::string GetCacheKey(const GURL& resource_url, const GURL& origin_lock) { ...@@ -69,9 +69,6 @@ std::string GetCacheKey(const GURL& resource_url, const GURL& origin_lock) {
constexpr int kResponseTimeSizeInBytes = sizeof(int64_t); constexpr int kResponseTimeSizeInBytes = sizeof(int64_t);
static_assert(mojo_base::BigBuffer::kMaxInlineBytes >=
2 * kResponseTimeSizeInBytes,
"Buffer may not be large enough for response time");
static_assert(mojo_base::BigBuffer::kMaxInlineBytes <= static_assert(mojo_base::BigBuffer::kMaxInlineBytes <=
std::numeric_limits<int>::max(), std::numeric_limits<int>::max(),
"Buffer size calculations may overflow int"); "Buffer size calculations may overflow int");
...@@ -80,6 +77,12 @@ static_assert(mojo_base::BigBuffer::kMaxInlineBytes <= ...@@ -80,6 +77,12 @@ static_assert(mojo_base::BigBuffer::kMaxInlineBytes <=
// as an IOBuffer allows us to avoid a copy. For large code, this can be slow. // as an IOBuffer allows us to avoid a copy. For large code, this can be slow.
class BigIOBuffer : public net::IOBufferWithSize { class BigIOBuffer : public net::IOBufferWithSize {
public: public:
explicit BigIOBuffer(mojo_base::BigBuffer buffer)
: net::IOBufferWithSize(nullptr, buffer.size()),
buffer_(std::move(buffer)) {
data_ = reinterpret_cast<char*>(buffer_.data());
DCHECK(data_);
}
explicit BigIOBuffer(size_t size) : net::IOBufferWithSize(nullptr, size) { explicit BigIOBuffer(size_t size) : net::IOBufferWithSize(nullptr, size) {
buffer_ = mojo_base::BigBuffer(size); buffer_ = mojo_base::BigBuffer(size);
data_ = reinterpret_cast<char*>(buffer_.data()); data_ = reinterpret_cast<char*>(buffer_.data());
...@@ -127,91 +130,78 @@ void GeneratedCodeCache::CollectStatistics( ...@@ -127,91 +130,78 @@ void GeneratedCodeCache::CollectStatistics(
} }
// Stores the information about a pending request while disk backend is // Stores the information about a pending request while disk backend is
// being initialized. // being initialized or another request for the same key is live.
class GeneratedCodeCache::PendingOperation { class GeneratedCodeCache::PendingOperation {
public: public:
static std::unique_ptr<PendingOperation> CreateWritePendingOp( PendingOperation(Operation op,
std::string key, const std::string& key,
scoped_refptr<net::IOBufferWithSize>); scoped_refptr<net::IOBufferWithSize> time_buffer,
static std::unique_ptr<PendingOperation> CreateFetchPendingOp( scoped_refptr<BigIOBuffer> data_buffer)
std::string key, : op_(op),
const ReadDataCallback&); key_(key),
static std::unique_ptr<PendingOperation> CreateDeletePendingOp( time_buffer_(time_buffer),
std::string key); data_buffer_(data_buffer) {
static std::unique_ptr<PendingOperation> CreateGetBackendPendingOp( DCHECK_EQ(Operation::kWrite, op_);
GetBackendCallback callback); }
PendingOperation(Operation op,
const std::string& key,
ReadDataCallback read_callback)
: op_(op), key_(key), read_callback_(std::move(read_callback)) {
DCHECK_EQ(Operation::kFetch, op_);
}
PendingOperation(Operation op, const std::string& key) : op_(op), key_(key) {
DCHECK_EQ(Operation::kDelete, op_);
}
PendingOperation(Operation op, GetBackendCallback backend_callback)
: op_(op), backend_callback_(std::move(backend_callback)) {
DCHECK_EQ(Operation::kGetBackend, op_);
}
~PendingOperation(); ~PendingOperation();
Operation operation() const { return op_; } Operation operation() const { return op_; }
const std::string& key() const { return key_; } const std::string& key() const { return key_; }
const scoped_refptr<net::IOBufferWithSize> data() const { return data_; } scoped_refptr<net::IOBufferWithSize> time_buffer() { return time_buffer_; }
ReadDataCallback ReleaseReadCallback() { return std::move(read_callback_); } scoped_refptr<BigIOBuffer> data_buffer() { return data_buffer_; }
GetBackendCallback ReleaseBackendCallback() { ReadDataCallback TakeReadCallback() { return std::move(read_callback_); }
GetBackendCallback TakeBackendCallback() {
return std::move(backend_callback_); return std::move(backend_callback_);
} }
private: // These are used by Fetch operations to hold the buffers we create once the
PendingOperation(Operation op, // entry is opened.
std::string key, void set_time_buffer(scoped_refptr<net::IOBufferWithSize> time_buffer) {
scoped_refptr<net::IOBufferWithSize>, DCHECK_EQ(Operation::kFetch, op_);
const ReadDataCallback&, time_buffer_ = time_buffer;
GetBackendCallback); }
// Save fetched data until we can run the callback.
void set_data_buffer(scoped_refptr<BigIOBuffer> data_buffer) {
DCHECK_EQ(Operation::kFetch, op_);
data_buffer_ = data_buffer;
}
// Verifies that Write/Fetch callbacks are received in the order we expect.
void VerifyCompletions(int expected) {
#if DCHECK_IS_ON()
DCHECK_EQ(expected, completions_);
completions_++;
#endif
}
private:
const Operation op_; const Operation op_;
const std::string key_; const std::string key_;
const scoped_refptr<net::IOBufferWithSize> data_; scoped_refptr<net::IOBufferWithSize> time_buffer_;
scoped_refptr<BigIOBuffer> data_buffer_;
ReadDataCallback read_callback_; ReadDataCallback read_callback_;
GetBackendCallback backend_callback_; GetBackendCallback backend_callback_;
#if DCHECK_IS_ON()
int completions_ = 0;
#endif
}; };
std::unique_ptr<GeneratedCodeCache::PendingOperation>
GeneratedCodeCache::PendingOperation::CreateWritePendingOp(
std::string key,
scoped_refptr<net::IOBufferWithSize> buffer) {
return base::WrapUnique(
new PendingOperation(Operation::kWrite, std::move(key), buffer,
ReadDataCallback(), GetBackendCallback()));
}
std::unique_ptr<GeneratedCodeCache::PendingOperation>
GeneratedCodeCache::PendingOperation::CreateFetchPendingOp(
std::string key,
const ReadDataCallback& read_callback) {
return base::WrapUnique(new PendingOperation(
Operation::kFetch, std::move(key), scoped_refptr<net::IOBufferWithSize>(),
std::move(read_callback), GetBackendCallback()));
}
std::unique_ptr<GeneratedCodeCache::PendingOperation>
GeneratedCodeCache::PendingOperation::CreateDeletePendingOp(std::string key) {
return base::WrapUnique(
new PendingOperation(Operation::kDelete, std::move(key),
scoped_refptr<net::IOBufferWithSize>(),
ReadDataCallback(), GetBackendCallback()));
}
std::unique_ptr<GeneratedCodeCache::PendingOperation>
GeneratedCodeCache::PendingOperation::CreateGetBackendPendingOp(
GetBackendCallback backend_callback) {
return base::WrapUnique(
new PendingOperation(Operation::kGetBackend, std::string(),
scoped_refptr<net::IOBufferWithSize>(),
ReadDataCallback(), std::move(backend_callback)));
}
GeneratedCodeCache::PendingOperation::PendingOperation(
Operation op,
std::string key,
scoped_refptr<net::IOBufferWithSize> buffer,
const ReadDataCallback& read_callback,
GetBackendCallback backend_callback)
: op_(op),
key_(std::move(key)),
data_(buffer),
read_callback_(std::move(read_callback)),
backend_callback_(std::move(backend_callback)) {}
GeneratedCodeCache::PendingOperation::~PendingOperation() = default; GeneratedCodeCache::PendingOperation::~PendingOperation() = default;
GeneratedCodeCache::GeneratedCodeCache(const base::FilePath& path, GeneratedCodeCache::GeneratedCodeCache(const base::FilePath& path,
...@@ -235,54 +225,44 @@ void GeneratedCodeCache::GetBackend(GetBackendCallback callback) { ...@@ -235,54 +225,44 @@ void GeneratedCodeCache::GetBackend(GetBackendCallback callback) {
std::move(callback).Run(backend_.get()); std::move(callback).Run(backend_.get());
return; return;
case kInitializing: case kInitializing:
pending_ops_.push_back( pending_ops_.emplace(std::make_unique<PendingOperation>(
GeneratedCodeCache::PendingOperation::CreateGetBackendPendingOp( Operation::kGetBackend, std::move(callback)));
std::move(callback)));
return; return;
} }
} }
void GeneratedCodeCache::WriteData(const GURL& url, void GeneratedCodeCache::WriteEntry(const GURL& url,
const GURL& origin_lock, const GURL& origin_lock,
const base::Time& response_time, const base::Time& response_time,
base::span<const uint8_t> data) { mojo_base::BigBuffer data) {
// Silently ignore the requests.
if (backend_state_ == kFailed) { if (backend_state_ == kFailed) {
// Silently fail the request.
CollectStatistics(CacheEntryStatus::kError); CollectStatistics(CacheEntryStatus::kError);
return; return;
} }
// Append the response time to the metadata. Code caches store // Response time and data are written separately, to avoid a copy. We need
// response_time + generated code as a single entry. // two IOBuffers, one for the time and one for the BigBuffer.
scoped_refptr<net::IOBufferWithSize> buffer = scoped_refptr<net::IOBufferWithSize> time_buffer =
base::MakeRefCounted<net::IOBufferWithSize>(data.size() + base::MakeRefCounted<net::IOBufferWithSize>(kResponseTimeSizeInBytes);
kResponseTimeSizeInBytes);
int64_t serialized_time = int64_t serialized_time =
response_time.ToDeltaSinceWindowsEpoch().InMicroseconds(); response_time.ToDeltaSinceWindowsEpoch().InMicroseconds();
memcpy(buffer->data(), &serialized_time, kResponseTimeSizeInBytes); memcpy(time_buffer->data(), &serialized_time, kResponseTimeSizeInBytes);
if (!data.empty()) scoped_refptr<BigIOBuffer> data_buffer =
memcpy(buffer->data() + kResponseTimeSizeInBytes, data.data(), data.size()); base::MakeRefCounted<BigIOBuffer>(std::move(data));
// Create the write operation.
std::string key = GetCacheKey(url, origin_lock); std::string key = GetCacheKey(url, origin_lock);
// If there is an in progress operation corresponding to this key. Enqueue it auto op = std::make_unique<PendingOperation>(Operation::kWrite, key,
// so we can issue once the in-progress operation finishes. time_buffer, data_buffer);
if (!TryBeginOperation(key)) {
EnqueueAsPendingOperation(
key, GeneratedCodeCache::PendingOperation::CreateWritePendingOp(
key, buffer));
return;
}
if (backend_state_ != kInitialized) { if (backend_state_ != kInitialized) {
// Insert it into the list of pending operations while the backend is // Insert it into the list of pending operations while the backend is
// still being opened. // still being opened.
pending_ops_.push_back( pending_ops_.emplace(std::move(op));
GeneratedCodeCache::PendingOperation::CreateWritePendingOp(
std::move(key), buffer));
return; return;
} }
WriteDataImpl(key, buffer); EnqueueOperationAndIssueIfNext(std::move(op));
} }
void GeneratedCodeCache::FetchEntry(const GURL& url, void GeneratedCodeCache::FetchEntry(const GURL& url,
...@@ -290,51 +270,42 @@ void GeneratedCodeCache::FetchEntry(const GURL& url, ...@@ -290,51 +270,42 @@ void GeneratedCodeCache::FetchEntry(const GURL& url,
ReadDataCallback read_data_callback) { ReadDataCallback read_data_callback) {
if (backend_state_ == kFailed) { if (backend_state_ == kFailed) {
CollectStatistics(CacheEntryStatus::kError); CollectStatistics(CacheEntryStatus::kError);
// Silently ignore the requests. // Fail the request.
std::move(read_data_callback).Run(base::Time(), mojo_base::BigBuffer()); std::move(read_data_callback).Run(base::Time(), mojo_base::BigBuffer());
return; return;
} }
std::string key = GetCacheKey(url, origin_lock); std::string key = GetCacheKey(url, origin_lock);
// If there is an in progress operation corresponding to this key. Enqueue it auto op = std::make_unique<PendingOperation>(Operation::kFetch, key,
// so we can issue once the in-progress operation finishes. std::move(read_data_callback));
if (!TryBeginOperation(key)) {
EnqueueAsPendingOperation(
key, GeneratedCodeCache::PendingOperation::CreateFetchPendingOp(
key, read_data_callback));
return;
}
if (backend_state_ != kInitialized) { if (backend_state_ != kInitialized) {
// Insert it into the list of pending operations while the backend is // Insert it into the list of pending operations while the backend is
// still being opened. // still being opened.
pending_ops_.push_back( pending_ops_.emplace(std::move(op));
GeneratedCodeCache::PendingOperation::CreateFetchPendingOp(
std::move(key), read_data_callback));
return; return;
} }
FetchEntryImpl(key, read_data_callback); EnqueueOperationAndIssueIfNext(std::move(op));
} }
void GeneratedCodeCache::DeleteEntry(const GURL& url, const GURL& origin_lock) { void GeneratedCodeCache::DeleteEntry(const GURL& url, const GURL& origin_lock) {
// Silently ignore the requests.
if (backend_state_ == kFailed) { if (backend_state_ == kFailed) {
// Silently fail.
CollectStatistics(CacheEntryStatus::kError); CollectStatistics(CacheEntryStatus::kError);
return; return;
} }
std::string key = GetCacheKey(url, origin_lock); std::string key = GetCacheKey(url, origin_lock);
auto op = std::make_unique<PendingOperation>(Operation::kDelete, key);
if (backend_state_ != kInitialized) { if (backend_state_ != kInitialized) {
// Insert it into the list of pending operations while the backend is // Insert it into the list of pending operations while the backend is
// still being opened. // still being opened.
pending_ops_.push_back( pending_ops_.emplace(std::move(op));
GeneratedCodeCache::PendingOperation::CreateDeletePendingOp(
std::move(key)));
return; return;
} }
DeleteEntryImpl(key); EnqueueOperationAndIssueIfNext(std::move(op));
} }
void GeneratedCodeCache::CreateBackend() { void GeneratedCodeCache::CreateBackend() {
...@@ -365,265 +336,303 @@ void GeneratedCodeCache::DidCreateBackend( ...@@ -365,265 +336,303 @@ void GeneratedCodeCache::DidCreateBackend(
int rv) { int rv) {
if (rv != net::OK) { if (rv != net::OK) {
backend_state_ = kFailed; backend_state_ = kFailed;
// Process pending operations to process any required callbacks. } else {
IssuePendingOperations(); backend_ = std::move(backend_ptr->data);
return; backend_state_ = kInitialized;
} }
backend_ = std::move(backend_ptr->data);
backend_state_ = kInitialized;
IssuePendingOperations(); IssuePendingOperations();
} }
void GeneratedCodeCache::IssuePendingOperations() { void GeneratedCodeCache::IssuePendingOperations() {
// Issue all the pending operations that were received when creating // Issue any operations that were received while creating the backend.
// the backend. while (!pending_ops_.empty()) {
for (auto const& op : pending_ops_) { // Take ownership of the next PendingOperation here. |op| will either be
IssueOperation(op.get()); // moved onto a queue in active_entries_map_ or issued and completed in
// |DoPendingGetBackend|.
std::unique_ptr<PendingOperation> op = std::move(pending_ops_.front());
pending_ops_.pop();
// Properly enqueue/dequeue ops for Write, Fetch, and Delete.
if (op->operation() != Operation::kGetBackend) {
EnqueueOperationAndIssueIfNext(std::move(op));
} else {
// There is no queue for get backend operations. Issue them immediately.
IssueOperation(op.get());
}
} }
pending_ops_.clear();
} }
void GeneratedCodeCache::IssueOperation(PendingOperation* op) { void GeneratedCodeCache::IssueOperation(PendingOperation* op) {
switch (op->operation()) { switch (op->operation()) {
case kFetch: case kFetch:
FetchEntryImpl(op->key(), op->ReleaseReadCallback()); FetchEntryImpl(op);
break; break;
case kWrite: case kWrite:
WriteDataImpl(op->key(), op->data()); WriteEntryImpl(op);
break; break;
case kDelete: case kDelete:
DeleteEntryImpl(op->key()); DeleteEntryImpl(op);
break; break;
case kGetBackend: case kGetBackend:
DoPendingGetBackend(op->ReleaseBackendCallback()); DoPendingGetBackend(op);
break; break;
} }
} }
void GeneratedCodeCache::WriteDataImpl( void GeneratedCodeCache::WriteEntryImpl(PendingOperation* op) {
const std::string& key, DCHECK_EQ(Operation::kWrite, op->operation());
scoped_refptr<net::IOBufferWithSize> buffer) {
if (backend_state_ != kInitialized) { if (backend_state_ != kInitialized) {
IssueQueuedOperationForEntry(key); // Silently fail the request.
CloseOperationAndIssueNext(op);
return; return;
} }
disk_cache::EntryResultCallback callback = disk_cache::EntryResult result = backend_->OpenOrCreateEntry(
base::BindOnce(&GeneratedCodeCache::CompleteForWriteData, op->key(), net::LOW,
weak_ptr_factory_.GetWeakPtr(), buffer, key); base::BindOnce(&GeneratedCodeCache::OpenCompleteForWrite,
weak_ptr_factory_.GetWeakPtr(), op));
disk_cache::EntryResult result =
backend_->OpenOrCreateEntry(key, net::LOW, std::move(callback));
if (result.net_error() != net::ERR_IO_PENDING) { if (result.net_error() != net::ERR_IO_PENDING) {
CompleteForWriteData(buffer, key, std::move(result)); OpenCompleteForWrite(op, std::move(result));
} }
} }
void GeneratedCodeCache::CompleteForWriteData( void GeneratedCodeCache::OpenCompleteForWrite(
scoped_refptr<net::IOBufferWithSize> buffer, PendingOperation* op,
const std::string& key,
disk_cache::EntryResult entry_result) { disk_cache::EntryResult entry_result) {
DCHECK_EQ(Operation::kWrite, op->operation());
if (entry_result.net_error() != net::OK) { if (entry_result.net_error() != net::OK) {
CollectStatistics(CacheEntryStatus::kError); CollectStatistics(CacheEntryStatus::kError);
IssueQueuedOperationForEntry(key); CloseOperationAndIssueNext(op);
return; return;
} }
int result = net::ERR_FAILED; if (entry_result.opened()) {
bool opened = entry_result.opened(); CollectStatistics(CacheEntryStatus::kUpdate);
{ } else {
disk_cache::ScopedEntryPtr disk_entry(entry_result.ReleaseEntry()); CollectStatistics(CacheEntryStatus::kCreate);
}
if (opened) { disk_cache::ScopedEntryPtr entry(entry_result.ReleaseEntry());
CollectStatistics(CacheEntryStatus::kUpdate); // There should be a valid entry if the open was successful.
} else { DCHECK(entry);
CollectStatistics(CacheEntryStatus::kCreate);
} // The response time must be written first, truncating the data.
// This call will truncate the data. This is safe to do since we read the auto time_buffer = op->time_buffer();
// entire data at the same time currently. If we want to read in parts we int result = entry->WriteData(
// have to doom the entry first. kResponseTimeStream, 0, time_buffer.get(), kResponseTimeSizeInBytes,
result = disk_entry->WriteData( base::BindOnce(&GeneratedCodeCache::WriteResponseTimeComplete,
kDataIndex, 0, buffer.get(), buffer->size(), weak_ptr_factory_.GetWeakPtr(), op),
base::BindOnce(&GeneratedCodeCache::WriteDataCompleted, true);
weak_ptr_factory_.GetWeakPtr(), key),
true); if (result != net::ERR_IO_PENDING) {
WriteResponseTimeComplete(op, result);
} }
// Write the data after the response time, truncating the data.
auto data_buffer = op->data_buffer();
result =
entry->WriteData(kDataStream, 0, data_buffer.get(), data_buffer->size(),
base::BindOnce(&GeneratedCodeCache::WriteDataComplete,
weak_ptr_factory_.GetWeakPtr(), op),
true);
if (result != net::ERR_IO_PENDING) { if (result != net::ERR_IO_PENDING) {
WriteDataCompleted(key, result); WriteDataComplete(op, result);
} }
} }
void GeneratedCodeCache::WriteDataCompleted(const std::string& key, int rv) { void GeneratedCodeCache::WriteResponseTimeComplete(PendingOperation* op,
if (rv < 0) { int rv) {
DCHECK_EQ(Operation::kWrite, op->operation());
op->VerifyCompletions(0); // WriteDataComplete did not run.
if (rv != kResponseTimeSizeInBytes) {
// The response time write failed; release the time buffer to signal that
// the overall request should also fail.
op->set_time_buffer(nullptr);
}
// |WriteDataComplete| needs to run and call CloseOperationAndIssueNext.
}
void GeneratedCodeCache::WriteDataComplete(PendingOperation* op, int rv) {
DCHECK_EQ(Operation::kWrite, op->operation());
op->VerifyCompletions(1); // WriteResponseTimeComplete ran.
if (rv != op->data_buffer()->size() || !op->time_buffer()) {
// The write failed; record the failure and doom the entry here.
CollectStatistics(CacheEntryStatus::kWriteFailed); CollectStatistics(CacheEntryStatus::kWriteFailed);
// The write failed; we should delete the entry. DoomEntry(op);
DeleteEntryImpl(key);
} }
IssueQueuedOperationForEntry(key); CloseOperationAndIssueNext(op);
} }
void GeneratedCodeCache::FetchEntryImpl(const std::string& key, void GeneratedCodeCache::FetchEntryImpl(PendingOperation* op) {
ReadDataCallback read_data_callback) { DCHECK_EQ(Operation::kFetch, op->operation());
if (backend_state_ != kInitialized) { if (backend_state_ != kInitialized) {
std::move(read_data_callback).Run(base::Time(), mojo_base::BigBuffer()); op->TakeReadCallback().Run(base::Time(), mojo_base::BigBuffer());
IssueQueuedOperationForEntry(key); CloseOperationAndIssueNext(op);
return; return;
} }
disk_cache::EntryResultCallback callback =
base::BindOnce(&GeneratedCodeCache::OpenCompleteForReadData,
weak_ptr_factory_.GetWeakPtr(), read_data_callback, key);
// This is a part of loading cycle and hence should run with a high priority. // This is a part of loading cycle and hence should run with a high priority.
disk_cache::EntryResult result = disk_cache::EntryResult result = backend_->OpenEntry(
backend_->OpenEntry(key, net::HIGHEST, std::move(callback)); op->key(), net::HIGHEST,
base::BindOnce(&GeneratedCodeCache::OpenCompleteForRead,
weak_ptr_factory_.GetWeakPtr(), op));
if (result.net_error() != net::ERR_IO_PENDING) { if (result.net_error() != net::ERR_IO_PENDING) {
OpenCompleteForReadData(read_data_callback, key, std::move(result)); OpenCompleteForRead(op, std::move(result));
} }
} }
void GeneratedCodeCache::OpenCompleteForReadData( void GeneratedCodeCache::OpenCompleteForRead(
ReadDataCallback read_data_callback, PendingOperation* op,
const std::string& key,
disk_cache::EntryResult entry_result) { disk_cache::EntryResult entry_result) {
DCHECK_EQ(Operation::kFetch, op->operation());
if (entry_result.net_error() != net::OK) { if (entry_result.net_error() != net::OK) {
CollectStatistics(CacheEntryStatus::kMiss); CollectStatistics(CacheEntryStatus::kMiss);
std::move(read_data_callback).Run(base::Time(), mojo_base::BigBuffer()); op->TakeReadCallback().Run(base::Time(), mojo_base::BigBuffer());
IssueQueuedOperationForEntry(key); CloseOperationAndIssueNext(op);
return; return;
} }
disk_cache::ScopedEntryPtr disk_entry(entry_result.ReleaseEntry()); disk_cache::ScopedEntryPtr entry(entry_result.ReleaseEntry());
// There should be a valid entry if the open was successful. // There should be a valid entry if the open was successful.
DCHECK(disk_entry); DCHECK(entry);
int entry_size = disk_entry->GetDataSize(kDataIndex); // To avoid a copying the data, we read it in two parts, response time and
// Use a BigIOBuffer backed to read and transfer the entry without copying. // code. Create the buffers and pass them to |op|.
// We have to read the data in two parts, response time and code, if we don't scoped_refptr<net::IOBufferWithSize> time_buffer =
// want to copy. Use the same buffer to read the response time and the code. base::MakeRefCounted<net::IOBufferWithSize>(kResponseTimeSizeInBytes);
int code_size = op->set_time_buffer(time_buffer);
std::max(kResponseTimeSizeInBytes, entry_size - kResponseTimeSizeInBytes); int data_size = entry->GetDataSize(kDataStream);
// Release the disk entry to pass it to |ReadResponseTimeComplete|. scoped_refptr<BigIOBuffer> data_buffer =
disk_cache::Entry* entry = disk_entry.release(); base::MakeRefCounted<BigIOBuffer>(data_size);
scoped_refptr<net::IOBufferWithSize> buffer = op->set_data_buffer(data_buffer);
base::MakeRefCounted<BigIOBuffer>(static_cast<size_t>(code_size));
net::CompletionOnceCallback callback = base::BindOnce( // We must read response time first.
&GeneratedCodeCache::ReadResponseTimeComplete, int result = entry->ReadData(
weak_ptr_factory_.GetWeakPtr(), key, read_data_callback, buffer, entry); kResponseTimeStream, 0, time_buffer.get(), kResponseTimeSizeInBytes,
int result = entry->ReadData(kDataIndex, 0, buffer.get(), base::BindOnce(&GeneratedCodeCache::ReadResponseTimeComplete,
kResponseTimeSizeInBytes, std::move(callback)); weak_ptr_factory_.GetWeakPtr(), op));
if (result != net::ERR_IO_PENDING) { if (result != net::ERR_IO_PENDING) {
ReadResponseTimeComplete(key, read_data_callback, buffer, entry, result); ReadResponseTimeComplete(op, result);
}
// Read the data after the response time.
result =
entry->ReadData(kDataStream, 0, data_buffer.get(), data_buffer->size(),
base::BindOnce(&GeneratedCodeCache::ReadDataComplete,
weak_ptr_factory_.GetWeakPtr(), op));
if (result != net::ERR_IO_PENDING) {
ReadDataComplete(op, result);
} }
} }
void GeneratedCodeCache::ReadResponseTimeComplete( void GeneratedCodeCache::ReadResponseTimeComplete(PendingOperation* op,
const std::string& key, int rv) {
ReadDataCallback read_data_callback, DCHECK_EQ(Operation::kFetch, op->operation());
scoped_refptr<net::IOBufferWithSize> buffer, op->VerifyCompletions(0); // ReadDataComplete did not run.
disk_cache::Entry* entry,
int rv) {
DCHECK(entry);
disk_cache::ScopedEntryPtr disk_entry(entry);
if (rv != kResponseTimeSizeInBytes) { if (rv != kResponseTimeSizeInBytes) {
CollectStatistics(CacheEntryStatus::kMiss); CollectStatistics(CacheEntryStatus::kMiss);
std::move(read_data_callback).Run(base::Time(), mojo_base::BigBuffer()); // The response time read failed; release the time buffer to signal that
} else { // the overall request should also fail.
// This is considered a cache hit, since response time was read. op->set_time_buffer(nullptr);
CollectStatistics(CacheEntryStatus::kHit); return;
int64_t raw_response_time = *(reinterpret_cast<int64_t*>(buffer->data()));
net::CompletionOnceCallback callback = base::BindOnce(
&GeneratedCodeCache::ReadCodeComplete, weak_ptr_factory_.GetWeakPtr(),
key, read_data_callback, buffer, raw_response_time);
int result =
disk_entry->ReadData(kDataIndex, kResponseTimeSizeInBytes, buffer.get(),
buffer->size(), std::move(callback));
if (result != net::ERR_IO_PENDING) {
ReadCodeComplete(key, read_data_callback, buffer, raw_response_time,
result);
}
} }
// This is considered a cache hit, since response time was read.
CollectStatistics(CacheEntryStatus::kHit);
// |ReadDataComplete| needs to run and call CloseOperationAndIssueNext.
} }
void GeneratedCodeCache::ReadCodeComplete( void GeneratedCodeCache::ReadDataComplete(PendingOperation* op, int rv) {
const std::string& key, DCHECK_EQ(Operation::kFetch, op->operation());
ReadDataCallback callback, op->VerifyCompletions(1); // ReadResponseTimeComplete ran.
scoped_refptr<net::IOBufferWithSize> buffer, // Fail the request if either read failed.
int64_t raw_response_time, if (rv != op->data_buffer()->size() || !op->time_buffer()) {
int rv) { op->TakeReadCallback().Run(base::Time(), mojo_base::BigBuffer());
base::Time response_time = base::Time::FromDeltaSinceWindowsEpoch( // Doom this entry since it is inaccessible.
base::TimeDelta::FromMicroseconds(raw_response_time)); DoomEntry(op);
if (rv != buffer->size()) {
// Trim the buffer in the unlikely case that code size is less than
// kResponseTimeSizeInBytes. On error, return an empty buffer with the
// response time, so the renderer can clear the metadata.
mojo_base::BigBuffer trimmed_buffer =
rv > 0 ? mojo_base::BigBuffer(base::make_span(
reinterpret_cast<const uint8_t*>(buffer->data()),
static_cast<size_t>(rv)))
: mojo_base::BigBuffer();
std::move(callback).Run(response_time, std::move(trimmed_buffer));
} else { } else {
std::move(callback).Run( int64_t raw_response_time =
response_time, static_cast<BigIOBuffer*>(buffer.get())->TakeBuffer()); *(reinterpret_cast<int64_t*>(op->time_buffer()->data()));
base::Time response_time = base::Time::FromDeltaSinceWindowsEpoch(
base::TimeDelta::FromMicroseconds(raw_response_time));
op->TakeReadCallback().Run(response_time, op->data_buffer()->TakeBuffer());
} }
IssueQueuedOperationForEntry(key); CloseOperationAndIssueNext(op);
} }
void GeneratedCodeCache::DeleteEntryImpl(const std::string& key) { void GeneratedCodeCache::DeleteEntryImpl(PendingOperation* op) {
if (backend_state_ != kInitialized) DCHECK(op->operation() == Operation::kDelete);
return; DoomEntry(op);
CloseOperationAndIssueNext(op);
}
void GeneratedCodeCache::DoomEntry(PendingOperation* op) {
// Write, Fetch, and Delete may all doom an entry.
DCHECK_NE(Operation::kGetBackend, op->operation());
// Entries shouldn't be doomed if the backend hasn't been initialized.
DCHECK_EQ(kInitialized, backend_state_);
CollectStatistics(CacheEntryStatus::kClear); CollectStatistics(CacheEntryStatus::kClear);
backend_->DoomEntry(key, net::LOWEST, net::CompletionOnceCallback()); backend_->DoomEntry(op->key(), net::LOWEST, net::CompletionOnceCallback());
} }
void GeneratedCodeCache::IssueQueuedOperationForEntry(const std::string& key) { void GeneratedCodeCache::IssueNextOperation(const std::string& key) {
auto it = active_entries_map_.find(key); auto it = active_entries_map_.find(key);
DCHECK(it != active_entries_map_.end()); if (it == active_entries_map_.end())
// If no more queued entries then remove the entry to indicate that there are
// no in-progress operations for this key.
if (it->second.empty()) {
active_entries_map_.erase(it);
return; return;
}
std::unique_ptr<PendingOperation> op = std::move(it->second.front()); DCHECK(!it->second.empty());
// Pop it before issuing the operation. Still retain the queue even if it is IssueOperation(it->second.front().get());
// empty to indicate that there is a in-progress operation.
it->second.pop();
IssueOperation(op.get());
} }
bool GeneratedCodeCache::TryBeginOperation(const std::string& key) { void GeneratedCodeCache::CloseOperationAndIssueNext(PendingOperation* op) {
auto it = active_entries_map_.find(key); // Dequeue op, keeping it alive long enough to issue another op.
if (it != active_entries_map_.end()) std::unique_ptr<PendingOperation> keep_alive = DequeueOperation(op);
return false; IssueNextOperation(op->key());
// Create an entry to indicate there is a in-progress operation for this key.
active_entries_map_[key] = base::queue<std::unique_ptr<PendingOperation>>();
return true;
} }
void GeneratedCodeCache::EnqueueAsPendingOperation( void GeneratedCodeCache::EnqueueOperationAndIssueIfNext(
const std::string& key,
std::unique_ptr<PendingOperation> op) { std::unique_ptr<PendingOperation> op) {
auto it = active_entries_map_.find(key); // GetBackend ops have no key and shouldn't be enqueued here.
DCHECK(it != active_entries_map_.end()); DCHECK_NE(Operation::kGetBackend, op->operation());
auto it = active_entries_map_.find(op->key());
bool can_issue = false;
if (it == active_entries_map_.end()) {
it = active_entries_map_.emplace(op->key(), PendingOperationQueue()).first;
can_issue = true;
}
const std::string& key = op->key();
it->second.emplace(std::move(op)); it->second.emplace(std::move(op));
if (can_issue)
IssueNextOperation(key);
} }
void GeneratedCodeCache::DoPendingGetBackend(GetBackendCallback user_callback) { std::unique_ptr<GeneratedCodeCache::PendingOperation>
if (backend_state_ == kInitialized) { GeneratedCodeCache::DequeueOperation(PendingOperation* op) {
std::move(user_callback).Run(backend_.get()); auto it = active_entries_map_.find(op->key());
return; DCHECK(it != active_entries_map_.end());
DCHECK(!it->second.empty());
std::unique_ptr<PendingOperation> result = std::move(it->second.front());
// |op| should be at the front.
DCHECK_EQ(op, result.get());
it->second.pop();
// Delete the queue if it becomes empty.
if (it->second.empty()) {
active_entries_map_.erase(it);
} }
return result;
}
DCHECK_EQ(backend_state_, kFailed); void GeneratedCodeCache::DoPendingGetBackend(PendingOperation* op) {
std::move(user_callback).Run(nullptr); // |op| is kept alive in |IssuePendingOperations| for the duration of this
return; // call. We shouldn't access |op| after returning from this function.
DCHECK_EQ(kGetBackend, op->operation());
if (backend_state_ == kInitialized) {
op->TakeBackendCallback().Run(backend_.get());
} else {
DCHECK_EQ(backend_state_, kFailed);
op->TakeBackendCallback().Run(nullptr);
}
} }
void GeneratedCodeCache::SetLastUsedTimeForTest( void GeneratedCodeCache::SetLastUsedTimeForTest(
......
...@@ -88,10 +88,10 @@ class CONTENT_EXPORT GeneratedCodeCache { ...@@ -88,10 +88,10 @@ class CONTENT_EXPORT GeneratedCodeCache {
// Writes data to the cache. If there is an entry corresponding to // Writes data to the cache. If there is an entry corresponding to
// <|resource_url|, |origin_lock|> this overwrites the existing data. If // <|resource_url|, |origin_lock|> this overwrites the existing data. If
// there is no entry it creates a new one. // there is no entry it creates a new one.
void WriteData(const GURL& resource_url, void WriteEntry(const GURL& resource_url,
const GURL& origin_lock, const GURL& origin_lock,
const base::Time& response_time, const base::Time& response_time,
base::span<const uint8_t> data); mojo_base::BigBuffer data);
// Fetch entry corresponding to <resource_url, origin_lock> from the cache // Fetch entry corresponding to <resource_url, origin_lock> from the cache
// and return it using the ReadDataCallback. // and return it using the ReadDataCallback.
...@@ -122,7 +122,7 @@ class CONTENT_EXPORT GeneratedCodeCache { ...@@ -122,7 +122,7 @@ class CONTENT_EXPORT GeneratedCodeCache {
enum Operation { kFetch, kWrite, kDelete, kGetBackend }; enum Operation { kFetch, kWrite, kDelete, kGetBackend };
// Data streams corresponding to each entry. // Data streams corresponding to each entry.
enum { kDataIndex = 1 }; enum { kResponseTimeStream = 0, kDataStream = 1 };
// Creates a simple_disk_cache backend. // Creates a simple_disk_cache backend.
void CreateBackend(); void CreateBackend();
...@@ -130,50 +130,41 @@ class CONTENT_EXPORT GeneratedCodeCache { ...@@ -130,50 +130,41 @@ class CONTENT_EXPORT GeneratedCodeCache {
scoped_refptr<base::RefCountedData<ScopedBackendPtr>> backend_ptr, scoped_refptr<base::RefCountedData<ScopedBackendPtr>> backend_ptr,
int rv); int rv);
// The requests that are received while tha backend is being initialized // Issues ops that were received while the backend was being initialized.
// are recorded in pending operations list. This function issues all pending
// operations.
void IssuePendingOperations(); void IssuePendingOperations();
void IssueOperation(PendingOperation* op);
// Write entry to cache // Writes entry to cache.
void WriteDataImpl(const std::string& key, void WriteEntryImpl(PendingOperation* op);
scoped_refptr<net::IOBufferWithSize> buffer); void OpenCompleteForWrite(PendingOperation* op,
void CompleteForWriteData(scoped_refptr<net::IOBufferWithSize> buffer,
const std::string& key,
disk_cache::EntryResult result); disk_cache::EntryResult result);
void WriteDataCompleted(const std::string& key, int rv); void WriteResponseTimeComplete(PendingOperation* op, int rv);
void WriteDataComplete(PendingOperation* op, int rv);
// Fetch entry from cache
void FetchEntryImpl(const std::string& key, ReadDataCallback); // Fetches entry from cache.
void OpenCompleteForReadData(ReadDataCallback callback, void FetchEntryImpl(PendingOperation* op);
const std::string& key, void OpenCompleteForRead(PendingOperation* op,
disk_cache::EntryResult result); disk_cache::EntryResult result);
void ReadResponseTimeComplete(const std::string& key, void ReadResponseTimeComplete(PendingOperation* op, int rv);
ReadDataCallback callback, void ReadDataComplete(PendingOperation* op, int rv);
scoped_refptr<net::IOBufferWithSize> buffer,
disk_cache::Entry* entry, // Deletes entry from cache.
int rv); void DeleteEntryImpl(PendingOperation* op);
void ReadCodeComplete(const std::string& key,
ReadDataCallback callback, void DoomEntry(PendingOperation* op);
scoped_refptr<net::IOBufferWithSize> buffer,
int64_t raw_response_time, // Issues the next operation on the queue for |key|.
int rv); void IssueNextOperation(const std::string& key);
// Removes |op| and issues the next operation on its queue.
// Delete entry from cache void CloseOperationAndIssueNext(PendingOperation* op);
void DeleteEntryImpl(const std::string& key);
// Enqueues the operation issues it if there are no pending operations for
// Issues the queued operation at the front of the queue for the given |key|. // its key.
void IssueQueuedOperationForEntry(const std::string& key); void EnqueueOperationAndIssueIfNext(std::unique_ptr<PendingOperation> op);
// Checks for in-progress operations. If there are none, marks the key as // Dequeues the operation and transfers ownership to caller.
// in-progress and returns true. Otherwise returns false. std::unique_ptr<PendingOperation> DequeueOperation(PendingOperation* op);
bool TryBeginOperation(const std::string& key);
// Enqueues the operation as pending for the key. This should only be called
// if TryBeginOperation returned false.
void EnqueueAsPendingOperation(const std::string& key,
std::unique_ptr<PendingOperation> op);
void IssueOperation(PendingOperation* op);
void DoPendingGetBackend(GetBackendCallback callback); void DoPendingGetBackend(PendingOperation* op);
void OpenCompleteForSetLastUsedForTest( void OpenCompleteForSetLastUsedForTest(
base::Time time, base::Time time,
...@@ -185,11 +176,12 @@ class CONTENT_EXPORT GeneratedCodeCache { ...@@ -185,11 +176,12 @@ class CONTENT_EXPORT GeneratedCodeCache {
std::unique_ptr<disk_cache::Backend> backend_; std::unique_ptr<disk_cache::Backend> backend_;
BackendState backend_state_; BackendState backend_state_;
std::vector<std::unique_ptr<PendingOperation>> pending_ops_; // Queue for operations received while initializing the backend.
using PendingOperationQueue = base::queue<std::unique_ptr<PendingOperation>>;
PendingOperationQueue pending_ops_;
// Map from key to queue ops. // Map from key to queue of pending operations.
std::map<std::string, base::queue<std::unique_ptr<PendingOperation>>> std::map<std::string, PendingOperationQueue> active_entries_map_;
active_entries_map_;
base::FilePath path_; base::FilePath path_;
int max_size_bytes_; int max_size_bytes_;
......
...@@ -61,8 +61,8 @@ class GeneratedCodeCacheTest : public testing::Test { ...@@ -61,8 +61,8 @@ class GeneratedCodeCacheTest : public testing::Test {
const std::string& data, const std::string& data,
base::Time response_time) { base::Time response_time) {
std::vector<uint8_t> vector_data(data.begin(), data.end()); std::vector<uint8_t> vector_data(data.begin(), data.end());
generated_code_cache_->WriteData(url, origin_lock, response_time, generated_code_cache_->WriteEntry(url, origin_lock, response_time,
vector_data); vector_data);
} }
void DeleteFromCache(const GURL& url, const GURL& origin_lock) { void DeleteFromCache(const GURL& url, const GURL& origin_lock) {
......
...@@ -142,7 +142,8 @@ void CodeCacheHostImpl::DidGenerateCacheableMetadata( ...@@ -142,7 +142,8 @@ void CodeCacheHostImpl::DidGenerateCacheableMetadata(
if (!origin_lock) if (!origin_lock)
return; return;
code_cache->WriteData(url, *origin_lock, expected_response_time, data); code_cache->WriteEntry(url, *origin_lock, expected_response_time,
std::move(data));
} }
void CodeCacheHostImpl::FetchCachedCode(blink::mojom::CodeCacheType cache_type, void CodeCacheHostImpl::FetchCachedCode(blink::mojom::CodeCacheType cache_type,
......
...@@ -313,8 +313,8 @@ class RemoveCodeCacheTester { ...@@ -313,8 +313,8 @@ class RemoveCodeCacheTester {
GURL origin_lock, GURL origin_lock,
const std::string& data) { const std::string& data) {
std::vector<uint8_t> data_vector(data.begin(), data.end()); std::vector<uint8_t> data_vector(data.begin(), data.end());
GetCache(cache)->WriteData(url, origin_lock, base::Time::Now(), GetCache(cache)->WriteEntry(url, origin_lock, base::Time::Now(),
data_vector); data_vector);
base::RunLoop().RunUntilIdle(); base::RunLoop().RunUntilIdle();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment