Commit 41e71f90 authored by Bill Budge, committed by Commit Bot

[code cache] Refactor GeneratedCodeCache to split reads and writes

- Splits Write into two parts, response time and data. Uses stream 0
  for response time and stream 1 for data, to avoid a slowdown (see the
  sketch after this list).
- Splits Read similarly.
- Eliminates the extra data copy when writing that was caused by passing
  a span from CodeCacheHostImpl rather than the Mojo BigBuffer received
  from the renderer.
- Changes the keyed PendingOperation queues to keep the active op at
  the head. This simplifies the helper method and callback signatures.
- PendingOperations now own the IOBuffers for Write and Fetch.
- Using stream 0 now will make deduplication of the code caches (not
  storing a copy of the code for each origin) easier in the future.

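As a rough, standalone illustration of the new two-stream layout (this is
not the Chromium disk_cache API; FakeEntry and WriteEntrySketch are made-up
names for this sketch), the response time and the serialized code go to two
independent streams of the same entry, so the data no longer carries a
timestamp prefix:

```cpp
#include <array>
#include <cstdint>
#include <utility>
#include <vector>

// Made-up stand-in for a cache entry with two data streams, mirroring the
// new layout: stream 0 holds the response time, stream 1 holds the code.
struct FakeEntry {
  static constexpr int kResponseTimeStream = 0;
  static constexpr int kDataStream = 1;
  std::array<std::vector<uint8_t>, 2> streams;
};

// Sketch of the split write: the response time (raw microseconds) goes to
// stream 0 and the code bytes go to stream 1. In the real cache these are
// two asynchronous entry writes tracked by one PendingOperation.
void WriteEntrySketch(FakeEntry& entry,
                      int64_t response_time_us,
                      std::vector<uint8_t> code) {
  const uint8_t* time_bytes =
      reinterpret_cast<const uint8_t*>(&response_time_us);
  entry.streams[FakeEntry::kResponseTimeStream].assign(
      time_bytes, time_bytes + sizeof(response_time_us));
  // Taking the buffer by value and moving it models passing the renderer's
  // BigBuffer through without an extra copy.
  entry.streams[FakeEntry::kDataStream] = std::move(code);
}
```
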
Behavior changes:
- IssuePendingOperations now runs PendingOperations through the per-key
  queues rather than issuing them all at once. The old behavior probably
  never caused problems in the real world, but it seemed incorrect.
- Reads where one or more parts fail now doom the entry, which helps the
  cache get rid of data that can no longer be used (see the sketch after
  this list).
- Response time is now stored in stream 0. Any data cached by previous
  versions of Chrome, where the response time was a prefix on stream 1,
  will therefore cause a cache miss, since the timestamp read fails.
  This CL also makes the data read fail in that case, since returning
  data prefixed with a timestamp would now be confusing. Invalidating
  these caches shouldn't be a problem, since a new version of Chrome/V8
  is expected to invalidate the code caches anyway because their
  version/feature header won't match, see:
  https://cs.chromium.org/chromium/src/v8/src/snapshot/code-serializer.cc?rcl=6c89d2ffb531b3c79181532b5de04adaf7206049&l=387

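A minimal sketch of the fetch/doom behavior described above, again using
made-up standalone types rather than the real disk_cache or net interfaces:
if either the stream 0 (response time) read or the stream 1 (data) read
fails, the entry is doomed so the backend can reclaim it, and the fetch is
reported as a miss. In this sketch an empty stream stands in for a failed
read.

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

// Made-up entry with two streams: 0 = response time, 1 = code data.
struct FakeEntry {
  std::array<std::vector<uint8_t>, 2> streams;
  bool doomed = false;  // set when the entry should be discarded
};

struct FetchResult {
  int64_t response_time_us = 0;
  std::vector<uint8_t> data;
};

// A failure of either part dooms the entry. An entry written by an older
// Chrome has no timestamp in stream 0 (it used to be a prefix on stream 1),
// so it is treated as a miss and doomed.
std::optional<FetchResult> FetchEntrySketch(FakeEntry& entry) {
  const auto& time_stream = entry.streams[0];
  if (time_stream.size() != sizeof(int64_t)) {
    entry.doomed = true;  // response time read failed
    return std::nullopt;
  }
  FetchResult result;
  std::memcpy(&result.response_time_us, time_stream.data(), sizeof(int64_t));
  result.data = entry.streams[1];
  if (result.data.empty()) {
    entry.doomed = true;  // data read failed
    return std::nullopt;
  }
  return result;
}
```
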
Bug: chromium:992991
Change-Id: If3c5e8a83bca811fcb809a8a89c6d22942456f13
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1814685
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Maks Orlovich <morlovich@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#700307}
parent 047b6d71
@@ -88,10 +88,10 @@ class CONTENT_EXPORT GeneratedCodeCache {
// Writes data to the cache. If there is an entry corresponding to
// <|resource_url|, |origin_lock|> this overwrites the existing data. If
// there is no entry it creates a new one.
void WriteData(const GURL& resource_url,
const GURL& origin_lock,
const base::Time& response_time,
base::span<const uint8_t> data);
void WriteEntry(const GURL& resource_url,
const GURL& origin_lock,
const base::Time& response_time,
mojo_base::BigBuffer data);
// Fetch entry corresponding to <resource_url, origin_lock> from the cache
// and return it using the ReadDataCallback.
@@ -122,7 +122,7 @@ class CONTENT_EXPORT GeneratedCodeCache {
enum Operation { kFetch, kWrite, kDelete, kGetBackend };
// Data streams corresponding to each entry.
enum { kDataIndex = 1 };
enum { kResponseTimeStream = 0, kDataStream = 1 };
// Creates a simple_disk_cache backend.
void CreateBackend();
@@ -130,50 +130,41 @@ class CONTENT_EXPORT GeneratedCodeCache {
scoped_refptr<base::RefCountedData<ScopedBackendPtr>> backend_ptr,
int rv);
// The requests that are received while tha backend is being initialized
// are recorded in pending operations list. This function issues all pending
// operations.
// Issues ops that were received while the backend was being initialized.
void IssuePendingOperations();
void IssueOperation(PendingOperation* op);
// Write entry to cache
void WriteDataImpl(const std::string& key,
scoped_refptr<net::IOBufferWithSize> buffer);
void CompleteForWriteData(scoped_refptr<net::IOBufferWithSize> buffer,
const std::string& key,
// Writes entry to cache.
void WriteEntryImpl(PendingOperation* op);
void OpenCompleteForWrite(PendingOperation* op,
disk_cache::EntryResult result);
void WriteDataCompleted(const std::string& key, int rv);
// Fetch entry from cache
void FetchEntryImpl(const std::string& key, ReadDataCallback);
void OpenCompleteForReadData(ReadDataCallback callback,
const std::string& key,
disk_cache::EntryResult result);
void ReadResponseTimeComplete(const std::string& key,
ReadDataCallback callback,
scoped_refptr<net::IOBufferWithSize> buffer,
disk_cache::Entry* entry,
int rv);
void ReadCodeComplete(const std::string& key,
ReadDataCallback callback,
scoped_refptr<net::IOBufferWithSize> buffer,
int64_t raw_response_time,
int rv);
// Delete entry from cache
void DeleteEntryImpl(const std::string& key);
// Issues the queued operation at the front of the queue for the given |key|.
void IssueQueuedOperationForEntry(const std::string& key);
// Checks for in-progress operations. If there are none, marks the key as
// in-progress and returns true. Otherwise returns false.
bool TryBeginOperation(const std::string& key);
// Enqueues the operation as pending for the key. This should only be called
// if TryBeginOperation returned false.
void EnqueueAsPendingOperation(const std::string& key,
std::unique_ptr<PendingOperation> op);
void IssueOperation(PendingOperation* op);
void WriteResponseTimeComplete(PendingOperation* op, int rv);
void WriteDataComplete(PendingOperation* op, int rv);
// Fetches entry from cache.
void FetchEntryImpl(PendingOperation* op);
void OpenCompleteForRead(PendingOperation* op,
disk_cache::EntryResult result);
void ReadResponseTimeComplete(PendingOperation* op, int rv);
void ReadDataComplete(PendingOperation* op, int rv);
// Deletes entry from cache.
void DeleteEntryImpl(PendingOperation* op);
void DoomEntry(PendingOperation* op);
// Issues the next operation on the queue for |key|.
void IssueNextOperation(const std::string& key);
// Removes |op| and issues the next operation on its queue.
void CloseOperationAndIssueNext(PendingOperation* op);
// Enqueues the operation and issues it if there are no pending operations
// for its key.
void EnqueueOperationAndIssueIfNext(std::unique_ptr<PendingOperation> op);
// Dequeues the operation and transfers ownership to caller.
std::unique_ptr<PendingOperation> DequeueOperation(PendingOperation* op);
void DoPendingGetBackend(GetBackendCallback callback);
void DoPendingGetBackend(PendingOperation* op);
void OpenCompleteForSetLastUsedForTest(
base::Time time,
@@ -185,11 +176,12 @@ class CONTENT_EXPORT GeneratedCodeCache {
std::unique_ptr<disk_cache::Backend> backend_;
BackendState backend_state_;
std::vector<std::unique_ptr<PendingOperation>> pending_ops_;
// Queue for operations received while initializing the backend.
using PendingOperationQueue = base::queue<std::unique_ptr<PendingOperation>>;
PendingOperationQueue pending_ops_;
// Map from key to queue ops.
std::map<std::string, base::queue<std::unique_ptr<PendingOperation>>>
active_entries_map_;
// Map from key to queue of pending operations.
std::map<std::string, PendingOperationQueue> active_entries_map_;
base::FilePath path_;
int max_size_bytes_;
......
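The per-key queue declarations above (IssueNextOperation,
CloseOperationAndIssueNext, EnqueueOperationAndIssueIfNext,
active_entries_map_) can be modeled with a small standalone scheduler.
This is a sketch in plain STL types, not the real base::queue or
PendingOperation; the invariant it illustrates is that the operation at
the head of a key's queue is the active one.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <utility>

// Toy stand-in for GeneratedCodeCache::PendingOperation.
struct PendingOp {
  std::string key;
  std::string kind;  // "write", "fetch", ...
};

class OpScheduler {
 public:
  // Enqueues |op| and issues it immediately if it is the only op for its
  // key (the role EnqueueOperationAndIssueIfNext plays in the CL).
  void Enqueue(std::unique_ptr<PendingOp> op) {
    auto& queue = queues_[op->key];
    queue.push(std::move(op));
    if (queue.size() == 1)
      Issue(queue.front().get());
  }

  // Called when the active op for |key| finishes: pop it and issue the
  // next one, if any (the role of CloseOperationAndIssueNext).
  void Complete(const std::string& key) {
    auto it = queues_.find(key);
    if (it == queues_.end())
      return;
    it->second.pop();
    if (it->second.empty())
      queues_.erase(it);                  // no more work for this key
    else
      Issue(it->second.front().get());    // next op becomes the active one
  }

 private:
  void Issue(PendingOp* op) {
    std::cout << "issuing " << op->kind << " for key " << op->key << "\n";
  }

  // The active op for each key sits at the head of its queue.
  std::map<std::string, std::queue<std::unique_ptr<PendingOp>>> queues_;
};

int main() {
  OpScheduler scheduler;
  scheduler.Enqueue(std::make_unique<PendingOp>(PendingOp{"key1", "write"}));
  scheduler.Enqueue(std::make_unique<PendingOp>(PendingOp{"key1", "fetch"}));
  scheduler.Complete("key1");  // write done -> queued fetch is issued
}
```

Enqueuing a second operation for the same key leaves it queued until
Complete() runs for the first, which is the ordering the new helpers
maintain.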
@@ -61,8 +61,8 @@ class GeneratedCodeCacheTest : public testing::Test {
const std::string& data,
base::Time response_time) {
std::vector<uint8_t> vector_data(data.begin(), data.end());
generated_code_cache_->WriteData(url, origin_lock, response_time,
vector_data);
generated_code_cache_->WriteEntry(url, origin_lock, response_time,
vector_data);
}
void DeleteFromCache(const GURL& url, const GURL& origin_lock) {
......
@@ -142,7 +142,8 @@ void CodeCacheHostImpl::DidGenerateCacheableMetadata(
if (!origin_lock)
return;
code_cache->WriteData(url, *origin_lock, expected_response_time, data);
code_cache->WriteEntry(url, *origin_lock, expected_response_time,
std::move(data));
}
void CodeCacheHostImpl::FetchCachedCode(blink::mojom::CodeCacheType cache_type,
......
@@ -313,8 +313,8 @@ class RemoveCodeCacheTester {
GURL origin_lock,
const std::string& data) {
std::vector<uint8_t> data_vector(data.begin(), data.end());
GetCache(cache)->WriteData(url, origin_lock, base::Time::Now(),
data_vector);
GetCache(cache)->WriteEntry(url, origin_lock, base::Time::Now(),
data_vector);
base::RunLoop().RunUntilIdle();
}
......