Commit f0f54006 authored by Daniel Murphy's avatar Daniel Murphy Committed by Commit Bot

[IndexedDB] Adding last_modified and size to 'file' BlobInfos

The goal of this change is to avoid writing empty blob files in
IndexedDB solely to store the last_modified date.

This change creates a migration for IndexedDB from v3 to v4. This
migration changes the way BlobInfos are encoded, and adds 'size' and
'last modified' to the 'File' BlobInfo.

Now that the metadata is changed, this change also avoids writing
'empty' files to disk. Previously this was where the last_modified time
was stored, but since that is now in the metadata these no longer need
to be written.

Bug: 1024966,1015214
Change-Id: I1309561373e0d917119fece4c552edd6ff6f5c18
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2014060
Commit-Queue: Daniel Murphy <dmurph@chromium.org>
Reviewed-by: default avatarenne <enne@chromium.org>
Reviewed-by: default avatarMarijn Kruisselbrink <mek@chromium.org>
Cr-Commit-Position: refs/heads/master@{#735149}
parent afcab37d
...@@ -264,8 +264,9 @@ Status MergeDatabaseIntoActiveBlobJournal( ...@@ -264,8 +264,9 @@ Status MergeDatabaseIntoActiveBlobJournal(
// Blob Data is encoded as a series of: // Blob Data is encoded as a series of:
// { is_file [bool], blob_number [int64_t as varInt], // { is_file [bool], blob_number [int64_t as varInt],
// type [string-with-length, may be empty], // type [string-with-length, may be empty],
// (for Blobs only) size [int64_t as varInt] // size [int64_t as varInt]
// (for Files only) fileName [string-with-length] // (for Files only) fileName [string-with-length]
// (for Files only) lastModified [int64_t as varInt, in microseconds]
// } // }
// There is no length field; just read until you run out of data. // There is no length field; just read until you run out of data.
std::string EncodeBlobInfos(const std::vector<IndexedDBBlobInfo>& blob_info) { std::string EncodeBlobInfos(const std::vector<IndexedDBBlobInfo>& blob_info) {
...@@ -274,16 +275,19 @@ std::string EncodeBlobInfos(const std::vector<IndexedDBBlobInfo>& blob_info) { ...@@ -274,16 +275,19 @@ std::string EncodeBlobInfos(const std::vector<IndexedDBBlobInfo>& blob_info) {
EncodeBool(info.is_file(), &ret); EncodeBool(info.is_file(), &ret);
EncodeVarInt(info.blob_number(), &ret); EncodeVarInt(info.blob_number(), &ret);
EncodeStringWithLength(info.type(), &ret); EncodeStringWithLength(info.type(), &ret);
if (info.is_file()) EncodeVarInt(info.size(), &ret);
if (info.is_file()) {
EncodeStringWithLength(info.file_name(), &ret); EncodeStringWithLength(info.file_name(), &ret);
else EncodeVarInt(
EncodeVarInt(info.size(), &ret); info.last_modified().ToDeltaSinceWindowsEpoch().InMicroseconds(),
&ret);
}
} }
return ret; return ret;
} }
bool DecodeBlobInfos(const std::string& data, bool DecodeV3BlobInfos(const base::StringPiece& data,
std::vector<IndexedDBBlobInfo>* output) { std::vector<IndexedDBBlobInfo>* output) {
std::vector<IndexedDBBlobInfo> ret; std::vector<IndexedDBBlobInfo> ret;
output->clear(); output->clear();
StringPiece slice(data); StringPiece slice(data);
...@@ -304,7 +308,9 @@ bool DecodeBlobInfos(const std::string& data, ...@@ -304,7 +308,9 @@ bool DecodeBlobInfos(const std::string& data,
if (is_file) { if (is_file) {
if (!DecodeStringWithLength(&slice, &file_name)) if (!DecodeStringWithLength(&slice, &file_name))
return false; return false;
ret.push_back(IndexedDBBlobInfo(blob_number, type, file_name)); ret.push_back(IndexedDBBlobInfo(blob_number, type, file_name,
base::Time(),
IndexedDBBlobInfo::kUnknownSize));
} else { } else {
if (!DecodeVarInt(&slice, &size) || size < 0) if (!DecodeVarInt(&slice, &size) || size < 0)
return false; return false;
...@@ -316,6 +322,47 @@ bool DecodeBlobInfos(const std::string& data, ...@@ -316,6 +322,47 @@ bool DecodeBlobInfos(const std::string& data,
return true; return true;
} }
// Decodes a sequence of v4-encoded BlobInfos from |data| into |output|.
// The encoding (see EncodeBlobInfos) is, per entry:
//   { is_file, blob_number, type, size,
//     (files only) file_name, (files only) last_modified [microseconds
//     since the Windows epoch] }
// There is no count prefix; entries are read until the data is exhausted.
// Returns false on malformed data. |output| is cleared first and only
// populated on success.
bool DecodeBlobInfos(const std::string& data,
                     std::vector<IndexedDBBlobInfo>* output) {
  std::vector<IndexedDBBlobInfo> ret;
  output->clear();
  StringPiece slice(data);
  while (!slice.empty()) {
    bool is_file;
    int64_t blob_number;
    base::string16 type;
    int64_t size;
    base::string16 file_name;

    if (!DecodeBool(&slice, &is_file))
      return false;
    if (!DecodeVarInt(&slice, &blob_number) ||
        !DatabaseMetaDataKey::IsValidBlobNumber(blob_number))
      return false;
    if (!DecodeStringWithLength(&slice, &type))
      return false;
    if (!DecodeVarInt(&slice, &size) || size < 0)
      return false;

    if (!is_file) {
      ret.push_back(IndexedDBBlobInfo(type, size, blob_number));
      continue;
    }
    if (!DecodeStringWithLength(&slice, &file_name))
      return false;
    int64_t last_modified;
    // Validate the value that was just decoded. The previous code
    // re-checked |size| here (copy/paste error), which left
    // |last_modified| entirely unvalidated.
    if (!DecodeVarInt(&slice, &last_modified) || last_modified < 0)
      return false;
    ret.push_back(
        IndexedDBBlobInfo(blob_number, type, file_name,
                          base::Time::FromDeltaSinceWindowsEpoch(
                              base::TimeDelta::FromMicroseconds(last_modified)),
                          size));
  }
  output->swap(ret);
  return true;
}
bool IsPathTooLong(const FilePath& leveldb_dir) { bool IsPathTooLong(const FilePath& leveldb_dir) {
int limit = base::GetMaximumPathComponentLength(leveldb_dir.DirName()); int limit = base::GetMaximumPathComponentLength(leveldb_dir.DirName());
if (limit == -1) { if (limit == -1) {
...@@ -644,6 +691,7 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) { ...@@ -644,6 +691,7 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) {
INTERNAL_READ_ERROR(SET_UP_METADATA); INTERNAL_READ_ERROR(SET_UP_METADATA);
return s; return s;
} }
std::vector<base::FilePath> empty_blobs_to_delete;
indexed_db::ReportSchemaVersion(db_schema_version, origin_); indexed_db::ReportSchemaVersion(db_schema_version, origin_);
if (!found) { if (!found) {
// Initialize new backing store. // Initialize new backing store.
...@@ -732,6 +780,17 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) { ...@@ -732,6 +780,17 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) {
PutInt(write_batch.get(), schema_version_key, db_schema_version)); PutInt(write_batch.get(), schema_version_key, db_schema_version));
} }
} }
if (db_schema_version < 4) {
s = UpgradeBlobEntriesToV4(db_.get(), write_batch.get(),
&empty_blobs_to_delete);
if (!s.ok()) {
INTERNAL_CONSISTENCY_ERROR_UNTESTED(SET_UP_METADATA);
return InternalInconsistencyStatus();
}
db_schema_version = 4;
ignore_result(
PutInt(write_batch.get(), schema_version_key, db_schema_version));
}
} }
if (!s.ok()) { if (!s.ok()) {
...@@ -781,6 +840,12 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) { ...@@ -781,6 +840,12 @@ leveldb::Status IndexedDBBackingStore::Initialize(bool clean_active_journal) {
return s; return s;
} }
// Delete all empty files that resulted from the migration to v4. If this
// fails it's not a big deal.
for (const auto& path : empty_blobs_to_delete) {
ignore_result(base::DeleteFile(path, /*recursive=*/false));
}
if (clean_active_journal) { if (clean_active_journal) {
s = CleanUpBlobJournal(ActiveBlobJournalKey::Encode()); s = CleanUpBlobJournal(ActiveBlobJournalKey::Encode());
if (!s.ok()) { if (!s.ok()) {
...@@ -850,6 +915,88 @@ Status IndexedDBBackingStore::AnyDatabaseContainsBlobs( ...@@ -850,6 +915,88 @@ Status IndexedDBBackingStore::AnyDatabaseContainsBlobs(
return Status::OK(); return Status::OK();
} }
// Migrates every v3 blob entry to the v4 encoding by reading each 'file'
// blob's size and last_modified from disk and folding them into the leveldb
// entry. Files found to be empty are appended to |empty_blobs_to_delete| so
// the caller can remove them from disk (their metadata now lives in the
// database, so the on-disk placeholder is no longer needed).
// Rewrites are staged into |write_batch|; nothing is committed here.
Status IndexedDBBackingStore::UpgradeBlobEntriesToV4(
    TransactionalLevelDBDatabase* db,
    LevelDBWriteBatch* write_batch,
    std::vector<base::FilePath>* empty_blobs_to_delete) {
  Status status = leveldb::Status::OK();
  std::vector<base::string16> names;
  IndexedDBMetadataCoding metadata_coding;
  status = metadata_coding.ReadDatabaseNames(db, origin_identifier_, &names);
  if (!status.ok())
    return status;

  for (const auto& name : names) {
    IndexedDBDatabaseMetadata metadata;
    bool found = false;
    status = metadata_coding.ReadMetadataForDatabaseName(
        db, origin_identifier_, name, &metadata, &found);
    // A read error was previously ignored here; surface it to the caller.
    if (!status.ok())
      return status;
    if (!found) {
      // leveldb::Status messages are plain slices, not printf-style format
      // strings, so the old "%s" placeholder was never substituted. Pass the
      // database name as the second message slice instead.
      return Status::NotFound("Metadata not found for database",
                              base::UTF16ToUTF8(name));
    }
    for (const auto& store_id_metadata_pair : metadata.object_stores) {
      leveldb::ReadOptions options;
      // Since this is a scan, don't fill up the cache, as it's not likely
      // these blocks will be reloaded.
      options.fill_cache = false;
      options.verify_checksums = true;
      std::unique_ptr<TransactionalLevelDBIterator> iterator =
          db->CreateIterator(options);
      std::string min_key = BlobEntryKey::EncodeMinKeyForObjectStore(
          metadata.id, store_id_metadata_pair.first);
      std::string max_key = BlobEntryKey::EncodeStopKeyForObjectStore(
          metadata.id, store_id_metadata_pair.first);
      status = iterator->Seek(base::StringPiece(min_key));
      if (status.IsNotFound()) {
        status = Status::OK();
        continue;
      }
      if (!status.ok())
        return status;
      // Loop through all blob entries for the given object store.
      for (; status.ok() && iterator->IsValid() &&
             db->leveldb_state()->comparator()->Compare(
                 leveldb_env::MakeSlice(iterator->Key()), max_key) < 0;
           status = iterator->Next()) {
        std::vector<IndexedDBBlobInfo> temp_blob_infos;
        // A decode failure was previously ignored, which would have silently
        // re-encoded garbage; treat undecodable entries as corruption.
        if (!DecodeV3BlobInfos(iterator->Value(), &temp_blob_infos)) {
          return leveldb::Status::Corruption(
              "Unable to upgrade to database version 4.", "");
        }
        bool needs_rewrite = false;
        // Read the old entries & modify them to add the missing data.
        for (auto& blob_info : temp_blob_infos) {
          if (!blob_info.is_file())
            continue;
          needs_rewrite = true;
          base::File::Info info;
          base::FilePath path =
              GetBlobFileName(metadata.id, blob_info.blob_number());
          if (!base::GetFileInfo(path, &info)) {
            return leveldb::Status::Corruption(
                "Unable to upgrade to database version 4.", "");
          }
          blob_info.set_size(info.size);
          blob_info.set_last_modified(info.last_modified);
          // Empty placeholder files are obsolete once the metadata is in
          // leveldb; schedule them for deletion by the caller.
          if (info.size == 0)
            empty_blobs_to_delete->push_back(path);
        }
        if (!needs_rewrite)
          continue;
        std::string data = EncodeBlobInfos(temp_blob_infos);
        write_batch->Put(iterator->Key(), data);
        if (!status.ok())
          return status;
      }
      if (status.IsNotFound())
        status = leveldb::Status::OK();
      if (!status.ok())
        return status;
    }
    if (!status.ok())
      return status;
  }
  return Status::OK();
}
Status IndexedDBBackingStore::RevertSchemaToV2() { Status IndexedDBBackingStore::RevertSchemaToV2() {
#if DCHECK_IS_ON() #if DCHECK_IS_ON()
DCHECK_CALLED_ON_VALID_SEQUENCE(idb_sequence_checker_); DCHECK_CALLED_ON_VALID_SEQUENCE(idb_sequence_checker_);
...@@ -1509,6 +1656,11 @@ class IndexedDBBackingStore::Transaction::ChainedBlobWriterImpl ...@@ -1509,6 +1656,11 @@ class IndexedDBBackingStore::Transaction::ChainedBlobWriterImpl
std::move(callback_).Run(BlobWriteResult::kRunPhaseTwoAsync); std::move(callback_).Run(BlobWriteResult::kRunPhaseTwoAsync);
return; return;
} else { } else {
if (iter_->size() == 0) {
waiting_for_callback_ = true;
ReportWriteCompletion(true, 0);
return;
}
if (!write_file_callback_.Run(database_id_, *iter_, this)) { if (!write_file_callback_.Run(database_id_, *iter_, this)) {
std::move(callback_).Run(BlobWriteResult::kFailure); std::move(callback_).Run(BlobWriteResult::kFailure);
return; return;
...@@ -1625,6 +1777,10 @@ bool IndexedDBBackingStore::WriteBlobFile( ...@@ -1625,6 +1777,10 @@ bool IndexedDBBackingStore::WriteBlobFile(
if (!MakeIDBBlobDirectory(blob_path_, database_id, descriptor.blob_number())) if (!MakeIDBBlobDirectory(blob_path_, database_id, descriptor.blob_number()))
return false; return false;
// Writing empty files to android is a problem, as we seem to be unable to set
// the last_modified time of an empty file.
DCHECK_NE(descriptor.size(), 0);
bool use_copy_file = descriptor.is_file() && !descriptor.file_path().empty(); bool use_copy_file = descriptor.is_file() && !descriptor.file_path().empty();
FilePath path = GetBlobFileName(database_id, descriptor.blob_number()); FilePath path = GetBlobFileName(database_id, descriptor.blob_number());
...@@ -1913,15 +2069,6 @@ Status IndexedDBBackingStore::Transaction::GetBlobInfoForRecord( ...@@ -1913,15 +2069,6 @@ Status IndexedDBBackingStore::Transaction::GetBlobInfoForRecord(
entry.set_release_callback( entry.set_release_callback(
backing_store_->active_blob_registry()->GetFinalReleaseCallback( backing_store_->active_blob_registry()->GetFinalReleaseCallback(
database_id, entry.blob_number())); database_id, entry.blob_number()));
if (entry.is_file() && !entry.file_path().empty()) {
base::File::Info info;
if (base::GetFileInfo(entry.file_path(), &info)) {
// This should always work, but it isn't fatal if it doesn't; it just
// means a potential slow synchronous call from the renderer later.
entry.set_last_modified(info.last_modified);
entry.set_size(info.size);
}
}
} }
} }
return Status::OK(); return Status::OK();
......
...@@ -49,6 +49,7 @@ struct IndexedDBDatabaseMetadata; ...@@ -49,6 +49,7 @@ struct IndexedDBDatabaseMetadata;
namespace content { namespace content {
class IndexedDBActiveBlobRegistry; class IndexedDBActiveBlobRegistry;
class LevelDBWriteBatch;
class TransactionalLevelDBDatabase; class TransactionalLevelDBDatabase;
class TransactionalLevelDBFactory; class TransactionalLevelDBFactory;
class TransactionalLevelDBIterator; class TransactionalLevelDBIterator;
...@@ -509,6 +510,11 @@ class CONTENT_EXPORT IndexedDBBackingStore { ...@@ -509,6 +510,11 @@ class CONTENT_EXPORT IndexedDBBackingStore {
TransactionalLevelDBDatabase* database, TransactionalLevelDBDatabase* database,
bool* blobs_exist); bool* blobs_exist);
leveldb::Status UpgradeBlobEntriesToV4(
TransactionalLevelDBDatabase* database,
LevelDBWriteBatch* write_batch,
std::vector<base::FilePath>* empty_blobs_to_delete);
// TODO(dmurph): Move this completely to IndexedDBMetadataFactory. // TODO(dmurph): Move this completely to IndexedDBMetadataFactory.
leveldb::Status GetCompleteMetadata( leveldb::Status GetCompleteMetadata(
std::vector<blink::IndexedDBDatabaseMetadata>* output); std::vector<blink::IndexedDBDatabaseMetadata>* output);
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
namespace content { namespace content {
const int64_t IndexedDBBlobInfo::kUnknownSize;
// static // static
void IndexedDBBlobInfo::ConvertBlobInfo( void IndexedDBBlobInfo::ConvertBlobInfo(
const std::vector<IndexedDBBlobInfo>& blob_info, const std::vector<IndexedDBBlobInfo>& blob_info,
...@@ -35,7 +37,7 @@ void IndexedDBBlobInfo::ConvertBlobInfo( ...@@ -35,7 +37,7 @@ void IndexedDBBlobInfo::ConvertBlobInfo(
} }
} }
IndexedDBBlobInfo::IndexedDBBlobInfo() : is_file_(false), size_(-1) {} IndexedDBBlobInfo::IndexedDBBlobInfo() : is_file_(false), size_(kUnknownSize) {}
IndexedDBBlobInfo::IndexedDBBlobInfo( IndexedDBBlobInfo::IndexedDBBlobInfo(
mojo::PendingRemote<blink::mojom::Blob> blob_remote, mojo::PendingRemote<blink::mojom::Blob> blob_remote,
...@@ -58,22 +60,28 @@ IndexedDBBlobInfo::IndexedDBBlobInfo( ...@@ -58,22 +60,28 @@ IndexedDBBlobInfo::IndexedDBBlobInfo(
const std::string& uuid, const std::string& uuid,
const base::FilePath& file_path, const base::FilePath& file_path,
const base::string16& file_name, const base::string16& file_name,
const base::string16& type) const base::string16& type,
const base::Time& last_modified,
const int64_t size)
: is_file_(true), : is_file_(true),
blob_remote_(std::move(blob_remote)), blob_remote_(std::move(blob_remote)),
uuid_(uuid), uuid_(uuid),
type_(type), type_(type),
size_(-1), size_(size),
file_name_(file_name), file_name_(file_name),
file_path_(file_path) {} file_path_(file_path),
last_modified_(last_modified) {}
IndexedDBBlobInfo::IndexedDBBlobInfo(int64_t blob_number, IndexedDBBlobInfo::IndexedDBBlobInfo(int64_t blob_number,
const base::string16& type, const base::string16& type,
const base::string16& file_name) const base::string16& file_name,
const base::Time& last_modified,
const int64_t size)
: is_file_(true), : is_file_(true),
type_(type), type_(type),
size_(-1), size_(size),
file_name_(file_name), file_name_(file_name),
last_modified_(last_modified),
blob_number_(blob_number) {} blob_number_(blob_number) {}
IndexedDBBlobInfo::IndexedDBBlobInfo(const IndexedDBBlobInfo& other) = default; IndexedDBBlobInfo::IndexedDBBlobInfo(const IndexedDBBlobInfo& other) = default;
......
...@@ -27,6 +27,9 @@ namespace content { ...@@ -27,6 +27,9 @@ namespace content {
class CONTENT_EXPORT IndexedDBBlobInfo { class CONTENT_EXPORT IndexedDBBlobInfo {
public: public:
// Used for files with unknown size.
const static int64_t kUnknownSize = -1;
static void ConvertBlobInfo( static void ConvertBlobInfo(
const std::vector<IndexedDBBlobInfo>& blob_info, const std::vector<IndexedDBBlobInfo>& blob_info,
std::vector<blink::mojom::IDBBlobInfoPtr>* blob_or_file_info); std::vector<blink::mojom::IDBBlobInfoPtr>* blob_or_file_info);
...@@ -41,14 +44,22 @@ class CONTENT_EXPORT IndexedDBBlobInfo { ...@@ -41,14 +44,22 @@ class CONTENT_EXPORT IndexedDBBlobInfo {
int64_t size, int64_t size,
int64_t blob_number); int64_t blob_number);
// These two are used for Files. // These two are used for Files.
// The |last_modified| time here is stored in two places - first in the
// leveldb database, and second as the last_modified time of the file written
// to disk. If these don't match, then something modified the file on disk and
// it should be considered corrupt.
IndexedDBBlobInfo(mojo::PendingRemote<blink::mojom::Blob> blob_remote, IndexedDBBlobInfo(mojo::PendingRemote<blink::mojom::Blob> blob_remote,
const std::string& uuid, const std::string& uuid,
const base::FilePath& file_path, const base::FilePath& file_path,
const base::string16& file_name, const base::string16& file_name,
const base::string16& type); const base::string16& type,
const base::Time& last_modified,
const int64_t size);
IndexedDBBlobInfo(int64_t blob_number, IndexedDBBlobInfo(int64_t blob_number,
const base::string16& type, const base::string16& type,
const base::string16& file_name); const base::string16& file_name,
const base::Time& last_modified,
const int64_t size);
IndexedDBBlobInfo(const IndexedDBBlobInfo& other); IndexedDBBlobInfo(const IndexedDBBlobInfo& other);
~IndexedDBBlobInfo(); ~IndexedDBBlobInfo();
......
...@@ -461,22 +461,49 @@ IN_PROC_BROWSER_TEST_F(IndexedDBBrowserTestWithGCExposed, ...@@ -461,22 +461,49 @@ IN_PROC_BROWSER_TEST_F(IndexedDBBrowserTestWithGCExposed,
SimpleTest(GetTestUrl("indexeddb", "database_callbacks_first.html")); SimpleTest(GetTestUrl("indexeddb", "database_callbacks_first.html"));
} }
static void CopyLevelDBToProfile(Shell* shell, struct BlobModificationTime {
scoped_refptr<IndexedDBContextImpl> context, base::FilePath relative_blob_path;
const std::string& test_directory) { base::Time time;
};
static void CopyLevelDBToProfile(
Shell* shell,
scoped_refptr<IndexedDBContextImpl> context,
const std::string& test_directory,
std::vector<BlobModificationTime> modification_times) {
DCHECK(context->IDBTaskRunner()->RunsTasksInCurrentSequence()); DCHECK(context->IDBTaskRunner()->RunsTasksInCurrentSequence());
base::FilePath leveldb_dir(FILE_PATH_LITERAL("file__0.indexeddb.leveldb")); base::FilePath leveldb_dir(FILE_PATH_LITERAL("file__0.indexeddb.leveldb"));
base::FilePath test_data_dir = base::FilePath blob_dir(FILE_PATH_LITERAL("file__0.indexeddb.blob"));
base::FilePath test_leveldb_data_dir =
GetTestFilePath("indexeddb", test_directory.c_str()).Append(leveldb_dir); GetTestFilePath("indexeddb", test_directory.c_str()).Append(leveldb_dir);
base::FilePath dest = context->data_path().Append(leveldb_dir); base::FilePath test_blob_data_dir =
// If we don't create the destination directory first, the contents of the GetTestFilePath("indexeddb", test_directory.c_str()).Append(blob_dir);
base::FilePath leveldb_dest = context->data_path().Append(leveldb_dir);
base::FilePath blob_dest = context->data_path().Append(blob_dir);
// If we don't create the destination directories first, the contents of the
// leveldb directory are copied directly into profile/IndexedDB instead of // leveldb directory are copied directly into profile/IndexedDB instead of
// profile/IndexedDB/file__0.xxx/ // profile/IndexedDB/file__0.xxx/
ASSERT_TRUE(base::CreateDirectory(dest)); ASSERT_TRUE(base::CreateDirectory(leveldb_dest));
const bool kRecursive = true; const bool kRecursive = true;
ASSERT_TRUE(base::CopyDirectory(test_data_dir, ASSERT_TRUE(base::CopyDirectory(test_leveldb_data_dir, context->data_path(),
context->data_path(), kRecursive));
if (!base::PathExists(test_blob_data_dir))
return;
ASSERT_TRUE(base::CreateDirectory(blob_dest));
ASSERT_TRUE(base::CopyDirectory(test_blob_data_dir, context->data_path(),
kRecursive)); kRecursive));
// For some reason touching files on Android fails with EPERM.
// https://crbug.com/1045488
#if !defined(OS_ANDROID)
// The modification time of the saved blobs is used for File objects, so these
// need to manually be set (they are clobbered both by the above copy
// operation and by git).
for (const BlobModificationTime& time : modification_times) {
base::FilePath total_path = blob_dest.Append(time.relative_blob_path);
ASSERT_TRUE(base::TouchFile(total_path, time.time, time.time));
}
#endif
} }
class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest { class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest {
...@@ -485,8 +512,9 @@ class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest { ...@@ -485,8 +512,9 @@ class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest {
void SetUpOnMainThread() override { void SetUpOnMainThread() override {
scoped_refptr<IndexedDBContextImpl> context = GetContext(); scoped_refptr<IndexedDBContextImpl> context = GetContext();
context->IDBTaskRunner()->PostTask( context->IDBTaskRunner()->PostTask(
FROM_HERE, base::BindOnce(&CopyLevelDBToProfile, shell(), context, FROM_HERE,
EnclosingLevelDBDir())); base::BindOnce(&CopyLevelDBToProfile, shell(), context,
EnclosingLevelDBDir(), CustomModificationTimes()));
scoped_refptr<base::ThreadTestHelper> helper( scoped_refptr<base::ThreadTestHelper> helper(
new base::ThreadTestHelper(GetContext()->IDBTaskRunner())); new base::ThreadTestHelper(GetContext()->IDBTaskRunner()));
ASSERT_TRUE(helper->Run()); ASSERT_TRUE(helper->Run());
...@@ -494,6 +522,10 @@ class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest { ...@@ -494,6 +522,10 @@ class IndexedDBBrowserTestWithPreexistingLevelDB : public IndexedDBBrowserTest {
virtual std::string EnclosingLevelDBDir() = 0; virtual std::string EnclosingLevelDBDir() = 0;
virtual std::vector<BlobModificationTime> CustomModificationTimes() {
return std::vector<BlobModificationTime>();
}
private: private:
DISALLOW_COPY_AND_ASSIGN(IndexedDBBrowserTestWithPreexistingLevelDB); DISALLOW_COPY_AND_ASSIGN(IndexedDBBrowserTestWithPreexistingLevelDB);
}; };
...@@ -507,6 +539,33 @@ IN_PROC_BROWSER_TEST_F(IndexedDBBrowserTestWithVersion0Schema, MigrationTest) { ...@@ -507,6 +539,33 @@ IN_PROC_BROWSER_TEST_F(IndexedDBBrowserTestWithVersion0Schema, MigrationTest) {
SimpleTest(GetTestUrl("indexeddb", "migration_test.html")); SimpleTest(GetTestUrl("indexeddb", "migration_test.html"));
} }
class IndexedDBBrowserTestWithVersion3Schema
: public IndexedDBBrowserTestWithPreexistingLevelDB {
std::string EnclosingLevelDBDir() override { return "v3_migration_test"; }
std::vector<BlobModificationTime> CustomModificationTimes() override {
return {
{base::FilePath(FILE_PATH_LITERAL("1/00/3")),
base::Time::FromJsTime(1579809038000)},
{base::FilePath(FILE_PATH_LITERAL("1/00/4")),
base::Time::FromJsTime(1579808985000)},
{base::FilePath(FILE_PATH_LITERAL("1/00/5")),
base::Time::FromJsTime(1579199256000)},
};
}
};
IN_PROC_BROWSER_TEST_F(IndexedDBBrowserTestWithVersion3Schema, MigrationTest) {
const GURL test_url = GetTestUrl("indexeddb", "v3_migration_test.html");
// For some reason setting empty file modification time on Android fails with
// EPERM. https://crbug.com/1045488
#if defined(OS_ANDROID)
SimpleTest(GURL(test_url.spec() + "#ignoreTimes"));
#else
SimpleTest(GURL(test_url.spec()));
#endif
}
class IndexedDBBrowserTestWithVersion123456Schema : public class IndexedDBBrowserTestWithVersion123456Schema : public
IndexedDBBrowserTestWithPreexistingLevelDB { IndexedDBBrowserTestWithPreexistingLevelDB {
std::string EnclosingLevelDBDir() override { return "schema_version_123456"; } std::string EnclosingLevelDBDir() override { return "schema_version_123456"; }
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include "base/stl_util.h" #include "base/stl_util.h"
#include "base/strings/utf_string_conversions.h" #include "base/strings/utf_string_conversions.h"
#include "base/task/post_task.h" #include "base/task/post_task.h"
#include "build/build_config.h"
#include "content/browser/indexed_db/cursor_impl.h" #include "content/browser/indexed_db/cursor_impl.h"
#include "content/browser/indexed_db/file_stream_reader_to_data_pipe.h" #include "content/browser/indexed_db/file_stream_reader_to_data_pipe.h"
#include "content/browser/indexed_db/indexed_db_callbacks.h" #include "content/browser/indexed_db/indexed_db_callbacks.h"
...@@ -443,7 +444,13 @@ void IndexedDBDispatcherHost::CreateAllBlobs( ...@@ -443,7 +444,13 @@ void IndexedDBDispatcherHost::CreateAllBlobs(
element->content_type = base::UTF16ToUTF8(blob_info.type()); element->content_type = base::UTF16ToUTF8(blob_info.type());
element->type = storage::mojom::BlobDataItemType::kIndexedDB; element->type = storage::mojom::BlobDataItemType::kIndexedDB;
BindFileReader(blob_info.file_path(), blob_info.last_modified(), base::Time last_modified;
// Android doesn't seem to consistently be able to set file modification
// times. https://crbug.com/1045488
#if !defined(OS_ANDROID)
last_modified = blob_info.last_modified();
#endif
BindFileReader(blob_info.file_path(), last_modified,
blob_info.release_callback(), blob_info.release_callback(),
element->reader.InitWithNewPipeAndPassReceiver()); element->reader.InitWithNewPipeAndPassReceiver());
......
...@@ -30,7 +30,8 @@ namespace indexed_db { ...@@ -30,7 +30,8 @@ namespace indexed_db {
// 1 - Adds UserIntVersion to DatabaseMetaData. // 1 - Adds UserIntVersion to DatabaseMetaData.
// 2 - Adds DataVersion to to global metadata. // 2 - Adds DataVersion to to global metadata.
// 3 - Adds metadata needed for blob support. // 3 - Adds metadata needed for blob support.
const constexpr int64_t kLatestKnownSchemaVersion = 3; // 4 - Adds size & last_modified to 'file' blob_info encodings.
const constexpr int64_t kLatestKnownSchemaVersion = 4;
} // namespace indexed_db } // namespace indexed_db
CONTENT_EXPORT extern const unsigned char kMinimumIndexId; CONTENT_EXPORT extern const unsigned char kMinimumIndexId;
...@@ -450,9 +451,9 @@ class ObjectStoreDataKey { ...@@ -450,9 +451,9 @@ class ObjectStoreDataKey {
CONTENT_EXPORT static std::string Encode(int64_t database_id, CONTENT_EXPORT static std::string Encode(int64_t database_id,
int64_t object_store_id, int64_t object_store_id,
const std::string encoded_user_key); const std::string encoded_user_key);
static std::string Encode(int64_t database_id, CONTENT_EXPORT static std::string Encode(int64_t database_id,
int64_t object_store_id, int64_t object_store_id,
const blink::IndexedDBKey& user_key); const blink::IndexedDBKey& user_key);
std::string DebugString() const; std::string DebugString() const;
std::unique_ptr<blink::IndexedDBKey> user_key() const; std::unique_ptr<blink::IndexedDBKey> user_key() const;
...@@ -484,7 +485,7 @@ class ExistsEntryKey { ...@@ -484,7 +485,7 @@ class ExistsEntryKey {
DISALLOW_COPY_AND_ASSIGN(ExistsEntryKey); DISALLOW_COPY_AND_ASSIGN(ExistsEntryKey);
}; };
class BlobEntryKey { class CONTENT_EXPORT BlobEntryKey {
public: public:
BlobEntryKey() : database_id_(0), object_store_id_(0) {} BlobEntryKey() : database_id_(0), object_store_id_(0) {}
static bool Decode(base::StringPiece* slice, BlobEntryKey* result); static bool Decode(base::StringPiece* slice, BlobEntryKey* result);
......
...@@ -206,13 +206,10 @@ void TransactionImpl::CreateBlobInfos( ...@@ -206,13 +206,10 @@ void TransactionImpl::CreateBlobInfos(
*security_policy_failure = true; *security_policy_failure = true;
return; return;
} }
(*blob_infos)[i] = DCHECK_NE(info->size, IndexedDBBlobInfo::kUnknownSize);
IndexedDBBlobInfo(std::move(info->blob), info->uuid, info->file->path, (*blob_infos)[i] = IndexedDBBlobInfo(
info->file->name, info->mime_type); std::move(info->blob), info->uuid, info->file->path, info->file->name,
if (info->size != -1) { info->mime_type, info->file->last_modified, info->size);
(*blob_infos)[i].set_last_modified(info->file->last_modified);
(*blob_infos)[i].set_size(info->size);
}
} else { } else {
(*blob_infos)[i] = IndexedDBBlobInfo(std::move(info->blob), info->uuid, (*blob_infos)[i] = IndexedDBBlobInfo(std::move(info->blob), info->uuid,
info->mime_type, info->size); info->mime_type, info->size);
......
...@@ -155,14 +155,14 @@ function promiseDeleteThenOpenDb(dbName, upgradeCallback) { ...@@ -155,14 +155,14 @@ function promiseDeleteThenOpenDb(dbName, upgradeCallback) {
}; };
deleteRequest.onsuccess = () => { deleteRequest.onsuccess = () => {
const openRequest = indexedDB.open(dbName); const openRequest = indexedDB.open(dbName);
openRequest.onerror = () => { openRequest.onerror = (event) => {
reject(new Error('An error occurred on opening database ${dbName}')); reject(new Error('An error occurred on opening database ${dbName}'));
}; };
openRequest.onblocked = () => { openRequest.onblocked = () => {
reject(new Error('Opening database ${dbName} was blocked')); reject(new Error('Opening database ${dbName} was blocked'));
}; };
openRequest.onupgradeneeded = () => { openRequest.onupgradeneeded = (event) => {
upgradeCallback(); upgradeCallback(event.target.result);
}; };
openRequest.onsuccess = () => { openRequest.onsuccess = () => {
resolve(event.target.result); resolve(event.target.result);
...@@ -185,7 +185,7 @@ function promiseOpenDb(dbName, optionalUpgradeCallback) { ...@@ -185,7 +185,7 @@ function promiseOpenDb(dbName, optionalUpgradeCallback) {
reject(e); reject(e);
}; };
if (optionalUpgradeCallback) { if (optionalUpgradeCallback) {
openRequest.onupgradeneeded = () => { openRequest.onupgradeneeded = (event) => {
const db = event.target.result; const db = event.target.result;
optionalUpgradeCallback(db); optionalUpgradeCallback(db);
}; };
......
...@@ -43,6 +43,7 @@ function writeBlob() { ...@@ -43,6 +43,7 @@ function writeBlob() {
var put_request = put_tx.objectStore(store_name).put(put_blob, blob_key); var put_request = put_tx.objectStore(store_name).put(put_blob, blob_key);
put_request.onerror = unexpectedErrorCallback; put_request.onerror = unexpectedErrorCallback;
put_tx.oncomplete = function () { put_tx.oncomplete = function () {
debug('Write complete');
var get_tx = db.transaction(store_name); var get_tx = db.transaction(store_name);
var get_request1 = get_tx.objectStore(store_name).get(blob_key); var get_request1 = get_tx.objectStore(store_name).get(blob_key);
...@@ -69,6 +70,7 @@ function writeBlob() { ...@@ -69,6 +70,7 @@ function writeBlob() {
} }
function writeFile() { function writeFile() {
debug('Writing file.');
put_file = new File([''], 'somefile', { put_file = new File([''], 'somefile', {
type: 'application/x-special-snowflake', type: 'application/x-special-snowflake',
lastModified: new Date('1999-12-31T23:59:59Z') lastModified: new Date('1999-12-31T23:59:59Z')
......
<!DOCTYPE html>
<html>
<!--
Copyright 2020 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<head>
<body>
<input type="file" id="files" name="file1" multiple
onchange="handleFilesAndStart(this.files)">
</form>
</body>
<!-- Create a database to be migrated & verified by a browsertest. -->
<title>IDB Database generator</title>
<script type="text/javascript" src="common.js"></script>
<script>
const dbName = 'db_migration_test';
const objectStoreName = 'storeName';
const indexes = [
{ name: 'name', value: 'name' },
{ name: 'nameAndFlagged', value: ['name', 'flagged'] },
];
const objectStoreData = [
{ id: 0, name: 'Daniel', flagged: true, data: new Blob(['testData1']) },
{ id: 1, name: 'Henry', flagged: false,
data: new Blob(['testData2'], { type: 'application/test' }) },
{ id: 2, name: 'Sarah', flagged: true, data: null },
{ id: 3, name: 'Dave', flagged: false, data: null },
{ id: 4, name: 'Courtney', flagged: true, data: null },
{ id: 5, name: 'Ruthie', flagged: false, data: new Blob() },
];
// Builds the object store during onupgradeneeded: creates the store with
// 'id' as the key path, adds every configured index, then inserts all rows
// from objectStoreData.
function upgradeCallback(db) {
  debug('upgrade called');
  const store = db.createObjectStore(objectStoreName, { keyPath : 'id' });
  indexes.forEach((index) => store.createIndex(index.name, index.value));
  objectStoreData.forEach((row) => store.add(row));
  debug('upgrade done');
}
// Invoked by the file <input>'s onchange handler.  Requires exactly three
// files, wires them into rows 2-4 of objectStoreData, then (re)creates the
// database, which triggers upgradeCallback to write everything.
async function handleFilesAndStart(files) {
  if (files.length != 3) {
    fail('There needs to be 3 files: ' + files);
    return;
  }
  for (let i = 0; i < 3; ++i)
    objectStoreData[2 + i].data = files[i];
  // NOTE(review): opens 'blob_corrupt_db' rather than dbName — presumably
  // intentional so the browsertest finds the expected directory; confirm.
  const db = await promiseDeleteThenOpenDb('blob_corrupt_db', upgradeCallback);
  done('database opened');
}
</script>
</head>
<div id="status">
Select 3 files from the dialog above, and they will be saved into the database.
<br/>Waiting for files...
</div>
</html>
\ No newline at end of file
<!DOCTYPE html>
<html>
<!--
Copyright 2020 The Chromium Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
-->
<head>
<!-- Read & verify database created by migration_database_generator.html -->
<title>IDB test for migration from schema v3</title>
<script type="text/javascript" src="common.js"></script>
<script>
// Must match the constants in the generator page that created the database.
// NOTE(review): dbName appears unused — the database is opened as
// 'blob_corrupt_db' in test(); confirm which name the fixture uses.
const dbName = 'db_migration_test';
const objectStoreName = 'storeName';
// Cleared when the page is loaded with '#ignoreTimes': lastModified checks
// are then skipped (fixture file mtimes can differ between environments).
var testTimes = true;
// Expected contents of the migrated database.  'content: null' means the
// bytes are not read back and compared (only the metadata is checked).
const objectStoreData = [
{ id: 0, name: 'Daniel', flagged: true,
data: { type: 'blob', size: 9, contentType: '', content: 'testData1' } },
{ id: 1, name: 'Henry', flagged: false,
data: { type: 'blob', size: 9,
contentType: 'application/test', content: 'testData2' } },
{ id: 2, name: 'Sarah', flagged: true,
data: { type: 'file', name: "some_text.txt",
lastModified: 1579809038000, size: 7, content: 'hello!\n' } },
{ id: 3, name: 'Dave', flagged: false,
data: { type: 'file', name: "empty_file.txt",
lastModified: 1579808985000, size: 0, content: '' } },
{ id: 4, name: 'Courtney', flagged: true,
data: { type: 'file', name: "unnamed.gif",
lastModified: 1579199256000, size: 584359, content: null } },
{ id: 5, name: 'Ruthie', flagged: false,
data: { type: 'blob', size: 0, contentType: '', content: '' } },
];
// Number of blob/file reads still outstanding; the test finishes when it
// reaches zero.
let contentsLeft = 0;
// Reads a Blob or File back as text and compares both the text and its
// length against the expected values.  The values are stashed on globals
// because shouldBe() evaluates its two arguments as expressions by name.
// NOTE(review): assumes every row's onsuccess handler has incremented
// contentsLeft before the async text() reads start resolving — presumably
// true since the gets complete within one transaction; confirm.
let compareContents = async (blobOrFile, contents, size) => {
try {
self.text = await blobOrFile.text();
self.reference = contents;
self.size = size;
shouldBe("text", "reference");
shouldBe("text.length", "size");
contentsLeft -= 1;
// Last outstanding read finished - every comparison passed.
if (contentsLeft == 0)
done('Finished comparisons. Databases match.');
} catch(e) {
fail("Could not read blob: " + e);
}
}
// Entry point (run from <body onLoad>).  Opens the database written by the
// generator page and verifies every row — plain fields, blob/file metadata,
// and (asynchronously, via compareContents) the blob contents.
async function test() {
// '#ignoreTimes' in the URL disables the lastModified comparisons.
let param = location.hash.substring(1);
if (param == 'ignoreTimes')
testTimes = false;
let upgraded = false;
let db = await promiseOpenDb('blob_corrupt_db', () => { upgraded = true; });
// The upgrade callback firing would mean the database did not already
// exist, i.e. the fixture was not installed.
if (upgraded) {
fail("Database should already be populated");
return;
}
const transaction = db.transaction(objectStoreName, 'readonly');
transaction.onabort = unexpectedAbortCallback;
transaction.onerror = unexpectedErrorCallback;
transaction.oncomplete = () => { debug("Finished reading database."); };
const objectStore = transaction.objectStore(objectStoreName);
for (let rowRef of objectStoreData) {
debug('Fetching row ' + rowRef.id);
const request = objectStore.get(rowRef.id);
request.onerror = unexpectedErrorCallback;
request.onsuccess = (event) => {
// Stored on globals because shouldBe() evaluates its arguments by name.
self.value = event.target.result;
self.rowRef = rowRef;
debug('Received row ' + value.id);
shouldBe("value.id", "rowRef.id");
shouldBe("value.name", "rowRef.name");
shouldBe("value.flagged", "rowRef.flagged");
shouldBe("value.data.size", "rowRef.data.size");
if (rowRef.data.type == 'blob') {
// For Blobs the stored MIME type lives in 'type'.
shouldBe("value.data.type", "rowRef.data.contentType");
} else if (rowRef.data.type == 'file') {
shouldBe("value.data.name", "rowRef.data.name");
if (testTimes)
shouldBe("rowRef.data.lastModified", "value.data.lastModified");
}
// null content means metadata-only verification for this row.
if (rowRef.data.content != null) {
contentsLeft += 1;
compareContents(value.data, rowRef.data.content, rowRef.data.size);
}
}
}
}
</script>
</head>
<body onLoad="test()">
<div id="status">Starting...</div>
</body>
</html>
2020/01/23-12:39:46.682 25d73 Reusing MANIFEST /usr/local/google/home/dmurph/.config/google-chrome/Default/IndexedDB/file__0.indexeddb.leveldb/MANIFEST-000001
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment