Commit c3e113ab authored by Dan Harrington, committed by Commit Bot

Improve prefetch schema testing

This is in preparation for a schema upgrade. Improvements:
- We test that the new schema is compatible with the old one (minimally).
- The test won't need to change for the next version; only the .sql/.data files will.
- The history of schemas is now clearly documented and diffable.
- We test that the migrated schema matches the current one, not just the data.
- It guarantees that the next schema upgrade (v3) CL will test the current
  schema (v2) accurately.


Change-Id: Ibd0de797bcfd878b24956946552eb7a77117124b
Reviewed-on: https://chromium-review.googlesource.com/c/1179977
Reviewed-by: Carlos Knippschild <carlosk@chromium.org>
Commit-Queue: Dan H <harringtond@google.com>
Cr-Commit-Position: refs/heads/master@{#599715}
parent c00fa468
@@ -146,3 +146,30 @@ Furthermore, when adding or removing columns, any existing column ordering might
 not be kept. This means that any query must not presume column ordering and must
 always explicitly refer to them by name. Using <code>SELECT * FROM ...</code>
 for obtaining data in all columns is therefore *unsafe and forbidden*.
+
+## Schema History / Test Data
+
+The components/test/data/offline_pages/prefetch/version_schemas directory
+contains data used to test the prefetch database schema; see
+prefetch_store_schema_unittest.cc for the tests. In this directory there are
+two files for every version of the prefetch database schema:
+
+- v#.sql
+  SQL that creates the database schema for this version and inserts rows into
+  each table for testing. This defines the initial state for each migration
+  test. Data inserted here should attempt to cover edge cases specific to that
+  version (like a change in a default value).
+
+- v#.data
+  Represents the expected result of running the initial state defined in the
+  .sql file through the migration logic up to the current version of the
+  schema. Or, in pseudo-code:
+
+      migrated_db = MigrateToCurrentSchema(BuildDbFromSqlFile(old_version_sql_file));
+      EXPECT_EQ(GetDataFromDataFile(old_version_data_file),
+                GetDataFromDb(migrated_db));
+
+Whenever a new version is created, the existing .data files might need to be
+updated to account for the added migration step.
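To make the v#.sql convention above concrete, here is a minimal sketch of what a future v3.sql could contain. This is hypothetical: version 3 does not exist yet, the added "foo" column is invented purely for illustration, and the sketch is abbreviated to the quota table (a real file would also create and populate prefetch_items, and the existing v1.data/v2.data files would be regenerated to cover the extra migration step).

-- Hypothetical v3.sql sketch; version 3 and the "foo" column are invented.
INSERT OR REPLACE INTO meta (key, value)
VALUES ("version", 3), ("last_compatible_version", 1);

CREATE TABLE IF NOT EXISTS prefetch_downloader_quota
(
  quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,
  update_time INTEGER NOT NULL,
  available_quota INTEGER NOT NULL DEFAULT 0,
  foo INTEGER NOT NULL DEFAULT 0 -- hypothetical column added in v3
);

-- Insert at least one sample row per table so the migration tests have data
-- to carry forward.
INSERT INTO prefetch_downloader_quota (quota_id, update_time, available_quota, foo)
VALUES (1, 2, 3, 4);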
components/offline_pages/core/prefetch/prefetch_store_schema.cc:

@@ -17,9 +17,9 @@ namespace offline_pages {
 // of 0).
 // static
-const int PrefetchStoreSchema::kCurrentVersion = 2;
+constexpr int PrefetchStoreSchema::kCurrentVersion;
 // static
-const int PrefetchStoreSchema::kCompatibleVersion = 1;
+constexpr int PrefetchStoreSchema::kCompatibleVersion;
 
 namespace {
@@ -66,39 +66,43 @@ int GetCompatibleVersionNumber(sql::MetaTable* meta_table) {
 // simplify data retrieval. Columns with fixed length types must come first and
 // variable length types must come later.
 static const char kItemsTableCreationSql[] =
-    "CREATE TABLE IF NOT EXISTS prefetch_items "
     // Fixed length columns come first.
-    "(offline_id INTEGER PRIMARY KEY NOT NULL,"
-    " state INTEGER NOT NULL DEFAULT 0,"
-    " generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,"
-    " get_operation_attempts INTEGER NOT NULL DEFAULT 0,"
-    " download_initiation_attempts INTEGER NOT NULL DEFAULT 0,"
-    " archive_body_length INTEGER_NOT_NULL DEFAULT -1,"
-    " creation_time INTEGER NOT NULL,"
-    " freshness_time INTEGER NOT NULL,"
-    " error_code INTEGER NOT NULL DEFAULT 0,"
-    " file_size INTEGER NOT NULL DEFAULT -1,"
-    // Variable length columns come later.
-    " guid VARCHAR NOT NULL DEFAULT '',"
-    " client_namespace VARCHAR NOT NULL DEFAULT '',"
-    " client_id VARCHAR NOT NULL DEFAULT '',"
-    " requested_url VARCHAR NOT NULL DEFAULT '',"
-    " final_archived_url VARCHAR NOT NULL DEFAULT '',"
-    " operation_name VARCHAR NOT NULL DEFAULT '',"
-    " archive_body_name VARCHAR NOT NULL DEFAULT '',"
-    " title VARCHAR NOT NULL DEFAULT '',"
-    " file_path VARCHAR NOT NULL DEFAULT ''"
-    ")";
+    R"sql(
+CREATE TABLE IF NOT EXISTS prefetch_items(
+offline_id INTEGER PRIMARY KEY NOT NULL,
+state INTEGER NOT NULL DEFAULT 0,
+generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,
+get_operation_attempts INTEGER NOT NULL DEFAULT 0,
+download_initiation_attempts INTEGER NOT NULL DEFAULT 0,
+archive_body_length INTEGER_NOT_NULL DEFAULT -1,
+creation_time INTEGER NOT NULL,
+freshness_time INTEGER NOT NULL,
+error_code INTEGER NOT NULL DEFAULT 0,
+file_size INTEGER NOT NULL DEFAULT -1,
+guid VARCHAR NOT NULL DEFAULT '',
+client_namespace VARCHAR NOT NULL DEFAULT '',
+client_id VARCHAR NOT NULL DEFAULT '',
+requested_url VARCHAR NOT NULL DEFAULT '',
+final_archived_url VARCHAR NOT NULL DEFAULT '',
+operation_name VARCHAR NOT NULL DEFAULT '',
+archive_body_name VARCHAR NOT NULL DEFAULT '',
+title VARCHAR NOT NULL DEFAULT '',
+file_path VARCHAR NOT NULL DEFAULT ''
+)
+)sql";
 
 bool CreatePrefetchItemsTable(sql::Database* db) {
   return db->Execute(kItemsTableCreationSql);
 }
 
 static const char kQuotaTableCreationSql[] =
-    "CREATE TABLE IF NOT EXISTS prefetch_downloader_quota "
-    "(quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,"
-    " update_time INTEGER NOT NULL,"
-    " available_quota INTEGER NOT NULL DEFAULT 0)";
+    R"sql(
+CREATE TABLE IF NOT EXISTS prefetch_downloader_quota(
+quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,
+update_time INTEGER NOT NULL,
+available_quota INTEGER NOT NULL DEFAULT 0
+)
+)sql";
 
 bool CreatePrefetchQuotaTable(sql::Database* db) {
   return db->Execute(kQuotaTableCreationSql);
@@ -119,47 +123,52 @@ bool CreateLatestSchema(sql::Database* db) {
 int MigrateFromVersion1To2(sql::Database* db, sql::MetaTable* meta_table) {
   const int target_version = 2;
   const int target_compatible_version = 1;
+  // 1. Rename the existing items table.
+  // 2. Create the new items table.
+  // 3. Copy existing rows to the new items table.
+  // 4. Drop the old items table.
 static const char kVersion1ToVersion2MigrationSql[] =
-      // Rename the existing items table.
-      "ALTER TABLE prefetch_items RENAME TO prefetch_items_old; "
-      // Creates the new items table.
-      "CREATE TABLE prefetch_items "
-      "(offline_id INTEGER PRIMARY KEY NOT NULL,"
-      " state INTEGER NOT NULL DEFAULT 0,"
-      " generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,"
-      " get_operation_attempts INTEGER NOT NULL DEFAULT 0,"
-      " download_initiation_attempts INTEGER NOT NULL DEFAULT 0,"
-      " archive_body_length INTEGER_NOT_NULL DEFAULT -1,"
-      " creation_time INTEGER NOT NULL,"
-      " freshness_time INTEGER NOT NULL,"
-      " error_code INTEGER NOT NULL DEFAULT 0,"
-      // Note: default value changed from 0 to -1.
-      " file_size INTEGER NOT NULL DEFAULT -1,"
-      " guid VARCHAR NOT NULL DEFAULT '',"
-      " client_namespace VARCHAR NOT NULL DEFAULT '',"
-      " client_id VARCHAR NOT NULL DEFAULT '',"
-      " requested_url VARCHAR NOT NULL DEFAULT '',"
-      " final_archived_url VARCHAR NOT NULL DEFAULT '',"
-      " operation_name VARCHAR NOT NULL DEFAULT '',"
-      " archive_body_name VARCHAR NOT NULL DEFAULT '',"
-      " title VARCHAR NOT NULL DEFAULT '',"
-      " file_path VARCHAR NOT NULL DEFAULT ''); "
-      // Copy existing rows to the new items table.
-      "INSERT INTO prefetch_items "
-      " (offline_id, state, generate_bundle_attempts, get_operation_attempts,"
-      " download_initiation_attempts, archive_body_length, creation_time,"
-      " freshness_time, error_code, file_size, guid, client_namespace,"
-      " client_id, requested_url, final_archived_url, operation_name,"
-      " archive_body_name, title, file_path)"
-      " SELECT "
-      " offline_id, state, generate_bundle_attempts, get_operation_attempts,"
-      " download_initiation_attempts, archive_body_length, creation_time,"
-      " freshness_time, error_code, file_size, guid, client_namespace,"
-      " client_id, requested_url, final_archived_url, operation_name,"
-      " archive_body_name, title, file_path"
-      " FROM prefetch_items_old; "
-      // Drops the old items table.
-      "DROP TABLE prefetch_items_old; ";
+      R"sql(
+ALTER TABLE prefetch_items RENAME TO prefetch_items_old;
+
+CREATE TABLE prefetch_items(
+offline_id INTEGER PRIMARY KEY NOT NULL,
+state INTEGER NOT NULL DEFAULT 0,
+generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,
+get_operation_attempts INTEGER NOT NULL DEFAULT 0,
+download_initiation_attempts INTEGER NOT NULL DEFAULT 0,
+archive_body_length INTEGER_NOT_NULL DEFAULT -1,
+creation_time INTEGER NOT NULL,
+freshness_time INTEGER NOT NULL,
+error_code INTEGER NOT NULL DEFAULT 0,
+file_size INTEGER NOT NULL DEFAULT -1,
+guid VARCHAR NOT NULL DEFAULT '',
+client_namespace VARCHAR NOT NULL DEFAULT '',
+client_id VARCHAR NOT NULL DEFAULT '',
+requested_url VARCHAR NOT NULL DEFAULT '',
+final_archived_url VARCHAR NOT NULL DEFAULT '',
+operation_name VARCHAR NOT NULL DEFAULT '',
+archive_body_name VARCHAR NOT NULL DEFAULT '',
+title VARCHAR NOT NULL DEFAULT '',
+file_path VARCHAR NOT NULL DEFAULT ''
+);
+
+INSERT INTO prefetch_items
+ (offline_id, state, generate_bundle_attempts, get_operation_attempts,
+ download_initiation_attempts, archive_body_length, creation_time,
+ freshness_time, error_code, file_size, guid, client_namespace,
+ client_id, requested_url, final_archived_url, operation_name,
+ archive_body_name, title, file_path)
+SELECT
+ offline_id, state, generate_bundle_attempts, get_operation_attempts,
+ download_initiation_attempts, archive_body_length, creation_time,
+ freshness_time, error_code, file_size, guid, client_namespace,
+ client_id, requested_url, final_archived_url, operation_name,
+ archive_body_name, title, file_path
+FROM prefetch_items_old;
+
+DROP TABLE prefetch_items_old;
+)sql";
 
   sql::Transaction transaction(db);
   if (transaction.Begin() && db->Execute(kVersion1ToVersion2MigrationSql) &&
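Since this CL is explicitly preparation for a v3 schema, a sketch of how the next migration step might slot into this file may help orient the reader. Everything below is hypothetical: the v3 schema change, the column name, and the function are invented here, and the success/failure tail is assumed to mirror the usual sql::Transaction plus sql::MetaTable sequence used by MigrateFromVersion1To2 (whose tail is collapsed in the diff above).

// Hypothetical sketch only: a possible v2 -> v3 migration following the same
// pattern as MigrateFromVersion1To2. The v3 schema change is not defined by
// this CL, so the SQL body below is a placeholder.
int MigrateFromVersion2To3(sql::Database* db, sql::MetaTable* meta_table) {
  const int target_version = 3;
  const int target_compatible_version = 1;

  static const char kVersion2ToVersion3MigrationSql[] =
      R"sql(
ALTER TABLE prefetch_items ADD COLUMN some_new_column INTEGER NOT NULL DEFAULT 0;
)sql";

  sql::Transaction transaction(db);
  if (transaction.Begin() && db->Execute(kVersion2ToVersion3MigrationSql) &&
      meta_table->SetVersionNumber(target_version) &&
      meta_table->SetCompatibleVersionNumber(target_compatible_version) &&
      transaction.Commit()) {
    return target_version;
  }
  return 2;  // Assumed failure handling: report that we are still at v2.
}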
components/offline_pages/core/prefetch/prefetch_store_schema.h:

@@ -17,8 +17,8 @@ namespace offline_pages {
 // from any and all previous database versions to the latest.
 class PrefetchStoreSchema {
  public:
-  static const int kCurrentVersion;
-  static const int kCompatibleVersion;
+  static constexpr int kCurrentVersion = 2;
+  static constexpr int kCompatibleVersion = 1;
 
   // Creates or upgrade the database schema as needed from information stored in
   // a metadata table. Returns |true| if the database is ready to be used,
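A side note on the constexpr change above: at the time of this commit Chromium still built as C++14, where a static constexpr data member that is initialized inside the class definition still needs an out-of-line definition (with no initializer) in one .cc file if it is ODR-used, for example bound to a const reference by EXPECT_EQ. That is why the .cc now carries the bare "constexpr int PrefetchStoreSchema::kCurrentVersion;" lines. A minimal standalone illustration of the pattern, using hypothetical names:

// Standalone illustration with hypothetical names; not part of the CL.
#include <iostream>

struct ExampleSchema {
  // Initialized inside the class definition...
  static constexpr int kCurrentVersion = 2;
};

// ...but still defined out of line, without an initializer, so that ODR-uses
// (such as binding a const reference below) link cleanly under C++14.
constexpr int ExampleSchema::kCurrentVersion;

int main() {
  const int& version = ExampleSchema::kCurrentVersion;  // ODR-use.
  std::cout << version << "\n";
  return 0;
}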
components/offline_pages/core/prefetch/prefetch_store_schema_unittest.cc:

@@ -7,6 +7,13 @@
 #include <limits>
 #include <memory>
 
+#include "base/files/file_path.h"
+#include "base/files/file_util.h"
+#include "base/json/string_escape.h"
+#include "base/path_service.h"
+#include "base/strings/strcat.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
 #include "sql/database.h"
 #include "sql/meta_table.h"
 #include "sql/statement.h"
@@ -14,7 +21,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace offline_pages {
+namespace {
 
 static const char kSomeTableCreationSql[] =
     "CREATE TABLE some_table "
     "(id INTEGER PRIMARY KEY NOT NULL,"
@@ -25,6 +32,156 @@ static const char kAnotherTableCreationSql[] =
     "(id INTEGER PRIMARY KEY NOT NULL,"
     " name VARCHAR NOT NULL)";
 
+std::vector<std::string> TableColumns(sql::Database* db,
+                                      const std::string table_name) {
+  std::vector<std::string> columns;
+  std::string sql = "PRAGMA TABLE_INFO(" + table_name + ")";
+  sql::Statement table_info(db->GetUniqueStatement(sql.c_str()));
+  while (table_info.Step())
+    columns.push_back(table_info.ColumnString(1));
+  return columns;
+}
+
+struct Table {
+  std::string ToString() const {
+    std::ostringstream ss;
+    ss << "-- TABLE " << name << " --\n";
+    for (size_t row_index = 0; row_index < rows.size(); ++row_index) {
+      ss << "--- ROW " << row_index << " ---\n";
+      const std::vector<std::string>& row = rows[row_index];
+      for (size_t i = 0; i < row.size(); ++i) {
+        ss << column_names[i] << ": " << base::GetQuotedJSONString(row[i])
+           << '\n';
+      }
+    }
+    return ss.str();
+  }
+
+  std::string name;
+  std::vector<std::string> column_names;
+  // List of all values. Has size [row_count][column_count].
+  std::vector<std::vector<std::string>> rows;
+};
+
+// Returns the value contained in a table cell, or nullptr if the cell row or
+// column is invalid.
+const std::string* TableCell(const Table& table,
+                             const std::string& column,
+                             size_t row) {
+  if (row >= table.rows.size())
+    return nullptr;
+  for (size_t i = 0; i < table.column_names.size(); ++i) {
+    if (table.column_names[i] == column) {
+      return &table.rows[row][i];
+    }
+  }
+  return nullptr;
+}
+
+struct DatabaseTables {
+  std::string ToString() {
+    std::ostringstream ss;
+    for (auto i = tables.begin(); i != tables.end(); ++i)
+      ss << i->second.ToString();
+    return ss.str();
+  }
+
+  std::map<std::string, Table> tables;
+};
+
+Table ReadTable(sql::Database* db, const std::string table_name) {
+  Table table;
+  table.name = table_name;
+  table.column_names = TableColumns(db, table_name);
+  std::string sql = "SELECT * FROM " + table_name;
+  sql::Statement all_data(db->GetUniqueStatement(sql.c_str()));
+  while (all_data.Step()) {
+    std::vector<std::string> row;
+    for (size_t i = 0; i < table.column_names.size(); ++i) {
+      row.push_back(all_data.ColumnString(i));
+    }
+    table.rows.push_back(std::move(row));
+  }
+  return table;
+}
+
+// Returns all tables in |db|, except the 'meta' table. We don't test the
+// 'meta' table directly in this file, but instead use the MetaTable class.
+DatabaseTables ReadTables(sql::Database* db) {
+  DatabaseTables database_tables;
+  std::stringstream ss;
+  sql::Statement table_names(db->GetUniqueStatement(
+      "SELECT name FROM sqlite_master WHERE type='table'"));
+  while (table_names.Step()) {
+    const std::string table_name = table_names.ColumnString(0);
+    if (table_name == "meta")
+      continue;
+    database_tables.tables[table_name] = ReadTable(db, table_name);
+  }
+  return database_tables;
+}
+
+// Returns the SQL that defines a table.
+std::string TableSql(sql::Database* db, const std::string& table_name) {
+  DatabaseTables database_tables;
+  std::stringstream ss;
+  sql::Statement table_sql(db->GetUniqueStatement(
+      "SELECT sql FROM sqlite_master WHERE type='table' AND name=?"));
+  table_sql.BindString(0, table_name);
+  if (!table_sql.Step())
+    return std::string();
+  // Try to normalize the SQL, since we use this to compare schemas.
+  std::string sql =
+      base::CollapseWhitespaceASCII(table_sql.ColumnString(0), true);
+  base::ReplaceSubstringsAfterOffset(&sql, 0, ", ", ",");
+  base::ReplaceSubstringsAfterOffset(&sql, 0, ",", ",\n");
+  return sql;
+}
+
+std::string ReadSchemaFile(const std::string& file_name) {
+  std::string data;
+  base::FilePath path;
+  CHECK(base::PathService::Get(base::DIR_SOURCE_ROOT, &path));
+  path = path.AppendASCII(
+                 "components/test/data/offline_pages/prefetch/version_schemas/")
+             .AppendASCII(file_name);
+  CHECK(base::ReadFileToString(path, &data)) << path;
+  return data;
+}
+
+std::unique_ptr<sql::Database> CreateTablesWithSampleRows(int version) {
+  auto db = std::make_unique<sql::Database>();
+  CHECK(db->OpenInMemory());
+  // Write a meta table. v*.sql overwrites version and last_compatible_version.
+  sql::MetaTable meta_table;
+  CHECK(meta_table.Init(db.get(), 1, 1));
+
+  const std::string schema = ReadSchemaFile(
+      base::StrCat({"v", base::NumberToString(version), ".sql"}));
+  CHECK(db->Execute(schema.c_str()));
+  return db;
+}
+
+void ExpectDbIsCurrent(sql::Database* db) {
+  // Check the meta table.
+  sql::MetaTable meta_table;
+  EXPECT_TRUE(meta_table.Init(db, 1, 1));
+  EXPECT_EQ(PrefetchStoreSchema::kCurrentVersion,
+            meta_table.GetVersionNumber());
+  EXPECT_EQ(PrefetchStoreSchema::kCompatibleVersion,
            meta_table.GetCompatibleVersionNumber());
+
+  std::unique_ptr<sql::Database> current_db =
+      CreateTablesWithSampleRows(PrefetchStoreSchema::kCurrentVersion);
+
+  // Check that database schema is current.
+  for (auto name_and_table : ReadTables(db).tables) {
+    const std::string current_sql =
+        TableSql(current_db.get(), name_and_table.first);
+    const std::string real_sql = TableSql(db, name_and_table.first);
+    EXPECT_EQ(current_sql, real_sql);
+  }
+}
+
 TEST(PrefetchStoreSchemaPreconditionTest,
      TestSqliteCreateTableIsTransactional) {
   sql::Database db;
@@ -90,200 +247,108 @@ TEST(PrefetchStoreSchemaPreconditionTest,
   EXPECT_TRUE(db.DoesColumnExist("some_table", "value"));
 }
 
-class PrefetchStoreSchemaTest : public testing::Test {
- public:
-  PrefetchStoreSchemaTest() = default;
-  ~PrefetchStoreSchemaTest() override = default;
-
-  void SetUp() override {
-    db_ = std::make_unique<sql::Database>();
-    ASSERT_TRUE(db_->OpenInMemory());
-    ASSERT_FALSE(sql::MetaTable::DoesTableExist(db_.get()));
-  }
-
-  void CheckTablesExistence() {
-    EXPECT_TRUE(db_->DoesTableExist("prefetch_items"));
-    EXPECT_TRUE(db_->DoesTableExist("prefetch_downloader_quota"));
-    EXPECT_FALSE(db_->DoesTableExist("prefetch_items_old"));
-  }
-
- protected:
-  std::unique_ptr<sql::Database> db_;
-  std::unique_ptr<PrefetchStoreSchema> schema_;
-};
-
-TEST_F(PrefetchStoreSchemaTest, TestSchemaCreationFromNothing) {
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  CheckTablesExistence();
-
-  sql::MetaTable meta_table;
-  EXPECT_TRUE(meta_table.Init(db_.get(), std::numeric_limits<int>::max(),
-                              std::numeric_limits<int>::max()));
-  EXPECT_EQ(PrefetchStoreSchema::kCurrentVersion,
-            meta_table.GetVersionNumber());
-  EXPECT_EQ(PrefetchStoreSchema::kCompatibleVersion,
-            meta_table.GetCompatibleVersionNumber());
-}
-
-TEST_F(PrefetchStoreSchemaTest, TestMissingTablesAreCreatedAtLatestVersion) {
-  sql::MetaTable meta_table;
-  EXPECT_TRUE(meta_table.Init(db_.get(), PrefetchStoreSchema::kCurrentVersion,
-                              PrefetchStoreSchema::kCompatibleVersion));
-  EXPECT_EQ(PrefetchStoreSchema::kCurrentVersion,
-            meta_table.GetVersionNumber());
-  EXPECT_EQ(PrefetchStoreSchema::kCompatibleVersion,
-            meta_table.GetCompatibleVersionNumber());
-
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  CheckTablesExistence();
-}
-
-TEST_F(PrefetchStoreSchemaTest, TestMissingTablesAreRecreated) {
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  CheckTablesExistence();
-
-  EXPECT_TRUE(db_->Execute("DROP TABLE prefetch_items"));
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  CheckTablesExistence();
-
-  EXPECT_TRUE(db_->Execute("DROP TABLE prefetch_downloader_quota"));
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  CheckTablesExistence();
-}
-
-void CreateVersion1TablesWithSampleRows(sql::Database* db) {
-  // Create version 1 tables.
-  static const char kV0ItemsTableCreationSql[] =
-      "CREATE TABLE prefetch_items"
-      "(offline_id INTEGER PRIMARY KEY NOT NULL,"
-      " state INTEGER NOT NULL DEFAULT 0,"
-      " generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,"
-      " get_operation_attempts INTEGER NOT NULL DEFAULT 0,"
-      " download_initiation_attempts INTEGER NOT NULL DEFAULT 0,"
-      " archive_body_length INTEGER_NOT_NULL DEFAULT -1,"
-      " creation_time INTEGER NOT NULL,"
-      " freshness_time INTEGER NOT NULL,"
-      " error_code INTEGER NOT NULL DEFAULT 0,"
-      " file_size INTEGER NOT NULL DEFAULT 0,"
-      " guid VARCHAR NOT NULL DEFAULT '',"
-      " client_namespace VARCHAR NOT NULL DEFAULT '',"
-      " client_id VARCHAR NOT NULL DEFAULT '',"
-      " requested_url VARCHAR NOT NULL DEFAULT '',"
-      " final_archived_url VARCHAR NOT NULL DEFAULT '',"
-      " operation_name VARCHAR NOT NULL DEFAULT '',"
-      " archive_body_name VARCHAR NOT NULL DEFAULT '',"
-      " title VARCHAR NOT NULL DEFAULT '',"
-      " file_path VARCHAR NOT NULL DEFAULT ''"
-      ")";
-  EXPECT_TRUE(db->Execute(kV0ItemsTableCreationSql));
-
-  static const char kV0QuotaTableCreationSql[] =
-      "CREATE TABLE prefetch_downloader_quota"
-      "(quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,"
-      " update_time INTEGER NOT NULL,"
-      " available_quota INTEGER NOT NULL DEFAULT 0)";
-  EXPECT_TRUE(db->Execute(kV0QuotaTableCreationSql));
-
-  // Insert one row with artificial values into the items table.
-  static const char kV0ItemInsertSql[] =
-      "INSERT INTO prefetch_items"
-      " (offline_id, state, generate_bundle_attempts, get_operation_attempts,"
-      " download_initiation_attempts, archive_body_length, creation_time,"
-      " freshness_time, error_code, file_size, guid, client_namespace,"
-      " client_id, requested_url, final_archived_url, operation_name,"
-      " archive_body_name, title, file_path)"
-      " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
-  sql::Statement insertStatement1(db->GetUniqueStatement(kV0ItemInsertSql));
-  // Generates fake values for all integer columns starting at 1.
-  for (int i = 0; i <= 9; ++i)
-    insertStatement1.BindInt(i, i + 1);
-  // Generates fake values for all string columns starting at "a".
-  for (int i = 10; i <= 18; ++i)
-    insertStatement1.BindString(i, std::string(1, 'a' + i - 10));
-  EXPECT_TRUE(insertStatement1.Run());
-
-  // Insert one row with artificial values into the quota table.
-  static const char kV0QuotaInsertSql[] =
-      "INSERT INTO prefetch_downloader_quota"
-      " (quota_id, update_time, available_quota)"
-      " VALUES (?, ?, ?)";
-  sql::Statement insertStatement2(db->GetUniqueStatement(kV0QuotaInsertSql));
-  // Generates fake values for all columns.
-  insertStatement2.BindInt(0, 1);
-  insertStatement2.BindInt(1, 2);
-  insertStatement2.BindInt(2, 3);
-  EXPECT_TRUE(insertStatement2.Run());
-}
-
-void CheckSampleRowsAtCurrentVersion(sql::Database* db) {
-  // Checks the previously inserted item row was migrated correctly.
-  static const char kV0ItemSelectSql[] =
-      "SELECT "
-      " offline_id, state, generate_bundle_attempts, get_operation_attempts,"
-      " download_initiation_attempts, archive_body_length, creation_time,"
-      " freshness_time, error_code, file_size, guid, client_namespace,"
-      " client_id, requested_url, final_archived_url, operation_name,"
-      " archive_body_name, title, file_path"
-      " FROM prefetch_items";
-  sql::Statement selectStatement1(db->GetUniqueStatement(kV0ItemSelectSql));
-  ASSERT_TRUE(selectStatement1.Step());
-  // Checks fake values for all integer columns.
-  for (int i = 0; i <= 9; ++i)
-    EXPECT_EQ(i + 1, selectStatement1.ColumnInt(i))
-        << "Wrong integer value at items table's column " << i;
-  // Checks fake values for all string columns.
-  for (int i = 10; i <= 18; ++i)
-    EXPECT_EQ(std::string(1, 'a' + i - 10), selectStatement1.ColumnString(i))
-        << "Wrong string value at items table's column " << i;
-  ;
-  EXPECT_FALSE(selectStatement1.Step());
-
-  // Checks the previously inserted quota row was migrated correctly.
-  static const char kV0QuotaSelectSql[] =
-      "SELECT quota_id, update_time, available_quota"
-      " FROM prefetch_downloader_quota";
-  sql::Statement selectStatement2(db->GetUniqueStatement(kV0QuotaSelectSql));
-  ASSERT_TRUE(selectStatement2.Step());
-  // Checks fake values for all columns.
-  EXPECT_EQ(1, selectStatement2.ColumnInt(0));
-  EXPECT_EQ(2, selectStatement2.ColumnInt(1));
-  EXPECT_EQ(3, selectStatement2.ColumnInt(2));
-  EXPECT_FALSE(selectStatement2.Step());
-}
-
-// Tests that a migration from the initially deployed version of the schema,
-// as it was for chromium/src at 90113a2c01ca9ff77042daacd8282a4c16aade85, is
-// correctly migrated to the final, current version without losing data.
-TEST_F(PrefetchStoreSchemaTest, TestMigrationFromV0) {
-  // Set version numbers to 1.
-  sql::MetaTable meta_table;
-  EXPECT_TRUE(meta_table.Init(db_.get(), 1, 1));
-  EXPECT_EQ(1, meta_table.GetVersionNumber());
-  EXPECT_EQ(1, meta_table.GetCompatibleVersionNumber());
-
-  CreateVersion1TablesWithSampleRows(db_.get());
-
-  // Executes the migration.
-  EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db_.get()));
-  EXPECT_EQ(2, meta_table.GetVersionNumber());
-  EXPECT_EQ(1, meta_table.GetCompatibleVersionNumber());
-  CheckTablesExistence();
-
-  CheckSampleRowsAtCurrentVersion(db_.get());
-
-  // Tests that the default value for file size is now -1.
-  sql::Statement fileSizeInsertStatement(db_->GetUniqueStatement(
-      "INSERT INTO prefetch_items (offline_id, creation_time, freshness_time)"
-      " VALUES (?, ?, ?)"));
-  fileSizeInsertStatement.BindInt(0, 100);
-  fileSizeInsertStatement.BindInt(1, 101);
-  fileSizeInsertStatement.BindInt(2, 102);
-  EXPECT_TRUE(fileSizeInsertStatement.Run());
-
-  sql::Statement fileSizeSelectStatement(db_->GetUniqueStatement(
-      "SELECT file_size FROM prefetch_items WHERE offline_id = ?"));
-  fileSizeSelectStatement.BindInt(0, 100);
-  ASSERT_TRUE(fileSizeSelectStatement.Step());
-  EXPECT_EQ(-1, fileSizeSelectStatement.ColumnInt(0));
-  EXPECT_FALSE(fileSizeSelectStatement.Step());
-}
+// Verify the latest v#.sql accurately represents the current schema.
+//
+// Note: We keep the creation code for the current schema version duplicated in
+// PrefetchStoreSchema and in the latest version test file so that when we move
+// on from the current schema we already know it's represented correctly in the
+// test.
+TEST(PrefetchStoreSchemaTest, TestCurrentSqlFileIsAccurate) {
+  // Create the database with the release code, and with v?.sql.
+  sql::Database db;
+  ASSERT_TRUE(db.OpenInMemory());
+  ASSERT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(&db));
+
+  ExpectDbIsCurrent(&db);
+}
+
+// Tests database creation starting with all previous versions, or an empty
+// state.
+TEST(PrefetchStoreSchemaTest, TestCreateOrMigrate) {
+  for (int i = 0; i <= PrefetchStoreSchema::kCurrentVersion; ++i) {
+    SCOPED_TRACE(testing::Message() << "Testing migration from version " << i);
+    std::unique_ptr<sql::Database> db;
+    // When i==0, start from an empty state.
+    const int version = i > 0 ? i : PrefetchStoreSchema::kCurrentVersion;
+    if (i > 0) {
+      db = CreateTablesWithSampleRows(i);
+      // Executes the migration.
+      EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db.get()));
+    } else {
+      db = std::make_unique<sql::Database>();
+      ASSERT_TRUE(db->OpenInMemory());
+      // Creation from scratch.
+      EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db.get()));
+      // Tables are already created, this will just insert rows.
+      const std::string schema = ReadSchemaFile(
+          base::StrCat({"v", base::NumberToString(version), ".sql"}));
+      ASSERT_TRUE(db->Execute(schema.c_str()));
+    }
+
+    // Check schema.
+    ExpectDbIsCurrent(db.get());
+
+    // Check the database contents.
+    std::string expected_data = ReadSchemaFile(
+        base::StrCat({"v", base::NumberToString(version), ".data"}));
+    EXPECT_EQ(expected_data, ReadTables(db.get()).ToString());
+  }
+}
+
+// Test that the current database version can be used by all compatible
+// versions.
+TEST(PrefetchStoreSchemaTest, TestRevert) {
+  static_assert(PrefetchStoreSchema::kCompatibleVersion == 1,
+                "If compatible version is changed, add a test to verify the "
+                "database is correctly razed and recreated!");
+
+  // This test simply runs the insert operations in v*.sql on a database
+  // with the current schema.
+  for (int version = PrefetchStoreSchema::kCompatibleVersion;
+       version < PrefetchStoreSchema::kCurrentVersion; ++version) {
+    SCOPED_TRACE(testing::Message() << "Testing revert to version " << version);
+    // First, extract the expected state after running v*.sql.
+    DatabaseTables original_state;
+    {
+      std::unique_ptr<sql::Database> db = CreateTablesWithSampleRows(version);
+      original_state = ReadTables(db.get());
+    }
+
+    // Create a new database at the current version.
+    sql::Database db;
+    ASSERT_TRUE(db.OpenInMemory());
+    EXPECT_TRUE(PrefetchStoreSchema::CreateOrUpgradeIfNeeded(&db));
+
+    // Attempt to insert a row using the old SQL.
+    const std::string schema = ReadSchemaFile(
+        base::StrCat({"v", base::NumberToString(version), ".sql"}));
+    EXPECT_TRUE(db.Execute(schema.c_str()));
+
+    // Check the database contents.
+    // We should find every value from original_state present in the db.
+    std::string expected_data = ReadSchemaFile(
+        base::StrCat({"v", base::NumberToString(version), ".data"}));
+    const DatabaseTables new_state = ReadTables(&db);
+    for (auto name_and_table : original_state.tables) {
+      const Table& original_table = name_and_table.second;
+      ASSERT_EQ(1ul, new_state.tables.count(name_and_table.first));
+      const Table& new_table =
+          new_state.tables.find(name_and_table.first)->second;
+      for (size_t row = 0; row < original_table.rows.size(); ++row) {
+        for (const std::string& column_name : original_table.column_names) {
+          const std::string* old_value =
+              TableCell(original_table, column_name, row);
+          const std::string* new_value =
+              TableCell(new_table, column_name, row);
+          ASSERT_TRUE(old_value);
+          EXPECT_TRUE(new_value) << "new table does not have old value";
+          if (new_value) {
+            EXPECT_EQ(*old_value, *new_value);
+          }
+        }
+      }
+    }
+  }
+}
+
+}  // namespace
 
 }  // namespace offline_pages
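For context on how the schema class is consumed outside these tests: per the header comment above, production code opens the SQLite database and then calls PrefetchStoreSchema::CreateOrUpgradeIfNeeded() before issuing queries. The snippet below is a simplified, hypothetical illustration only; the real PrefetchStore initialization is asynchronous (task runners and callbacks) and is not part of this CL, and the include path is assumed.

// Hypothetical, simplified illustration of using the schema helper.
#include <memory>

#include "base/files/file_path.h"
#include "components/offline_pages/core/prefetch/prefetch_store_schema.h"
#include "sql/database.h"

namespace offline_pages {

std::unique_ptr<sql::Database> OpenPrefetchDbForExample(
    const base::FilePath& db_path) {
  auto db = std::make_unique<sql::Database>();
  if (!db->Open(db_path))
    return nullptr;
  // Creates the tables on first run, or migrates an older schema in place.
  if (!PrefetchStoreSchema::CreateOrUpgradeIfNeeded(db.get()))
    return nullptr;
  return db;
}

}  // namespace offline_pages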
components/test/data/offline_pages/prefetch/version_schemas/v1.data:

-- TABLE prefetch_downloader_quota --
--- ROW 0 ---
quota_id: "1"
update_time: "2"
available_quota: "3"
-- TABLE prefetch_items --
--- ROW 0 ---
offline_id: "1"
state: "2"
generate_bundle_attempts: "3"
get_operation_attempts: "4"
download_initiation_attempts: "5"
archive_body_length: "6"
creation_time: "7"
freshness_time: "8"
error_code: "9"
file_size: "10"
guid: "guid"
client_namespace: "client_namespace"
client_id: "client_id"
requested_url: "requested_url"
final_archived_url: "final_archived_url"
operation_name: "operation_name"
archive_body_name: "archive_body_name"
title: "title"
file_path: "file_path"
components/test/data/offline_pages/prefetch/version_schemas/v1.sql:

INSERT OR REPLACE INTO meta (key, value)
VALUES ("version", 1), ("last_compatible_version", 1);
CREATE TABLE IF NOT EXISTS prefetch_items
(
offline_id INTEGER PRIMARY KEY NOT NULL,
state INTEGER NOT NULL DEFAULT 0,
generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,
get_operation_attempts INTEGER NOT NULL DEFAULT 0,
download_initiation_attempts INTEGER NOT NULL DEFAULT 0,
archive_body_length INTEGER_NOT_NULL DEFAULT -1,
creation_time INTEGER NOT NULL,
freshness_time INTEGER NOT NULL,
error_code INTEGER NOT NULL DEFAULT 0,
file_size INTEGER NOT NULL DEFAULT 0,
guid VARCHAR NOT NULL DEFAULT '',
client_namespace VARCHAR NOT NULL DEFAULT '',
client_id VARCHAR NOT NULL DEFAULT '',
requested_url VARCHAR NOT NULL DEFAULT '',
final_archived_url VARCHAR NOT NULL DEFAULT '',
operation_name VARCHAR NOT NULL DEFAULT '',
archive_body_name VARCHAR NOT NULL DEFAULT '',
title VARCHAR NOT NULL DEFAULT '',
file_path VARCHAR NOT NULL DEFAULT ''
);
CREATE TABLE IF NOT EXISTS prefetch_downloader_quota
(
quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,
update_time INTEGER NOT NULL,
available_quota INTEGER NOT NULL DEFAULT 0
);
INSERT INTO prefetch_items
(
offline_id,
state,
generate_bundle_attempts,
get_operation_attempts,
download_initiation_attempts,
archive_body_length,
creation_time,
freshness_time,
error_code,
file_size,
guid,
client_namespace,
client_id,
requested_url,
final_archived_url,
operation_name,
archive_body_name,
title,
file_path
)
VALUES
(
1, -- offline_id
2, -- state
3, -- generate_bundle_attempts
4, -- get_operation_attempts
5, -- download_initiation_attempts
6, -- archive_body_length
7, -- creation_time
8, -- freshness_time
9, -- error_code
10, -- file_size
'guid', -- guid
'client_namespace', -- client_namespace
'client_id', -- client_id
'requested_url', -- requested_url
'final_archived_url', -- final_archived_url
'operation_name', -- operation_name
'archive_body_name', -- archive_body_name
'title', -- title
'file_path' -- file_path
);
INSERT INTO prefetch_downloader_quota
(
quota_id,
update_time,
available_quota
)
VALUES
(
1,
2,
3
);
components/test/data/offline_pages/prefetch/version_schemas/v2.data:

-- TABLE prefetch_downloader_quota --
--- ROW 0 ---
quota_id: "1"
update_time: "2"
available_quota: "3"
-- TABLE prefetch_items --
--- ROW 0 ---
offline_id: "1"
state: "2"
generate_bundle_attempts: "3"
get_operation_attempts: "4"
download_initiation_attempts: "5"
archive_body_length: "6"
creation_time: "7"
freshness_time: "8"
error_code: "9"
file_size: "10"
guid: "guid"
client_namespace: "client_namespace"
client_id: "client_id"
requested_url: "requested_url"
final_archived_url: "final_archived_url"
operation_name: "operation_name"
archive_body_name: "archive_body_name"
title: "title"
file_path: "file_path"
components/test/data/offline_pages/prefetch/version_schemas/v2.sql:

INSERT OR REPLACE INTO meta (key, value)
VALUES ("version", 2), ("last_compatible_version", 1);
CREATE TABLE IF NOT EXISTS prefetch_items
(
offline_id INTEGER PRIMARY KEY NOT NULL,
state INTEGER NOT NULL DEFAULT 0,
generate_bundle_attempts INTEGER NOT NULL DEFAULT 0,
get_operation_attempts INTEGER NOT NULL DEFAULT 0,
download_initiation_attempts INTEGER NOT NULL DEFAULT 0,
archive_body_length INTEGER_NOT_NULL DEFAULT -1,
creation_time INTEGER NOT NULL,
freshness_time INTEGER NOT NULL,
error_code INTEGER NOT NULL DEFAULT 0,
file_size INTEGER NOT NULL DEFAULT -1,
guid VARCHAR NOT NULL DEFAULT '',
client_namespace VARCHAR NOT NULL DEFAULT '',
client_id VARCHAR NOT NULL DEFAULT '',
requested_url VARCHAR NOT NULL DEFAULT '',
final_archived_url VARCHAR NOT NULL DEFAULT '',
operation_name VARCHAR NOT NULL DEFAULT '',
archive_body_name VARCHAR NOT NULL DEFAULT '',
title VARCHAR NOT NULL DEFAULT '',
file_path VARCHAR NOT NULL DEFAULT ''
);
CREATE TABLE IF NOT EXISTS prefetch_downloader_quota
(
quota_id INTEGER PRIMARY KEY NOT NULL DEFAULT 1,
update_time INTEGER NOT NULL,
available_quota INTEGER NOT NULL DEFAULT 0
);
INSERT INTO prefetch_items
(
offline_id,
state,
generate_bundle_attempts,
get_operation_attempts,
download_initiation_attempts,
archive_body_length,
creation_time,
freshness_time,
error_code,
file_size,
guid,
client_namespace,
client_id,
requested_url,
final_archived_url,
operation_name,
archive_body_name,
title,
file_path
)
VALUES
(
1, -- offline_id
2, -- state
3, -- generate_bundle_attempts
4, -- get_operation_attempts
5, -- download_initiation_attempts
6, -- archive_body_length
7, -- creation_time
8, -- freshness_time
9, -- error_code
10, -- file_size
'guid', -- guid
'client_namespace', -- client_namespace
'client_id', -- client_id
'requested_url', -- requested_url
'final_archived_url', -- final_archived_url
'operation_name', -- operation_name
'archive_body_name', -- archive_body_name
'title', -- title
'file_path' -- file_path
);
INSERT INTO prefetch_downloader_quota
(
quota_id,
update_time,
available_quota
)
VALUES
(
1,
2,
3
);