Commit 180186cf authored by Carlos Knippschild, committed by Commit Bot

Zombie prefetch items are deleted based on expiration time

This CL changes how zombie prefetch items are expired: they are now deleted
based on an expiration time, 7 days after entering that state. Previously
they were deleted when a newly received set of suggestions no longer
included their URLs. That did not work well with the way the Feed provides
suggestions, hence the need for this change.
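
In essence the new cleanup reduces to one deletion keyed on the zombie items'
freshness time, implemented in DeleteExpiredAndFutureZombies (see the
stale_entry_finalizer_task.cc hunk below). A rough sketch of that statement,
with the bound parameters spelled out:

  // Sketch only; the exact code is in DeleteExpiredAndFutureZombies below.
  static const char kSql[] =
      "DELETE FROM prefetch_items"
      " WHERE state = ?"
      " AND (freshness_time < ? OR freshness_time > ?)";
  // Bound as: 0 -> PrefetchItemState::ZOMBIE,
  //           1 -> now - kZombieItemLifetime (7 days; expired zombies),
  //           2 -> now + kFutureItemTimeLimit (1 day; clock-skew protection).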

Bug: 841516
Change-Id: I74c53e491cb6442ad71a2d9a275506677fdf813d
Reviewed-on: https://chromium-review.googlesource.com/c/1351594
Commit-Queue: Carlos Knippschild <carlosk@chromium.org>
Reviewed-by: Carlos Knippschild <carlosk@chromium.org>
Reviewed-by: Dan H <harringtond@google.com>
Cr-Commit-Position: refs/heads/master@{#612393}
parent 193f427a
......@@ -6,7 +6,6 @@
#include <map>
#include <memory>
#include <set>
#include <utility>
#include "base/bind.h"
......@@ -31,27 +30,20 @@ namespace offline_pages {
using Result = AddUniqueUrlsTask::Result;
namespace {
struct ItemInfo {
int64_t offline_id;
PrefetchItemState state;
};
std::map<std::string, ItemInfo> FindExistingPrefetchItemsInNamespaceSync(
// Returns a map of URL to offline_id for all existing prefetch items.
std::map<std::string, int64_t> GetAllUrlsAndIdsFromNamespaceSync(
sql::Database* db,
const std::string& name_space) {
static const char kSql[] =
"SELECT offline_id, state, requested_url FROM prefetch_items"
"SELECT requested_url, offline_id FROM prefetch_items"
" WHERE client_namespace = ?";
sql::Statement statement(db->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindString(0, name_space);
std::map<std::string, ItemInfo> result;
while (statement.Step()) {
result.emplace(statement.ColumnString(2),
ItemInfo{.offline_id = statement.ColumnInt64(0),
.state = static_cast<PrefetchItemState>(
statement.ColumnInt(1))});
}
std::map<std::string, int64_t> result;
while (statement.Step())
result.emplace(statement.ColumnString(0), statement.ColumnInt64(1));
return result;
}
......@@ -96,11 +88,9 @@ bool UpdateItemTimeSync(sql::Database* db,
}
// Adds new prefetch item entries to the store using the URLs and client IDs
// from |candidate_prefetch_urls| and the client's |name_space|. Also cleans up
// entries in the Zombie state from the client's |name_space| except for the
// ones whose URL is contained in |candidate_prefetch_urls|.
// Returns the number of added prefecth items.
Result AddUrlsAndCleanupZombiesSync(
// from |candidate_prefetch_urls| and the client's |name_space|. Returns the
// result of the attempt to add new URLs.
Result AddUniqueUrlsSync(
const std::string& name_space,
const std::vector<PrefetchURL>& candidate_prefetch_urls,
sql::Database* db) {
......@@ -108,46 +98,35 @@ Result AddUrlsAndCleanupZombiesSync(
if (!transaction.Begin())
return Result::STORE_ERROR;
std::map<std::string, ItemInfo> existing_items =
FindExistingPrefetchItemsInNamespaceSync(db, name_space);
std::map<std::string, int64_t> existing_items =
GetAllUrlsAndIdsFromNamespaceSync(db, name_space);
int added_row_count = 0;
base::Time now = OfflineClock()->Now();
// Insert rows in reverse order to ensure that the beginning of the list has
// the newest timestamp. This will cause it to be prefetched first.
// the most recent timestamps so that it is prefetched first.
for (auto candidate_iter = candidate_prefetch_urls.rbegin();
candidate_iter != candidate_prefetch_urls.rend(); ++candidate_iter) {
PrefetchURL prefetch_url = *candidate_iter;
auto iter = existing_items.find(prefetch_url.url.spec());
if (iter == existing_items.end()) {
if (!CreatePrefetchItemSync(db, name_space, prefetch_url,
store_utils::ToDatabaseTime(now)))
const PrefetchURL& prefetch_url = *candidate_iter;
auto existing_iter = existing_items.find(prefetch_url.url.spec());
if (existing_iter != existing_items.end()) {
// An existing item is still being suggested so update its timestamps (and
// therefore priority).
if (!UpdateItemTimeSync(db, existing_iter->second, now))
return Result::STORE_ERROR; // Transaction rollback.
added_row_count++;
} else {
// The existing item is still being suggested, update its timestamp (and
// therefore priority).
if (!UpdateItemTimeSync(db, iter->second.offline_id, now))
if (!CreatePrefetchItemSync(db, name_space, prefetch_url,
store_utils::ToDatabaseTime(now))) {
return Result::STORE_ERROR; // Transaction rollback.
// Removing from the list of existing items if it was requested again, to
// prevent it from being removed in the next step.
existing_items.erase(iter);
}
added_row_count++;
}
// We artificially add a microsecond to ensure that the timestamp is
// different (and guarantee a particular order when sorting by timestamp).
now += base::TimeDelta::FromMicroseconds(1);
}
// Purge remaining zombie IDs.
for (const auto& existing_item : existing_items) {
if (existing_item.second.state != PrefetchItemState::ZOMBIE)
continue;
if (!PrefetchStoreUtils::DeletePrefetchItemByOfflineIdSync(
db, existing_item.second.offline_id)) {
return Result::STORE_ERROR; // Transaction rollback.
}
}
if (!transaction.Commit())
return Result::STORE_ERROR; // Transaction rollback.
......@@ -155,6 +134,7 @@ Result AddUrlsAndCleanupZombiesSync(
added_row_count);
return added_row_count > 0 ? Result::URLS_ADDED : Result::NOTHING_ADDED;
}
} // namespace
AddUniqueUrlsTask::AddUniqueUrlsTask(
......@@ -174,8 +154,8 @@ AddUniqueUrlsTask::AddUniqueUrlsTask(
AddUniqueUrlsTask::~AddUniqueUrlsTask() {}
void AddUniqueUrlsTask::Run() {
prefetch_store_->Execute(base::BindOnce(&AddUrlsAndCleanupZombiesSync,
name_space_, prefetch_urls_),
prefetch_store_->Execute(
base::BindOnce(&AddUniqueUrlsSync, name_space_, prefetch_urls_),
base::BindOnce(&AddUniqueUrlsTask::OnUrlsAdded,
weak_ptr_factory_.GetWeakPtr()),
Result::STORE_ERROR);
......
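For readability, the resulting control flow of AddUniqueUrlsSync can be condensed
roughly as follows (a sketch only: transaction handling and UMA reporting are
omitted, and it relies on the helpers declared in the file above):

  Result AddUniqueUrlsSync(const std::string& name_space,
                           const std::vector<PrefetchURL>& candidates,
                           sql::Database* db) {
    // URL -> offline_id for every item already stored in this namespace.
    std::map<std::string, int64_t> existing =
        GetAllUrlsAndIdsFromNamespaceSync(db, name_space);
    base::Time now = OfflineClock()->Now();
    int added_row_count = 0;
    // Iterate in reverse so the head of the suggestion list ends up with the
    // newest timestamps and is prefetched first.
    for (auto it = candidates.rbegin(); it != candidates.rend(); ++it) {
      auto found = existing.find(it->url.spec());
      if (found != existing.end()) {
        // Re-suggested URL: refresh its timestamps (and thus its priority).
        if (!UpdateItemTimeSync(db, found->second, now))
          return Result::STORE_ERROR;
      } else {
        if (!CreatePrefetchItemSync(db, name_space, *it,
                                    store_utils::ToDatabaseTime(now)))
          return Result::STORE_ERROR;
        added_row_count++;
      }
      // Keep per-item timestamps distinct and deterministically ordered.
      now += base::TimeDelta::FromMicroseconds(1);
    }
    // Zombie cleanup no longer happens here; StaleEntryFinalizerTask deletes
    // zombies once they expire.
    return added_row_count > 0 ? Result::URLS_ADDED : Result::NOTHING_ADDED;
  }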
......@@ -20,12 +20,10 @@ struct PrefetchURL;
// Task that adds new URL suggestions to the pipeline. URLs are matched against
// existing ones from any stage of the process so that only new, unique ones are
// actually added.
// Fully processed items are kept in the store in the PrefetchItemState::ZOMBIE
// state until it is confirmed that the client for its namespace is not
// recommending the same URL anymore to avoid processing it twice. So once the
// step described above is done, all same namespace items in the ZOMBIE state
// whose URL didn't match any of the just suggested ones are finally deleted
// from the store.
// Fully processed items are kept in the store in the zombie state so that
// follow-up recommendations of the same URL from the same client are not
// processed twice. Zombie items are then cleaned up after a set period of time
// by the |StaleEntryFinalizerTask|.
class AddUniqueUrlsTask : public Task {
public:
// Result of executing the command in the store.
......
......@@ -5,6 +5,7 @@
#include "components/offline_pages/core/prefetch/tasks/add_unique_urls_task.h"
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
......@@ -28,15 +29,12 @@ const char kTestNamespace[] = "test";
const char kClientId1[] = "ID-1";
const char kClientId2[] = "ID-2";
const char kClientId3[] = "ID-3";
const char kClientId4[] = "ID-5";
const GURL kTestURL1("https://www.google.com/");
const GURL kTestURL2("http://www.example.com/");
const GURL kTestURL3("https://news.google.com/");
const GURL kTestURL4("https://chrome.google.com/");
const base::string16 kTestTitle1 = base::ASCIIToUTF16("Title 1");
const base::string16 kTestTitle2 = base::ASCIIToUTF16("Title 2");
const base::string16 kTestTitle3 = base::ASCIIToUTF16("Title 3");
const base::string16 kTestTitle4 = base::ASCIIToUTF16("Title 4");
} // namespace
class AddUniqueUrlsTaskTest : public PrefetchTaskTestBase {
......@@ -77,12 +75,12 @@ TEST_F(AddUniqueUrlsTaskTest, AddTaskInEmptyStore) {
kTestNamespace, urls));
std::map<std::string, PrefetchItem> items = GetAllItems();
ASSERT_EQ(2u, items.size());
ASSERT_TRUE(items.count(kClientId1) > 0);
ASSERT_EQ(2U, items.size());
ASSERT_GT(items.count(kClientId1), 0U);
EXPECT_EQ(kTestURL1, items[kClientId1].url);
EXPECT_EQ(kTestNamespace, items[kClientId1].client_id.name_space);
EXPECT_EQ(kTestTitle1, items[kClientId1].title);
ASSERT_TRUE(items.count(kClientId2) > 0);
ASSERT_GT(items.count(kClientId2), 0U);
EXPECT_EQ(kTestURL2, items[kClientId2].url);
EXPECT_EQ(kTestNamespace, items[kClientId2].client_id.name_space);
EXPECT_EQ(kTestTitle2, items[kClientId2].title);
......@@ -111,92 +109,78 @@ TEST_F(AddUniqueUrlsTaskTest, SingleDuplicateUrlNotAdded) {
EXPECT_EQ(1, dispatcher()->task_schedule_count);
}
TEST_F(AddUniqueUrlsTaskTest, DontAddURLIfItExists) {
TEST_F(AddUniqueUrlsTaskTest, DontAddURLIfItAlreadyExists) {
// Overrides and initializes a test clock.
TestScopedOfflineClock clock;
std::vector<PrefetchURL> urls;
urls.push_back(PrefetchURL{kClientId1, kTestURL1, kTestTitle1});
urls.push_back(PrefetchURL{kClientId2, kTestURL2, kTestTitle2});
const base::Time start_time = base::Time::Now();
clock.SetNow(start_time);
// Populate the store with pre-existing items.
std::vector<PrefetchURL> urls = {{kClientId1, kTestURL1, kTestTitle1},
{kClientId2, kTestURL2, kTestTitle2}};
RunTask(std::make_unique<AddUniqueUrlsTask>(dispatcher(), store(),
kTestNamespace, urls));
EXPECT_EQ(1, dispatcher()->task_schedule_count);
std::map<std::string, PrefetchItem> items_before = GetAllItems();
// Advance time to verify that timestamp of kClientId1 is updated on the next
// task execution.
clock.Advance(base::TimeDelta::FromSeconds(1));
// Advance time by 1 hour to verify that timestamp of ID-1 is updated on the
// next task execution.
clock.Advance(base::TimeDelta::FromHours(1));
const base::Time later_time = clock.Now();
urls = {
// This PrefetchURL has a duplicate URL, should not be added.
{kClientId4, kTestURL1, kTestTitle4},
{kClientId3, kTestURL3, kTestTitle3},
};
// Turn ID-1 and ID-2 items into zombies.
// Note: ZombifyPrefetchItems returns the number of affected items.
EXPECT_EQ(1, store_util()->ZombifyPrefetchItems(kTestNamespace, kTestURL1));
EXPECT_EQ(1, store_util()->ZombifyPrefetchItems(kTestNamespace, kTestURL2));
urls = {{kClientId1, kTestURL1, kTestTitle1},
{kClientId3, kTestURL3, kTestTitle3}};
RunTask(std::make_unique<AddUniqueUrlsTask>(dispatcher(), store(),
kTestNamespace, urls));
EXPECT_EQ(2, dispatcher()->task_schedule_count);
std::map<std::string, PrefetchItem> items = GetAllItems();
ASSERT_EQ(3u, items.size());
ASSERT_TRUE(items.count(kClientId1) > 0);
ASSERT_EQ(3U, items.size());
ASSERT_GT(items.count(kClientId1), 0U);
// Re-suggested ID-1 should have its timestamp updated.
EXPECT_EQ(kTestURL1, items[kClientId1].url);
EXPECT_EQ(kTestNamespace, items[kClientId1].client_id.name_space);
EXPECT_EQ(kTestTitle1, items[kClientId1].title);
ASSERT_TRUE(items.count(kClientId2) > 0);
EXPECT_EQ(PrefetchItemState::ZOMBIE, items[kClientId1].state);
// Note: as timestamps are inserted with microsecond variations, we're
// comparing them using a safe range of 1 second.
EXPECT_LE(later_time, items[kClientId1].creation_time);
EXPECT_GE(later_time + base::TimeDelta::FromSeconds(1),
items[kClientId1].creation_time);
EXPECT_LE(later_time, items[kClientId1].freshness_time);
EXPECT_GE(later_time + base::TimeDelta::FromSeconds(1),
items[kClientId1].freshness_time);
// Previously existing ID-2 should not have been modified.
ASSERT_GT(items.count(kClientId2), 0U);
EXPECT_EQ(kTestURL2, items[kClientId2].url);
EXPECT_EQ(kTestNamespace, items[kClientId2].client_id.name_space);
EXPECT_EQ(kTestTitle2, items[kClientId2].title);
ASSERT_TRUE(items.count(kClientId3) > 0);
EXPECT_EQ(kTestURL3, items[kClientId3].url);
EXPECT_EQ(kTestNamespace, items[kClientId3].client_id.name_space);
EXPECT_EQ(kTestTitle3, items[kClientId3].title);
// Although kClientId4 was not inserted, it should have resulted in updating
// kClientId1's timestamp.
EXPECT_GT(items[kClientId1].creation_time,
items_before[kClientId1].creation_time);
EXPECT_EQ(items[kClientId1].creation_time, items[kClientId1].freshness_time);
}
TEST_F(AddUniqueUrlsTaskTest, HandleZombiePrefetchItems) {
std::vector<PrefetchURL> urls;
urls.push_back(PrefetchURL{kClientId1, kTestURL1, kTestTitle1});
urls.push_back(PrefetchURL{kClientId2, kTestURL2, kTestTitle2});
urls.push_back(PrefetchURL{kClientId3, kTestURL3, kTestTitle3});
RunTask(std::make_unique<AddUniqueUrlsTask>(dispatcher(), store(),
kTestNamespace, urls));
EXPECT_EQ(1, dispatcher()->task_schedule_count);
// ZombifyPrefetchItem returns the number of affected items.
EXPECT_EQ(1, store_util()->ZombifyPrefetchItems(kTestNamespace, urls[0].url));
EXPECT_EQ(1, store_util()->ZombifyPrefetchItems(kTestNamespace, urls[1].url));
urls = {
{kClientId1, kTestURL1, kTestTitle1},
{kClientId3, kTestURL3, kTestTitle3},
{kClientId4, kTestURL4, kTestTitle4},
};
// ID-1 is expected to stay in zombie state.
// ID-2 is expected to be removed, because it is in zombie state.
// ID-3 is still requested, so it is ignored.
// ID-4 is added.
RunTask(std::make_unique<AddUniqueUrlsTask>(dispatcher(), store(),
kTestNamespace, urls));
EXPECT_EQ(2, dispatcher()->task_schedule_count);
std::map<std::string, PrefetchItem> items = GetAllItems();
ASSERT_EQ(3u, items.size());
ASSERT_TRUE(items.count(kClientId1) > 0);
EXPECT_EQ(kTestURL1, items[kClientId1].url);
EXPECT_EQ(kTestNamespace, items[kClientId1].client_id.name_space);
EXPECT_EQ(kTestTitle1, items[kClientId1].title);
ASSERT_TRUE(items.count(kClientId3) > 0);
EXPECT_EQ(PrefetchItemState::ZOMBIE, items[kClientId2].state);
EXPECT_LE(start_time, items[kClientId2].creation_time);
EXPECT_GE(start_time + base::TimeDelta::FromSeconds(1),
items[kClientId2].creation_time);
EXPECT_LE(start_time, items[kClientId2].freshness_time);
EXPECT_GE(start_time + base::TimeDelta::FromSeconds(1),
items[kClientId2].freshness_time);
// Newly suggested ID-3 should be added.
ASSERT_GT(items.count(kClientId3), 0U);
EXPECT_EQ(kTestURL3, items[kClientId3].url);
EXPECT_EQ(kTestNamespace, items[kClientId3].client_id.name_space);
EXPECT_EQ(kTestTitle3, items[kClientId3].title);
ASSERT_TRUE(items.count(kClientId4) > 0);
EXPECT_EQ(kTestURL4, items[kClientId4].url);
EXPECT_EQ(kTestNamespace, items[kClientId4].client_id.name_space);
EXPECT_EQ(kTestTitle4, items[kClientId4].title);
EXPECT_EQ(PrefetchItemState::NEW_REQUEST, items[kClientId3].state);
EXPECT_LE(later_time, items[kClientId3].creation_time);
EXPECT_GE(later_time + base::TimeDelta::FromSeconds(1),
items[kClientId3].creation_time);
EXPECT_LE(later_time, items[kClientId3].freshness_time);
EXPECT_GE(later_time + base::TimeDelta::FromSeconds(1),
items[kClientId3].freshness_time);
}
} // namespace offline_pages
......@@ -77,8 +77,7 @@ std::vector<PrefetchItemStats> FetchUrlsSync(sql::Database* db) {
statement.ColumnInt64(5)), // creation_time
static_cast<PrefetchItemErrorCode>(
statement.ColumnInt(6)), // error_code
statement.ColumnInt64(7) // file_size
);
statement.ColumnInt64(7)); // file_size
}
return urls;
......@@ -92,7 +91,7 @@ bool MarkUrlAsZombie(sql::Database* db,
"offline_id = ?";
sql::Statement statement(db->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt(0, static_cast<int>(PrefetchItemState::ZOMBIE));
statement.BindInt(1, store_utils::ToDatabaseTime(freshness_time));
statement.BindInt64(1, store_utils::ToDatabaseTime(freshness_time));
statement.BindInt64(2, offline_id);
return statement.Run();
}
......
......@@ -12,10 +12,9 @@
namespace offline_pages {
class PrefetchStore;
// Prefetching task that takes finished PrefetchItems, records interesting
// metrics about the final status, and marks them as zombies. Zombies are
// cleaned up when suggestions are updated and there are no more
// suggestions at the |requested_url|.
// Prefetching task that takes finished prefetch items, records interesting
// metrics about their final status, and marks them as zombies. Zombies are
// cleaned after a set period of time by the |StaleEntryFinalizerTask|.
// NOTE: this task is run periodically as a reconciliation task or from some
// event handlers. It should not cause 'progress' in the pipeline on which other
// tasks would depend. It should only move entries to the ZOMBIE state.
......
......@@ -6,13 +6,16 @@
#include <memory>
#include <set>
#include <vector>
#include "base/test/metrics/histogram_tester.h"
#include "base/time/time.h"
#include "components/offline_pages/core/prefetch/mock_prefetch_item_generator.h"
#include "components/offline_pages/core/prefetch/prefetch_item.h"
#include "components/offline_pages/core/prefetch/prefetch_types.h"
#include "components/offline_pages/core/prefetch/store/prefetch_store_test_util.h"
#include "components/offline_pages/core/prefetch/tasks/prefetch_task_test_base.h"
#include "components/offline_pages/core/test_scoped_offline_clock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace offline_pages {
......@@ -78,28 +81,42 @@ TEST_F(MetricsFinalizationTaskTest, LeavesOtherStatesAlone) {
}
TEST_F(MetricsFinalizationTaskTest, FinalizesMultipleItems) {
base::Time before_insert_time = base::Time::Now();
std::set<PrefetchItem> finished_items = {
item_generator()->CreateItem(PrefetchItemState::FINISHED),
item_generator()->CreateItem(PrefetchItemState::FINISHED),
item_generator()->CreateItem(PrefetchItemState::FINISHED)};
PrefetchItem unfinished_item =
item_generator()->CreateItem(PrefetchItemState::NEW_REQUEST);
for (auto& item : finished_items) {
ASSERT_TRUE(store_util()->InsertPrefetchItem(item));
// Confirms that ItemGenerator did set |freshness_time| with Time::Now().
ASSERT_LE(before_insert_time, item.freshness_time);
}
PrefetchItem unfinished_item =
item_generator()->CreateItem(PrefetchItemState::NEW_REQUEST);
ASSERT_TRUE(store_util()->InsertPrefetchItem(unfinished_item));
// Overrides the offline clock and sets the current time in the future.
TestScopedOfflineClock clock;
clock.SetNow(before_insert_time + base::TimeDelta::FromHours(1));
// Execute the metrics task.
RunTask(metrics_finalization_task_.get());
// The finished ones should all have become zombies and the new request should
// be untouched.
std::set<PrefetchItem> all_items;
// The finished ones should be zombies and the new request should be
// untouched.
EXPECT_EQ(4U, store_util()->GetAllItems(&all_items));
EXPECT_EQ(0U, FilterByState(all_items, PrefetchItemState::FINISHED).size());
EXPECT_EQ(3U, FilterByState(all_items, PrefetchItemState::ZOMBIE).size());
std::set<PrefetchItem> zombie_items =
FilterByState(all_items, PrefetchItemState::ZOMBIE);
EXPECT_EQ(3U, zombie_items.size());
for (const PrefetchItem& zombie_item : zombie_items) {
EXPECT_EQ(clock.Now(), zombie_item.freshness_time)
<< "Incorrect freshness_time (not updated?) for item "
<< zombie_item.client_id;
}
std::set<PrefetchItem> items_in_new_request_state =
FilterByState(all_items, PrefetchItemState::NEW_REQUEST);
......
......@@ -26,6 +26,14 @@ using Result = StaleEntryFinalizerTask::Result;
namespace {
// Maximum amount of time into the future an item's freshness time may be set;
// beyond that limit the item is finalized (or deleted if in the zombie state).
constexpr base::TimeDelta kFutureItemTimeLimit = base::TimeDelta::FromDays(1);
// Expiration time delay for items entering the zombie state, after which they
// are permanently deleted.
constexpr base::TimeDelta kZombieItemLifetime = base::TimeDelta::FromDays(7);
// If this time changes, we need to update the description in histograms.xml
// for OfflinePages.Prefetching.StuckItemState.
const int kStuckTimeLimitInDays = 7;
......@@ -125,7 +133,7 @@ bool FinalizeFutureItems(PrefetchItemState state,
"UPDATE prefetch_items SET state = ?, error_code = ?"
" WHERE state = ? AND freshness_time > ?";
const int64_t future_fresh_db_time_limit =
store_utils::ToDatabaseTime(now + base::TimeDelta::FromDays(1));
store_utils::ToDatabaseTime(now + kFutureItemTimeLimit);
sql::Statement statement(db->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt(0, static_cast<int>(PrefetchItemState::FINISHED));
statement.BindInt(
......@@ -137,6 +145,22 @@ bool FinalizeFutureItems(PrefetchItemState state,
return statement.Run();
}
bool DeleteExpiredAndFutureZombies(base::Time now, sql::Database* db) {
static const char kSql[] =
"DELETE FROM prefetch_items"
" WHERE state = ? "
" AND (freshness_time < ? OR freshness_time > ?)";
const int64_t earliest_zombie_db_time =
store_utils::ToDatabaseTime(now - kZombieItemLifetime);
const int64_t future_zombie_db_time =
store_utils::ToDatabaseTime(now + kFutureItemTimeLimit);
sql::Statement statement(db->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt(0, static_cast<int>(PrefetchItemState::ZOMBIE));
statement.BindInt64(1, earliest_zombie_db_time);
statement.BindInt64(2, future_zombie_db_time);
return statement.Run();
}
// If there is a bug in our code, an item might be stuck in the queue waiting
// on an event that didn't happen. If so, finalize that item and report it.
void ReportAndFinalizeStuckItems(base::Time now, sql::Database* db) {
......@@ -200,8 +224,12 @@ Result FinalizeStaleEntriesSync(sql::Database* db) {
return Result::NO_MORE_WORK;
}
if (!DeleteExpiredAndFutureZombies(now, db))
return Result::NO_MORE_WORK;
// Items could also be stuck in a non-expirable state due to a bug, report
// them.
// them. This should always be the last step, coming after the regular
// freshness maintenance steps above are done.
ReportAndFinalizeStuckItems(now, db);
Result result = Result::MORE_WORK_NEEDED;
......
......@@ -21,6 +21,8 @@ class PrefetchStore;
// considered stale are moved to the "finished" state and have their error code
// column set to the PrefetchItemErrorCode value that identifies the bucket they
// were at.
// It also handles items in the "zombie" state, which are deleted once
// considered expired after a set amount of time.
// NOTE: This task is run periodically as a reconciliation task and from some
// event handlers. As such, it must not cause network operations nor cause
// 'progress' in the pipeline that would trigger other tasks.
......
......@@ -25,7 +25,8 @@ namespace offline_pages {
using Result = StaleEntryFinalizerTask::Result;
std::set<PrefetchItem> Filter(const std::set<PrefetchItem>& items,
// Returns only the items in the provided |state|.
std::set<PrefetchItem> Select(const std::set<PrefetchItem>& items,
PrefetchItemState state) {
std::set<PrefetchItem> result;
for (const PrefetchItem& item : items) {
......@@ -35,6 +36,17 @@ std::set<PrefetchItem> Filter(const std::set<PrefetchItem>& items,
return result;
}
// Returns only the items with the provided |error_code|.
std::set<PrefetchItem> Select(const std::set<PrefetchItem>& items,
PrefetchItemErrorCode error_code) {
std::set<PrefetchItem> result;
for (const PrefetchItem& item : items) {
if (item.error_code == error_code)
result.insert(item);
}
return result;
}
class StaleEntryFinalizerTaskTest : public PrefetchTaskTestBase {
public:
StaleEntryFinalizerTaskTest() = default;
......@@ -43,8 +55,10 @@ class StaleEntryFinalizerTaskTest : public PrefetchTaskTestBase {
void SetUp() override;
void TearDown() override;
PrefetchItem CreateAndInsertItem(PrefetchItemState state,
int time_delta_in_hours);
PrefetchItem InsertItemWithFreshnessTime(PrefetchItemState state,
int freshness_delta_in_hours);
PrefetchItem InsertItemWithCreationTime(PrefetchItemState state,
int creation_delta_in_hours);
TestPrefetchDispatcher* dispatcher() { return &dispatcher_; }
......@@ -66,13 +80,25 @@ void StaleEntryFinalizerTaskTest::TearDown() {
PrefetchTaskTestBase::TearDown();
}
PrefetchItem StaleEntryFinalizerTaskTest::CreateAndInsertItem(
PrefetchItem StaleEntryFinalizerTaskTest::InsertItemWithFreshnessTime(
PrefetchItemState state,
int time_delta_in_hours) {
int freshness_delta_in_hours) {
PrefetchItem item(item_generator()->CreateItem(state));
item.freshness_time = simple_test_clock_.Now() +
base::TimeDelta::FromHours(time_delta_in_hours);
item.creation_time = item.freshness_time;
base::TimeDelta::FromHours(freshness_delta_in_hours);
item.creation_time = simple_test_clock_.Now();
EXPECT_TRUE(store_util()->InsertPrefetchItem(item))
<< "Failed inserting item with state " << static_cast<int>(state);
return item;
}
PrefetchItem StaleEntryFinalizerTaskTest::InsertItemWithCreationTime(
PrefetchItemState state,
int creation_delta_in_hours) {
PrefetchItem item(item_generator()->CreateItem(state));
item.creation_time = simple_test_clock_.Now() +
base::TimeDelta::FromHours(creation_delta_in_hours);
item.freshness_time = simple_test_clock_.Now();
EXPECT_TRUE(store_util()->InsertPrefetchItem(item))
<< "Failed inserting item with state " << static_cast<int>(state);
return item;
......@@ -100,31 +126,31 @@ TEST_F(StaleEntryFinalizerTaskTest, EmptyRun) {
TEST_F(StaleEntryFinalizerTaskTest, HandlesFreshnessTimesCorrectly) {
// Insert fresh and stale items for all expirable states from all buckets.
PrefetchItem b1_item1_fresh =
CreateAndInsertItem(PrefetchItemState::NEW_REQUEST, -23);
InsertItemWithFreshnessTime(PrefetchItemState::NEW_REQUEST, -23);
PrefetchItem b1_item2_stale =
CreateAndInsertItem(PrefetchItemState::NEW_REQUEST, -25);
InsertItemWithFreshnessTime(PrefetchItemState::NEW_REQUEST, -25);
PrefetchItem b2_item1_fresh =
CreateAndInsertItem(PrefetchItemState::AWAITING_GCM, -23);
InsertItemWithFreshnessTime(PrefetchItemState::AWAITING_GCM, -23);
PrefetchItem b2_item2_stale =
CreateAndInsertItem(PrefetchItemState::AWAITING_GCM, -25);
InsertItemWithFreshnessTime(PrefetchItemState::AWAITING_GCM, -25);
PrefetchItem b2_item3_fresh =
CreateAndInsertItem(PrefetchItemState::RECEIVED_GCM, -23);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_GCM, -23);
PrefetchItem b2_item4_stale =
CreateAndInsertItem(PrefetchItemState::RECEIVED_GCM, -25);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_GCM, -25);
PrefetchItem b2_item5_fresh =
CreateAndInsertItem(PrefetchItemState::RECEIVED_BUNDLE, -23);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_BUNDLE, -23);
PrefetchItem b2_item6_stale =
CreateAndInsertItem(PrefetchItemState::RECEIVED_BUNDLE, -25);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_BUNDLE, -25);
PrefetchItem b3_item1_fresh =
CreateAndInsertItem(PrefetchItemState::DOWNLOADING, -47);
InsertItemWithFreshnessTime(PrefetchItemState::DOWNLOADING, -47);
PrefetchItem b3_item2_stale =
CreateAndInsertItem(PrefetchItemState::DOWNLOADING, -49);
InsertItemWithFreshnessTime(PrefetchItemState::DOWNLOADING, -49);
PrefetchItem b3_item3_fresh =
CreateAndInsertItem(PrefetchItemState::IMPORTING, -47);
InsertItemWithFreshnessTime(PrefetchItemState::IMPORTING, -47);
PrefetchItem b3_item4_stale =
CreateAndInsertItem(PrefetchItemState::IMPORTING, -49);
InsertItemWithFreshnessTime(PrefetchItemState::IMPORTING, -49);
// Check inserted initial items.
std::set<PrefetchItem> initial_items = {
......@@ -174,7 +200,7 @@ TEST_F(StaleEntryFinalizerTaskTest, HandlesStalesInAllStatesCorrectly) {
// than the point at which we report items as too old.
const int many_hours = -6 * 24;
for (PrefetchItemState state : kOrderedPrefetchItemStates)
CreateAndInsertItem(state, many_hours);
InsertItemWithFreshnessTime(state, many_hours);
EXPECT_EQ(11, store_util()->CountPrefetchItems());
// Execute the expiration task.
......@@ -186,19 +212,19 @@ TEST_F(StaleEntryFinalizerTaskTest, HandlesStalesInAllStatesCorrectly) {
EXPECT_EQ(11U, post_items.size());
EXPECT_EQ(
1U,
Filter(post_items, PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE).size());
Select(post_items, PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE).size());
EXPECT_EQ(1U,
Filter(post_items, PrefetchItemState::SENT_GET_OPERATION).size());
EXPECT_EQ(1U, Filter(post_items, PrefetchItemState::DOWNLOADED).size());
EXPECT_EQ(7U, Filter(post_items, PrefetchItemState::FINISHED).size());
EXPECT_EQ(1U, Filter(post_items, PrefetchItemState::ZOMBIE).size());
Select(post_items, PrefetchItemState::SENT_GET_OPERATION).size());
EXPECT_EQ(1U, Select(post_items, PrefetchItemState::DOWNLOADED).size());
EXPECT_EQ(7U, Select(post_items, PrefetchItemState::FINISHED).size());
EXPECT_EQ(1U, Select(post_items, PrefetchItemState::ZOMBIE).size());
}
// Items in states AWAITING_GCM and ZOMBIE should cause the task to finish with
// a NO_MORE_WORK result.
TEST_F(StaleEntryFinalizerTaskTest, NoWorkInQueue) {
CreateAndInsertItem(PrefetchItemState::AWAITING_GCM, 0);
CreateAndInsertItem(PrefetchItemState::ZOMBIE, 0);
InsertItemWithFreshnessTime(PrefetchItemState::AWAITING_GCM, 0);
InsertItemWithFreshnessTime(PrefetchItemState::ZOMBIE, 0);
RunTask(stale_finalizer_task_.get());
EXPECT_EQ(Result::NO_MORE_WORK, stale_finalizer_task_->final_status());
......@@ -232,34 +258,34 @@ TEST_F(StaleEntryFinalizerTaskTest, WorkInQueue) {
TEST_F(StaleEntryFinalizerTaskTest, HandlesClockSetBackwardsCorrectly) {
// Insert fresh and stale items for all expirable states from all buckets.
PrefetchItem b1_item1_recent =
CreateAndInsertItem(PrefetchItemState::NEW_REQUEST, 23);
InsertItemWithFreshnessTime(PrefetchItemState::NEW_REQUEST, 23);
PrefetchItem b1_item2_future =
CreateAndInsertItem(PrefetchItemState::NEW_REQUEST, 25);
InsertItemWithFreshnessTime(PrefetchItemState::NEW_REQUEST, 25);
PrefetchItem b2_item1_recent =
CreateAndInsertItem(PrefetchItemState::AWAITING_GCM, 23);
InsertItemWithFreshnessTime(PrefetchItemState::AWAITING_GCM, 23);
PrefetchItem b2_item2_future =
CreateAndInsertItem(PrefetchItemState::AWAITING_GCM, 25);
InsertItemWithFreshnessTime(PrefetchItemState::AWAITING_GCM, 25);
PrefetchItem b2_item3_recent =
CreateAndInsertItem(PrefetchItemState::RECEIVED_GCM, 23);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_GCM, 23);
PrefetchItem b2_item4_future =
CreateAndInsertItem(PrefetchItemState::RECEIVED_GCM, 25);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_GCM, 25);
PrefetchItem b2_item5_recent =
CreateAndInsertItem(PrefetchItemState::RECEIVED_BUNDLE, 23);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_BUNDLE, 23);
PrefetchItem b2_item6_future =
CreateAndInsertItem(PrefetchItemState::RECEIVED_BUNDLE, 25);
InsertItemWithFreshnessTime(PrefetchItemState::RECEIVED_BUNDLE, 25);
PrefetchItem b3_item1_recent =
CreateAndInsertItem(PrefetchItemState::DOWNLOADING, 23);
InsertItemWithFreshnessTime(PrefetchItemState::DOWNLOADING, 23);
PrefetchItem b3_item2_future =
CreateAndInsertItem(PrefetchItemState::DOWNLOADING, 25);
InsertItemWithFreshnessTime(PrefetchItemState::DOWNLOADING, 25);
PrefetchItem b3_item3_recent =
CreateAndInsertItem(PrefetchItemState::IMPORTING, 23);
InsertItemWithFreshnessTime(PrefetchItemState::IMPORTING, 23);
PrefetchItem b3_item4_future =
CreateAndInsertItem(PrefetchItemState::IMPORTING, 25);
InsertItemWithFreshnessTime(PrefetchItemState::IMPORTING, 25);
PrefetchItem b4_item1_future =
CreateAndInsertItem(PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE, 25);
PrefetchItem b4_item1_future = InsertItemWithFreshnessTime(
PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE, 25);
// Check inserted initial items.
std::set<PrefetchItem> initial_items = {
......@@ -319,60 +345,97 @@ TEST_F(StaleEntryFinalizerTaskTest,
// Insert "future" items for every state.
const int many_hours = 7 * 24;
for (PrefetchItemState state : kOrderedPrefetchItemStates)
CreateAndInsertItem(state, many_hours);
InsertItemWithFreshnessTime(state, many_hours);
EXPECT_EQ(11, store_util()->CountPrefetchItems());
// Execute the expiration task.
RunTask(stale_finalizer_task_.get());
EXPECT_EQ(Result::MORE_WORK_NEEDED, stale_finalizer_task_->final_status());
// Checks item counts for states expected to still exist.
// Checks item counts for states expected to still exist. The zombie item is
// expected to be deleted.
std::set<PrefetchItem> post_items = store_util()->GetAllItems();
EXPECT_EQ(11U, post_items.size());
EXPECT_EQ(10U, post_items.size());
EXPECT_EQ(
1U,
Filter(post_items, PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE).size());
Select(post_items, PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE).size());
EXPECT_EQ(1U,
Filter(post_items, PrefetchItemState::SENT_GET_OPERATION).size());
EXPECT_EQ(1U, Filter(post_items, PrefetchItemState::DOWNLOADED).size());
EXPECT_EQ(7U, Filter(post_items, PrefetchItemState::FINISHED).size());
EXPECT_EQ(1U, Filter(post_items, PrefetchItemState::ZOMBIE).size());
Select(post_items, PrefetchItemState::SENT_GET_OPERATION).size());
EXPECT_EQ(1U, Select(post_items, PrefetchItemState::DOWNLOADED).size());
EXPECT_EQ(7U, Select(post_items, PrefetchItemState::FINISHED).size());
}
// Verifies that only stale, live items are transitioned to 'FINISHED'.
TEST_F(StaleEntryFinalizerTaskTest, HandlesStuckItemsCorrectly) {
base::HistogramTester histogram_tester;
// Insert fresh and stale items for all expirable states from all buckets.
PrefetchItem item1_recent =
CreateAndInsertItem(PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE, 1);
PrefetchItem item2_stuck =
CreateAndInsertItem(PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE, -170);
PrefetchItem item3_finished =
CreateAndInsertItem(PrefetchItemState::FINISHED, -170);
PrefetchItem item4_zombie =
CreateAndInsertItem(PrefetchItemState::ZOMBIE, -170);
// Insert stuck and non-stuck items for all expirable states from all buckets.
// Note that stuck items are determined based on creation time instead of
// freshness.
for (PrefetchItemState state : kOrderedPrefetchItemStates) {
InsertItemWithCreationTime(state, -1);
InsertItemWithCreationTime(state, -170); // 170h is a bit more than a week.
}
EXPECT_EQ(22, store_util()->CountPrefetchItems());
// Execute the expiration task.
RunTask(stale_finalizer_task_.get());
EXPECT_EQ(Result::MORE_WORK_NEEDED, stale_finalizer_task_->final_status());
std::set<PrefetchItem> final_items = store_util()->GetAllItems();
EXPECT_EQ(22U, final_items.size());
// Zombie items should still be there.
EXPECT_EQ(2U, Select(final_items, PrefetchItemState::ZOMBIE).size());
// Stuck entries should have been finalized with appropriate error codes. The
// initially inserted finished items should still be there, with a "success"
// error code.
std::set<PrefetchItem> final_finished_items =
Select(final_items, PrefetchItemState::FINISHED);
EXPECT_EQ(11U, final_finished_items.size());
EXPECT_EQ(9U,
Select(final_finished_items, PrefetchItemErrorCode::STUCK).size());
EXPECT_EQ(
2U, Select(final_finished_items, PrefetchItemErrorCode::SUCCESS).size());
// All other non-stuck items should remain as they were.
std::vector<PrefetchItemState> stuck_states = GetAllStatesExcept(
{PrefetchItemState::FINISHED, PrefetchItemState::ZOMBIE});
for (PrefetchItemState state : stuck_states)
EXPECT_EQ(1U, Select(final_items, state).size());
// Check metrics were reported for all stuck entries but not for finished nor
// zombie items.
histogram_tester.ExpectTotalCount("OfflinePages.Prefetching.StuckItemState",
9);
for (PrefetchItemState state : stuck_states) {
histogram_tester.ExpectBucketCount(
"OfflinePages.Prefetching.StuckItemState", state, 1);
}
}
TEST_F(StaleEntryFinalizerTaskTest, HandlesZombieFreshnessTimesCorrectly) {
PrefetchItem zombie_item1_fresh =
InsertItemWithFreshnessTime(PrefetchItemState::ZOMBIE, -160);
PrefetchItem zombie_item2_expired =
InsertItemWithFreshnessTime(PrefetchItemState::ZOMBIE, -170);
PrefetchItem zombie_item3_future_fresh =
InsertItemWithFreshnessTime(PrefetchItemState::ZOMBIE, 23);
PrefetchItem zombie_item4_future_expired =
InsertItemWithFreshnessTime(PrefetchItemState::ZOMBIE, 25);
// Check inserted initial items.
std::set<PrefetchItem> initial_items = {item1_recent, item2_stuck,
item3_finished, item4_zombie};
std::set<PrefetchItem> initial_items = {
zombie_item1_fresh, zombie_item2_expired, zombie_item3_future_fresh,
zombie_item4_future_expired};
EXPECT_EQ(initial_items, store_util()->GetAllItems());
// Execute the expiration task.
RunTask(stale_finalizer_task_.get());
EXPECT_EQ(Result::MORE_WORK_NEEDED, stale_finalizer_task_->final_status());
EXPECT_EQ(Result::NO_MORE_WORK, stale_finalizer_task_->final_status());
// Only the stuck item is changed.
PrefetchItem want_stuck_item = item2_stuck;
want_stuck_item.state = PrefetchItemState::FINISHED;
want_stuck_item.error_code = PrefetchItemErrorCode::STUCK;
std::set<PrefetchItem> final_items{item1_recent, want_stuck_item,
item3_finished, item4_zombie};
EXPECT_EQ(final_items, store_util()->GetAllItems());
// Check that the proper UMA was reported for the stale item, but not the
// fresh item, so there should be exactly one sample.
histogram_tester.ExpectUniqueSample(
"OfflinePages.Prefetching.StuckItemState",
static_cast<int>(PrefetchItemState::SENT_GENERATE_PAGE_BUNDLE), 1);
// Only the unexpired zombies should remain.
std::set<PrefetchItem> expected_final_items = {zombie_item1_fresh,
zombie_item3_future_fresh};
EXPECT_EQ(expected_final_items, store_util()->GetAllItems());
}
} // namespace offline_pages