Commit a0aa7ae8 authored by Mohamed Amir Yosef, committed by Commit Bot

[Sync::USS] Reorder bookmark remote updates before processing them

There are no order guarantees in the remote updates received from
the server.
However, for hierarchical types such as bookmarks, it's important
to process updates in the correct order.
For example, parent node creation should happen before child node
creation.

This CL implements the following reordering rules for bookmark
remote updates in USS (an illustrative sketch follows the rules):
1. Creations and updates come before deletions.
2. Parent creation/update should come before child creation/update.
3. No need to further order deletions. Parent deletions can
   happen before child deletions. This is safe because all updates
   (e.g. moves) should have been processed already.
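
For illustration only (not part of this CL): a minimal, self-contained C++
sketch of the same reordering rules, using a hypothetical Update struct in
place of the real syncer::UpdateResponseData and a std::set for the forest
roots, mirroring the approach taken in bookmark_remote_updates_handler.cc.

#include <iostream>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

// Toy stand-in for syncer::UpdateResponseData.
struct Update {
  std::string id;
  std::string parent_id;
  bool is_deleted;
};

// Emits |id|'s children (and their subtrees) in top-down order.
void AppendChildren(
    const std::string& id,
    const std::unordered_map<std::string, const Update*>& id_to_update,
    const std::unordered_map<std::string, std::vector<std::string>>& children,
    std::vector<const Update*>* ordered) {
  auto it = children.find(id);
  if (it == children.end())
    return;
  for (const std::string& child : it->second) {
    ordered->push_back(id_to_update.at(child));
    AppendChildren(child, id_to_update, children, ordered);
  }
}

// Rule 1: creations/updates before deletions.
// Rule 2: parents before children among the creations/updates.
// Rule 3: deletions keep their incoming relative order.
std::vector<const Update*> Reorder(const std::vector<Update>& updates) {
  std::unordered_map<std::string, const Update*> id_to_update;
  std::unordered_map<std::string, std::vector<std::string>> children;
  std::set<std::string> roots;
  for (const Update& update : updates) {
    if (!update.is_deleted)
      id_to_update[update.id] = &update;
  }
  for (const auto& pair : id_to_update) {
    const Update& update = *pair.second;
    children[update.parent_id].push_back(update.id);
    // A parent with no pending update of its own is a root of the forest.
    if (id_to_update.count(update.parent_id) == 0)
      roots.insert(update.parent_id);
  }
  std::vector<const Update*> ordered;
  for (const std::string& root : roots)
    AppendChildren(root, id_to_update, children, &ordered);
  for (const Update& update : updates) {
    if (update.is_deleted)
      ordered.push_back(&update);
  }
  return ordered;
}

int main() {
  // The child arrives before its parent, and a deletion arrives first.
  std::vector<Update> updates = {{"old_node", "bookmark_bar", true},
                                 {"child", "folder", false},
                                 {"folder", "bookmark_bar", false}};
  for (const Update* update : Reorder(updates))
    std::cout << update->id << '\n';  // Prints: folder, child, old_node.
}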

Bug: 516866
Change-Id: I059224b897ea51a4a0321124b082e76be824d28f
Reviewed-on: https://chromium-review.googlesource.com/1141870
Commit-Queue: Mohamed Amir Yosef <mamir@chromium.org>
Reviewed-by: Mikel Astiz <mastiz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#577232}
parent 3fa50872
......@@ -101,6 +101,11 @@ void InitWithSyncedBookmarks(const std::vector<BookmarkInfo>& bookmarks,
for (BookmarkInfo bookmark : bookmarks) {
updates.push_back(CreateUpdateData(bookmark));
}
// TODO(crbug.com/516866): Remove after proper positioning for remote
// updates is implemented. The updates are reversed because the sorting
// algorithm isn't stable. This is OK for now because once proper positioning
// is implemented, the order of sibling update requests will be irrelevant.
std::reverse(updates.begin(), updates.end());
processor->OnUpdateReceived(CreateDummyModelTypeState(), updates);
AssertState(processor, bookmarks);
}
......@@ -314,82 +319,6 @@ TEST_F(BookmarkModelTypeProcessorTest, ShouldUpdateModelAfterRemoteDelete) {
Eq(ASCIIToUTF16(kTitle3)));
}
TEST_F(BookmarkModelTypeProcessorTest, ShouldEncodeSyncMetadata) {
const std::string kNodeId1 = "node_id1";
const std::string kTitle1 = "title1";
const std::string kUrl1 = "http://www.url1.com";
const std::string kNodeId2 = "node_id2";
const std::string kTitle2 = "title2";
const std::string kUrl2 = "http://www.url2.com";
std::vector<BookmarkInfo> bookmarks = {
{kNodeId1, kTitle1, kUrl1, kBookmarkBarId, /*server_tag=*/std::string()},
{kNodeId2, kTitle2, kUrl2, kBookmarkBarId,
/*server_tag=*/std::string()}};
InitWithSyncedBookmarks(bookmarks, processor());
// Make sure the original bookmarks exist.
const bookmarks::BookmarkNode* bookmark_bar =
bookmark_model()->bookmark_bar_node();
const bookmarks::BookmarkNode* bookmark_node1 = bookmark_bar->GetChild(0);
const bookmarks::BookmarkNode* bookmark_node2 = bookmark_bar->GetChild(1);
ASSERT_THAT(bookmark_node1, NotNull());
ASSERT_THAT(bookmark_node2, NotNull());
std::string metadata_str = processor()->EncodeSyncMetadata();
sync_pb::BookmarkModelMetadata model_metadata;
EXPECT_TRUE(model_metadata.ParseFromString(metadata_str));
// There should be 3 entries now, one for the bookmark bar, and the other 2
// nodes.
ASSERT_THAT(model_metadata.bookmarks_metadata().size(), Eq(3));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(0).id(),
Eq(bookmark_bar->id()));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(0).metadata().server_id(),
Eq(kBookmarkBarId));
EXPECT_THAT(
model_metadata.bookmarks_metadata().Get(0).metadata().is_deleted(),
Eq(false));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(1).id(),
Eq(bookmark_node1->id()));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(1).metadata().server_id(),
Eq(kNodeId1));
EXPECT_THAT(
model_metadata.bookmarks_metadata().Get(1).metadata().is_deleted(),
Eq(false));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(2).id(),
Eq(bookmark_node2->id()));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(2).metadata().server_id(),
Eq(kNodeId2));
EXPECT_THAT(
model_metadata.bookmarks_metadata().Get(2).metadata().is_deleted(),
Eq(false));
// Process a remote delete for the first node.
syncer::UpdateResponseDataList updates;
updates.push_back(CreateTombstone(kNodeId1));
processor()->OnUpdateReceived(CreateDummyModelTypeState(), updates);
metadata_str = processor()->EncodeSyncMetadata();
model_metadata.ParseFromString(metadata_str);
// There should be 2 entries now, one for the bookmark bar and one for the
// remaining bookmark node.
ASSERT_THAT(model_metadata.bookmarks_metadata().size(), Eq(2));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(1).id(),
Eq(bookmark_node2->id()));
EXPECT_THAT(model_metadata.bookmarks_metadata().Get(1).metadata().server_id(),
Eq(kNodeId2));
EXPECT_THAT(
model_metadata.bookmarks_metadata().Get(1).metadata().is_deleted(),
Eq(false));
}
TEST_F(BookmarkModelTypeProcessorTest, ShouldDecodeSyncMetadata) {
const std::string kNodeId = "node_id1";
const std::string kTitle = "title1";
......
......@@ -4,6 +4,12 @@
#include "components/sync_bookmarks/bookmark_remote_updates_handler.h"
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include "base/strings/string_piece.h"
#include "base/strings/utf_string_conversions.h"
#include "components/bookmarks/browser/bookmark_model.h"
#include "components/bookmarks/browser/bookmark_node.h"
......@@ -104,6 +110,31 @@ bool IsValidBookmark(const sync_pb::BookmarkSpecifics& specifics,
return true;
}
// Recursive method to traverse a forest created by ReorderUpdates() and emit
// updates in top-down order. |ordered_updates| must not be null; traversed
// updates are appended to |*ordered_updates|.
void TraverseAndAppendChildren(
const base::StringPiece& node_id,
const std::unordered_map<base::StringPiece,
const syncer::UpdateResponseData*,
base::StringPieceHash>& id_to_updates,
const std::unordered_map<base::StringPiece,
std::vector<base::StringPiece>,
base::StringPieceHash>& node_to_children,
std::vector<const syncer::UpdateResponseData*>* ordered_updates) {
// If no children to traverse, we are done.
if (node_to_children.count(node_id) == 0) {
return;
}
// Recurse over all children.
for (const base::StringPiece& child : node_to_children.at(node_id)) {
DCHECK_NE(id_to_updates.count(child), 0U);
ordered_updates->push_back(id_to_updates.at(child));
TraverseAndAppendChildren(child, id_to_updates, node_to_children,
ordered_updates);
}
}
} // namespace
BookmarkRemoteUpdatesHandler::BookmarkRemoteUpdatesHandler(
......@@ -116,7 +147,7 @@ BookmarkRemoteUpdatesHandler::BookmarkRemoteUpdatesHandler(
void BookmarkRemoteUpdatesHandler::Process(
const syncer::UpdateResponseDataList& updates) {
for (const syncer::UpdateResponseData* update : ReorderUpdates(updates)) {
for (const syncer::UpdateResponseData* update : ReorderUpdates(&updates)) {
const syncer::EntityData& update_entity = update->entity.value();
// TODO(crbug.com/516866): Check |update_entity| for sanity.
// 1. Has bookmark specifics or no specifics in case of delete.
......@@ -143,49 +174,86 @@ void BookmarkRemoteUpdatesHandler::Process(
// static
std::vector<const syncer::UpdateResponseData*>
BookmarkRemoteUpdatesHandler::ReorderUpdatesForTest(
const syncer::UpdateResponseDataList& updates) {
const syncer::UpdateResponseDataList* updates) {
return ReorderUpdates(updates);
}
// static
std::vector<const syncer::UpdateResponseData*>
BookmarkRemoteUpdatesHandler::ReorderUpdates(
const syncer::UpdateResponseDataList& updates) {
// TODO(crbug.com/516866): This is a very simple (hacky) reordering algorithm
// that assumes no folders exist except the top level permanent ones. This
// should be fixed before enabling USS for bookmarks.
std::vector<const syncer::UpdateResponseData*> ordered_updates;
for (const syncer::UpdateResponseData& update : updates) {
const syncer::UpdateResponseDataList* updates) {
// This method sorts the remote updates according to the following rules:
// 1. Creations and updates come before deletions.
// 2. Parent creation/update should come before child creation/update.
// 3. No need to further order deletions. Parent deletions can happen before
// child deletions. This is safe because all updates (e.g. moves) should
// have been processed already.
// The algorithm works by constructing a forest of all non-deletion updates
// and then traversing each tree in the forest recursively:
// 1. Iterate over all updates and construct the |parent_to_children| map and
//    collect all parents in |roots|.
// 2. Iterate over all updates again and drop any parent that has a
//    corresponding update. What's left in |roots| are the roots of the
//    forest.
// 3. Start at each root in |roots|, emit the update and recurse over its
//    children.
std::unordered_map<base::StringPiece, const syncer::UpdateResponseData*,
base::StringPieceHash>
id_to_updates;
std::set<base::StringPiece> roots;
std::unordered_map<base::StringPiece, std::vector<base::StringPiece>,
base::StringPieceHash>
parent_to_children;
// Add only non-deletions to |id_to_updates|.
for (const syncer::UpdateResponseData& update : *updates) {
const syncer::EntityData& update_entity = update.entity.value();
// Ignore updates to root nodes.
if (update_entity.parent_id == "0") {
continue;
}
if (update_entity.parent_id == kBookmarksRootId) {
ordered_updates.push_back(&update);
}
}
for (const syncer::UpdateResponseData& update : updates) {
const syncer::EntityData& update_entity = update.entity.value();
// Deletions should come last.
if (update_entity.is_deleted()) {
continue;
}
if (update_entity.parent_id != "0" &&
update_entity.parent_id != kBookmarksRootId) {
ordered_updates.push_back(&update);
id_to_updates[update_entity.id] = &update;
}
// Iterate over |id_to_updates| and construct |roots| and
// |parent_to_children|.
for (const std::pair<base::StringPiece, const syncer::UpdateResponseData*>&
pair : id_to_updates) {
const syncer::EntityData& update_entity = pair.second->entity.value();
parent_to_children[update_entity.parent_id].push_back(update_entity.id);
// If this entity's parent has no pending update, add it to |roots|.
if (id_to_updates.count(update_entity.parent_id) == 0) {
roots.insert(update_entity.parent_id);
}
}
// |roots| contains only the roots of all trees in the forest, all of which
// are ready to be processed because none of them has a pending update.
std::vector<const syncer::UpdateResponseData*> ordered_updates;
for (const base::StringPiece& root : roots) {
TraverseAndAppendChildren(root, id_to_updates, parent_to_children,
&ordered_updates);
}
// Now add deletions.
for (const syncer::UpdateResponseData& update : updates) {
int root_node_updates_count = 0;
// Add deletions.
for (const syncer::UpdateResponseData& update : *updates) {
const syncer::EntityData& update_entity = update.entity.value();
if (!update_entity.is_deleted()) {
// Ignore updates to root nodes.
if (update_entity.parent_id == "0") {
root_node_updates_count++;
continue;
}
if (update_entity.parent_id != "0" &&
update_entity.parent_id != kBookmarksRootId) {
if (update_entity.is_deleted()) {
ordered_updates.push_back(&update);
}
}
// All non-root updates should have been included in |ordered_updates|.
DCHECK_EQ(updates->size(), ordered_updates.size() + root_node_updates_count);
return ordered_updates;
}
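
For illustration, with hypothetical server ids: given an incoming batch of
[tombstone for "node3", create "node2" under "node1", create "node1" under the
bookmark bar], the two non-deletions go into |id_to_updates|,
|parent_to_children| becomes {bookmark_bar: [node1], node1: [node2]}, and
|roots| ends up holding only the bookmark bar id because it has no pending
update of its own. The traversal therefore emits node1 and then node2, and the
tombstone for node3 is appended last, so the handler processes
[node1, node2, delete node3].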
......@@ -302,8 +370,10 @@ void BookmarkRemoteUpdatesHandler::ProcessRemoteDelete(
// Handle corner cases first.
if (tracked_entity == nullptr) {
// Local entity doesn't exist and update is tombstone.
DLOG(WARNING) << "Received remote delete for a non-existing item.";
// Process deletion only if the entity is still tracked. It could have
// been recursively deleted already with an earlier deletion of its
// parent.
DVLOG(1) << "Received remote delete for a non-existing item.";
return;
}
......@@ -313,15 +383,23 @@ void BookmarkRemoteUpdatesHandler::ProcessRemoteDelete(
if (bookmark_model_->is_permanent_node(node)) {
return;
}
// TODO(crbug.com/516866): Allow deletions of non-empty directories if it
// makes sense, and recursively delete children.
if (node->child_count() > 0) {
DLOG(WARNING) << "Trying to delete a non-empty folder.";
return;
}
// Remove the entities of |node| and its children.
RemoveEntityAndChildrenFromTracker(node);
// Remove the node and its children from the model.
bookmark_model_->Remove(node);
bookmark_tracker_->Remove(update_entity.id);
}
void BookmarkRemoteUpdatesHandler::RemoveEntityAndChildrenFromTracker(
const bookmarks::BookmarkNode* node) {
const SyncedBookmarkTracker::Entity* entity =
bookmark_tracker_->GetEntityForBookmarkNode(node);
DCHECK(entity);
bookmark_tracker_->Remove(entity->metadata()->server_id());
for (int i = 0; i < node->child_count(); ++i) {
const bookmarks::BookmarkNode* child = node->GetChild(i);
RemoveEntityAndChildrenFromTracker(child);
}
}
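
A note on ordering (an inference, not stated in the CL): in
ProcessRemoteDelete() above, the tracker entries are removed before
bookmark_model_->Remove(node), presumably because GetEntityForBookmarkNode()
needs the BookmarkNode pointers of the subtree, and those nodes are destroyed
once the subtree is removed from the model.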
const bookmarks::BookmarkNode* BookmarkRemoteUpdatesHandler::GetParentNode(
......
......@@ -5,6 +5,8 @@
#ifndef COMPONENTS_SYNC_BOOKMARKS_BOOKMARK_REMOTE_UPDATES_HANDLER_H_
#define COMPONENTS_SYNC_BOOKMARKS_BOOKMARK_REMOTE_UPDATES_HANDLER_H_
#include <map>
#include <string>
#include <vector>
#include "components/sync/engine/non_blocking_sync_common.h"
......@@ -17,7 +19,8 @@ class BookmarkNode;
namespace sync_bookmarks {
// Responsible for processing remote updates received from the sync server.
// Responsible for processing one batch of remote updates received from the sync
// server.
class BookmarkRemoteUpdatesHandler {
public:
// |bookmark_model| and |bookmark_tracker| must not be null and must outlive
......@@ -30,7 +33,7 @@ class BookmarkRemoteUpdatesHandler {
// Public for testing.
static std::vector<const syncer::UpdateResponseData*> ReorderUpdatesForTest(
const syncer::UpdateResponseDataList& updates);
const syncer::UpdateResponseDataList* updates);
private:
// Reorders incoming updates such that parent creation is before child
......@@ -38,7 +41,7 @@ class BookmarkRemoteUpdatesHandler {
// come last. The returned pointers point to the elements in the original
// |updates|.
static std::vector<const syncer::UpdateResponseData*> ReorderUpdates(
const syncer::UpdateResponseDataList& updates);
const syncer::UpdateResponseDataList* updates);
// Given a remote update entity, it returns the parent bookmark node of the
// corresponding node. It returns null if the parent node cannot be found.
......@@ -70,6 +73,10 @@ class BookmarkRemoteUpdatesHandler {
void ProcessRemoteDelete(const syncer::EntityData& update_entity,
const SyncedBookmarkTracker::Entity* tracked_entity);
// Recursively removes the entities corresponding to |node| and its children
// from |bookmark_tracker_|.
void RemoveEntityAndChildrenFromTracker(const bookmarks::BookmarkNode* node);
// Associates the permanent bookmark folders with the corresponding server
// side ids and registers the association in |bookmark_tracker_|.
// |update_entity| must contain server_defined_unique_tag that is used to
......
......@@ -97,6 +97,35 @@ TEST(SyncedBookmarkTrackerTest, ShouldReturnNullForDisassociatedNodes) {
EXPECT_THAT(tracker.GetEntityForSyncId(kSyncId), IsNull());
}
TEST(SyncedBookmarkTrackerTest, ShouldBuildBookmarkModelMetadata) {
SyncedBookmarkTracker tracker(std::vector<NodeMetadataPair>(),
std::make_unique<sync_pb::ModelTypeState>());
const std::string kSyncId = "SYNC_ID";
const std::string kTitle = "Title";
const GURL kUrl("http://www.foo.com");
const int64_t kId = 1;
const int64_t kServerVersion = 1000;
const base::Time kCreationTime(base::Time::Now() -
base::TimeDelta::FromSeconds(1));
const syncer::UniquePosition unique_position =
syncer::UniquePosition::InitialPosition(
syncer::UniquePosition::RandomSuffix());
const sync_pb::EntitySpecifics specifics =
GenerateSpecifics(/*title=*/std::string(), /*url=*/std::string());
bookmarks::BookmarkNode node(kId, kUrl);
tracker.Add(kSyncId, &node, kServerVersion, kCreationTime,
unique_position.ToProto(), specifics);
sync_pb::BookmarkModelMetadata bookmark_model_metadata =
tracker.BuildBookmarkModelMetadata();
ASSERT_THAT(bookmark_model_metadata.bookmarks_metadata().size(), Eq(1));
EXPECT_THAT(
bookmark_model_metadata.bookmarks_metadata(0).metadata().server_id(),
Eq(kSyncId));
}
TEST(SyncedBookmarkTrackerTest,
ShouldRequireCommitRequestWhenSequenceNumberIsIncremented) {
SyncedBookmarkTracker tracker(std::vector<NodeMetadataPair>(),
......