Commit c164ede5 authored by Mikel Astiz, committed by Commit Bot

Handle duplicate bookmark GUIDs in remote data

Server-side data can theoretically contain duplicate GUIDs, and the end
result today is a crash.

It is hard to deal with this case or even reason about what a reasonable
behavior is, so this patch instead detects and resolves the issue in a
preprocessing stage, such that offending GUIDs are ignored.

Doing so allows further refactorings to simplify the merge logic.
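
The deduplication helpers added below all follow the same keep-last-per-key
pattern. A minimal self-contained sketch of that pattern (illustrative only,
using plain std types rather than the real UpdateResponseDataList and entity
fields):

  #include <map>
  #include <string>
  #include <utility>
  #include <vector>

  // Keeps only the last value seen for each non-empty key, preserving the
  // position of the first occurrence; entries with an empty key pass through.
  std::vector<std::pair<std::string, std::string>> DeduplicateKeepLast(
      std::vector<std::pair<std::string, std::string>> updates) {
    std::vector<std::pair<std::string, std::string>> result;
    result.reserve(updates.size());
    std::map<std::string, size_t> key_to_index;
    for (auto& update : updates) {
      if (update.first.empty()) {
        result.push_back(std::move(update));
        continue;
      }
      // emplace() fails if the key was already seen and returns its iterator.
      auto it_and_inserted = key_to_index.emplace(update.first, result.size());
      if (it_and_inserted.second) {
        result.push_back(std::move(update));  // First occurrence: append.
      } else {
        // Duplicate: overwrite the earlier occurrence in place.
        result[it_and_inserted.first->second] = std::move(update);
      }
    }
    return result;
  }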

Bug: 978430
Change-Id: I4926bfe88aee2724633a447b0ed9ce086358b439
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1939974
Commit-Queue: Mikel Astiz <mastiz@chromium.org>
Reviewed-by: Mohamed Amir Yosef <mamir@chromium.org>
Reviewed-by: Marc Treib <treib@chromium.org>
Cr-Commit-Position: refs/heads/master@{#720217}
parent f863cc78
@@ -6,7 +6,6 @@
 #include <stdint.h>

-#include <algorithm>
 #include <map>
 #include <utility>
 #include <vector>
@@ -36,31 +35,6 @@ namespace syncer {
 namespace {

-bool ContainsDuplicate(std::vector<std::string> values) {
-  std::sort(values.begin(), values.end());
-  return std::adjacent_find(values.begin(), values.end()) != values.end();
-}
-
-bool ContainsDuplicateClientTagHash(const UpdateResponseDataList& updates) {
-  std::vector<std::string> raw_client_tag_hashes;
-  for (const std::unique_ptr<UpdateResponseData>& update : updates) {
-    DCHECK(update);
-    if (!update->entity->client_tag_hash.value().empty()) {
-      raw_client_tag_hashes.push_back(update->entity->client_tag_hash.value());
-    }
-  }
-  return ContainsDuplicate(std::move(raw_client_tag_hashes));
-}
-
-bool ContainsDuplicateServerID(const UpdateResponseDataList& updates) {
-  std::vector<std::string> server_ids;
-  for (const std::unique_ptr<UpdateResponseData>& update : updates) {
-    DCHECK(update);
-    server_ids.push_back(update->entity->id);
-  }
-  return ContainsDuplicate(std::move(server_ids));
-}
-
 // Enumeration of possible values for the positioning schemes used in Sync
 // entities. Used in UMA metrics. Do not re-order or delete these entries; they
 // are used in a UMA histogram. Please edit SyncPositioningScheme in enums.xml
@@ -482,19 +456,16 @@ void ModelTypeWorker::ApplyPendingUpdates() {
       << base::StringPrintf("Delivering %" PRIuS " applicable updates.",
                             pending_updates_.size());

-  // Having duplicates should be rare, so only do the de-duping if
-  // we've actually detected one.
-  // Deduplicate updates first based on server ids.
-  if (ContainsDuplicateServerID(pending_updates_)) {
-    DeduplicatePendingUpdatesBasedOnServerId();
-  }
-
-  // Check for duplicate client tag hashes after removing duplicate server
-  // ids, and deduplicate updates based on client tag hashes if necessary.
-  if (ContainsDuplicateClientTagHash(pending_updates_)) {
-    DeduplicatePendingUpdatesBasedOnClientTagHash();
-  }
+  // Deduplicate updates first based on server ids, which is the only legit
+  // source of duplicates, specially due to pagination.
+  DeduplicatePendingUpdatesBasedOnServerId();
+
+  // As extra precaution, and although it shouldn't be necessary without a
+  // misbehaving server, deduplicate based on client tags and originator item
+  // IDs. This allows further code to use DCHECKs without relying on external
+  // behavior.
+  DeduplicatePendingUpdatesBasedOnClientTagHash();
+  DeduplicatePendingUpdatesBasedOnOriginatorClientItemId();

   int num_updates_applied = pending_updates_.size();
   model_type_processor_->OnUpdateReceived(model_type_state_,
@@ -690,6 +661,7 @@ void ModelTypeWorker::DecryptStoredEntities() {
 void ModelTypeWorker::DeduplicatePendingUpdatesBasedOnServerId() {
   UpdateResponseDataList candidates;
   pending_updates_.swap(candidates);
+  pending_updates_.reserve(candidates.size());

   std::map<std::string, size_t> id_to_index;
   for (std::unique_ptr<UpdateResponseData>& candidate : candidates) {
@@ -716,6 +688,7 @@ void ModelTypeWorker::DeduplicatePendingUpdatesBasedOnServerId() {
 void ModelTypeWorker::DeduplicatePendingUpdatesBasedOnClientTagHash() {
   UpdateResponseDataList candidates;
   pending_updates_.swap(candidates);
+  pending_updates_.reserve(candidates.size());

   std::map<ClientTagHash, size_t> tag_to_index;
   for (std::unique_ptr<UpdateResponseData>& candidate : candidates) {
@@ -741,6 +714,36 @@ void ModelTypeWorker::DeduplicatePendingUpdatesBasedOnClientTagHash() {
   }
 }

+void ModelTypeWorker::DeduplicatePendingUpdatesBasedOnOriginatorClientItemId() {
+  UpdateResponseDataList candidates;
+  pending_updates_.swap(candidates);
+  pending_updates_.reserve(candidates.size());
+
+  std::map<std::string, size_t> id_to_index;
+  for (std::unique_ptr<UpdateResponseData>& candidate : candidates) {
+    DCHECK(candidate);
+    // Items with empty item ID just get passed through (which is the case for
+    // all datatypes except bookmarks).
+    if (candidate->entity->originator_client_item_id.empty()) {
+      pending_updates_.push_back(std::move(candidate));
+      continue;
+    }
+    // Try to insert. If we already saw an item with the same originator item
+    // ID, this will fail but give us its iterator.
+    auto it_and_success = id_to_index.emplace(
+        candidate->entity->originator_client_item_id, pending_updates_.size());
+    if (it_and_success.second) {
+      // New item ID, append at the end. Note that we already inserted the
+      // correct index (|pending_updates_.size()|) above.
+      pending_updates_.push_back(std::move(candidate));
+    } else {
+      // Duplicate! Overwrite the existing item.
+      size_t existing_index = it_and_success.first->second;
+      pending_updates_[existing_index] = std::move(candidate);
+    }
+  }
+}
+
 // static
 bool ModelTypeWorker::DecryptSpecifics(const Cryptographer& cryptographer,
                                        const sync_pb::EntitySpecifics& in,
...
@@ -205,6 +205,11 @@ class ModelTypeWorker : public UpdateHandler,
   // tag hash. It discards all of them except the last one.
   void DeduplicatePendingUpdatesBasedOnClientTagHash();

+  // Filters our duplicate updates from |pending_updates_| based on the
+  // originator item ID (in practice used for bookmarks only). It discards all
+  // of them except the last one.
+  void DeduplicatePendingUpdatesBasedOnOriginatorClientItemId();
+
   ModelType type_;

   DataTypeDebugInfoEmitter* debug_info_emitter_;
...
@@ -864,11 +864,10 @@ TEST_F(ModelTypeWorkerTest, ReceiveUpdates_MultipleDuplicateHashes) {
   EXPECT_EQ(kValue3, result[2]->entity->specifics.preference().value());
 }

+// Covers the scenario where two updates have the same client tag hash but
+// different server IDs. This scenario is considered a bug on the server.
 TEST_F(ModelTypeWorkerTest,
        ReceiveUpdates_DuplicateClientTagHashesForDistinctServerIds) {
-  // This is testing that in a a scenario where two updates are having the same
-  // client tag hashes and different server ids, the proper UMA metrics are
-  // emitted. This scenario is considered a bug on the server.
   NormalInitialize();

   // First create two entities with different tags, so they get assigned
@@ -897,6 +896,44 @@ TEST_F(ModelTypeWorkerTest,
   EXPECT_EQ(entity2.id_string(), result[0]->entity->id);
 }

+// Covers the scenario where two updates have the same originator client item ID
+// but different server IDs. This scenario is considered a bug on the server.
+TEST_F(ModelTypeWorkerTest,
+       ReceiveUpdates_DuplicateOriginatorClientIdForDistinctServerIds) {
+  const std::string kOriginatorClientItemId = "itemid";
+  const std::string kURL1 = "http://url1";
+  const std::string kURL2 = "http://url2";
+  const std::string kServerId1 = "serverid1";
+  const std::string kServerId2 = "serverid2";
+
+  NormalInitialize();
+
+  sync_pb::SyncEntity entity1;
+  sync_pb::SyncEntity entity2;
+
+  // Generate two entities with the same originator client item ID.
+  entity1.set_id_string(kServerId1);
+  entity2.set_id_string(kServerId2);
+  entity1.mutable_specifics()->mutable_bookmark()->set_url(kURL1);
+  entity2.mutable_specifics()->mutable_bookmark()->set_url(kURL2);
+  entity1.set_originator_client_item_id(kOriginatorClientItemId);
+  entity2.set_originator_client_item_id(kOriginatorClientItemId);
+
+  worker()->ProcessGetUpdatesResponse(
+      server()->GetProgress(), server()->GetContext(), {&entity1, &entity2},
+      status_controller());
+  ApplyUpdates();
+
+  // Make sure the first update has been discarded.
+  ASSERT_EQ(1u, processor()->GetNumUpdateResponses());
+  std::vector<const UpdateResponseData*> result =
+      processor()->GetNthUpdateResponse(0);
+  ASSERT_EQ(1u, result.size());
+  ASSERT_TRUE(result[0]);
+  EXPECT_EQ(kURL2, result[0]->entity->specifics.bookmark().url());
+}
+
 // Test that an update download coming in multiple parts gets accumulated into
 // one call to the processor.
 TEST_F(ModelTypeWorkerTest, ReceiveMultiPartUpdates) {
@@ -1902,6 +1939,7 @@ TEST_F(ModelTypeWorkerBookmarksTest, CanDecryptUpdateWithMissingBookmarkGUID) {
   sync_pb::SyncEntity entity;
   entity.mutable_specifics()->mutable_bookmark()->set_url("www.foo.com");
   entity.mutable_specifics()->mutable_bookmark()->set_title("Title");
+  entity.set_id_string("testserverid");
   entity.set_originator_client_item_id(kGuid1);
   *entity.mutable_unique_position() =
       UniquePosition::InitialPosition(UniquePosition::RandomSuffix()).ToProto();
@@ -1950,6 +1988,7 @@ TEST_F(ModelTypeWorkerBookmarksTest,
   sync_pb::SyncEntity entity;
   entity.mutable_specifics()->mutable_bookmark()->set_url("www.foo.com");
   entity.mutable_specifics()->mutable_bookmark()->set_title("Title");
+  entity.set_id_string("testserverid");
   entity.set_originator_client_item_id(kInvalidOCII);
   *entity.mutable_unique_position() =
       UniquePosition::InitialPosition(UniquePosition::RandomSuffix()).ToProto();
@@ -1996,6 +2035,7 @@ TEST_F(ModelTypeWorkerBookmarksTest,
   // Generate specifics without a GUID.
   sync_pb::SyncEntity entity;
   entity.mutable_specifics()->mutable_bookmark();
+  entity.set_id_string("testserverid");
   entity.set_originator_client_item_id(kGuid1);
   *entity.mutable_unique_position() =
       UniquePosition::InitialPosition(UniquePosition::RandomSuffix()).ToProto();
@@ -2037,6 +2077,7 @@ TEST_F(ModelTypeWorkerBookmarksTest,
   // originator_client_item_id.
   sync_pb::SyncEntity entity;
   entity.mutable_specifics()->mutable_bookmark();
+  entity.set_id_string("testserverid");
   entity.set_originator_client_item_id(kInvalidOCII);
   *entity.mutable_unique_position() =
       UniquePosition::InitialPosition(UniquePosition::RandomSuffix()).ToProto();
...
@@ -60,6 +60,26 @@ using UpdatesPerParentId = std::unordered_map<base::StringPiece,
                                                syncer::UpdateResponseDataList,
                                                base::StringPieceHash>;

+// Gets the bookmark node corresponding to a permanent folder identified by
+// |server_defined_unique_tag|. |bookmark_model| must not be null.
+const bookmarks::BookmarkNode* GetPermanentFolder(
+    const bookmarks::BookmarkModel* bookmark_model,
+    const std::string& server_defined_unique_tag) {
+  DCHECK(bookmark_model);
+
+  if (server_defined_unique_tag == kBookmarkBarTag) {
+    return bookmark_model->bookmark_bar_node();
+  }
+  if (server_defined_unique_tag == kOtherBookmarksTag) {
+    return bookmark_model->other_node();
+  }
+  if (server_defined_unique_tag == kMobileBookmarksTag) {
+    return bookmark_model->mobile_node();
+  }
+  return nullptr;
+}
+
 // Canonicalize |title| similar to legacy client's implementation by truncating
 // up to |kTitleLimitBytes| and the appending ' ' in some cases.
 std::string CanonicalizeTitle(const std::string& title) {
@@ -97,6 +117,53 @@ bool NodeSemanticsMatch(const bookmarks::BookmarkNode* local_node,
   return local_node->url() == GURL(specifics.url());
 }

+// Goes through remote updates to detect duplicate GUIDs (should be extremely
+// rare) and resolve them by ignoring (clearing) all occurrences except one,
+// which if possible will be the one that also matches the originator client
+// item ID.
+// TODO(crbug.com/978430): Remove this logic and deprecate proto field.
+void ResolveDuplicateRemoteGUIDs(syncer::UpdateResponseDataList* updates) {
+  std::set<std::string> known_guids;
+
+  // In a first pass we process |originator_client_item_id| which is more
+  // authoritative and cannot run into duplicates.
+  for (const std::unique_ptr<UpdateResponseData>& update : *updates) {
+    DCHECK(update);
+    DCHECK(update->entity);
+
+    // |originator_client_item_id| is empty for permanent nodes.
+    if (update->entity->is_deleted() ||
+        update->entity->originator_client_item_id.empty()) {
+      continue;
+    }
+
+    bool success =
+        known_guids.insert(update->entity->originator_client_item_id).second;
+    DCHECK(success);
+  }
+
+  // In a second pass, detect if GUIDs in specifics conflict with each other or
+  // with |originator_client_item_id| values processed earlier.
+  for (std::unique_ptr<UpdateResponseData>& update : *updates) {
+    DCHECK(update);
+    DCHECK(update->entity);
+
+    const std::string& guid_in_specifics =
+        update->entity->specifics.bookmark().guid();
+    if (guid_in_specifics.empty() ||
+        guid_in_specifics == update->entity->originator_client_item_id) {
+      continue;
+    }
+
+    bool success = known_guids.insert(guid_in_specifics).second;
+    if (!success) {
+      // This GUID conflicts with another one, so let's ignore it for the
+      // purpose of merging. This mimics the data produced by old clients,
+      // without the GUID being populated.
+      update->entity->specifics.mutable_bookmark()->clear_guid();
+    }
+  }
+}
+
 // Groups all valid updates by the server ID of their parent and moves them away
 // from |*updates|. |updates| must not be null.
 UpdatesPerParentId GroupValidUpdatesByParentId(
@@ -172,7 +239,10 @@ class BookmarkModelMerger::RemoteTreeNode final {
     const std::string& guid = entity().specifics.bookmark().guid();
     if (!guid.empty()) {
       DCHECK(base::IsValidGUID(guid));
-      guid_to_remote_node_map->emplace(guid, this);
+
+      // Duplicate GUIDs have been sorted out before.
+      bool success = guid_to_remote_node_map->emplace(guid, this).second;
+      DCHECK(success);
     }

     for (const RemoteTreeNode& child : children_) {
@@ -267,7 +337,7 @@ void BookmarkModelMerger::Merge() {
   // Associate permanent folders.
   for (const auto& tree_tag_and_root : remote_forest_) {
     const bookmarks::BookmarkNode* permanent_folder =
-        GetPermanentFolder(tree_tag_and_root.first);
+        GetPermanentFolder(bookmark_model_, tree_tag_and_root.first);
     if (!permanent_folder) {
       continue;
     }
@@ -279,6 +349,8 @@ void BookmarkModelMerger::Merge() {
 // static
 BookmarkModelMerger::RemoteForest BookmarkModelMerger::BuildRemoteForest(
     syncer::UpdateResponseDataList updates) {
+  ResolveDuplicateRemoteGUIDs(&updates);
+
   // Filter out invalid remote updates and group the valid ones by the server ID
   // of their parent.
   UpdatesPerParentId updates_per_parent_id =
@@ -312,9 +384,6 @@ BookmarkModelMerger::FindGuidMatchesOrReassignLocal(
     bookmarks::BookmarkModel* bookmark_model) {
   DCHECK(bookmark_model);

-  // TODO(crbug.com/978430): Handle potential duplicate GUIDs within remote
-  // updates.
-
   if (!base::FeatureList::IsEnabled(switches::kMergeBookmarksUsingGUIDs)) {
     return {};
   }
@@ -336,10 +405,6 @@ BookmarkModelMerger::FindGuidMatchesOrReassignLocal(
     const bookmarks::BookmarkNode* const node = iterator.Next();
     DCHECK(base::IsValidGUID(node->guid()));

-    if (node->is_permanent_node()) {
-      continue;
-    }
-
     const auto remote_it = guid_to_remote_node_map.find(node->guid());
     if (remote_it == guid_to_remote_node_map.end()) {
       continue;
@@ -347,13 +412,24 @@
     const RemoteTreeNode* const remote_node = remote_it->second;
     const syncer::EntityData& remote_entity = remote_node->entity();

+    // Permanent nodes don't match by GUID but by |server_defined_unique_tag|.
+    // As extra precaution, specially with remote GUIDs in mind, let's ignore
+    // them explicitly here.
+    if (node->is_permanent_node() ||
+        GetPermanentFolder(bookmark_model,
+                           remote_entity.server_defined_unique_tag) !=
+            nullptr) {
+      continue;
+    }
+
     if (node->is_folder() != remote_entity.is_folder ||
         (node->is_url() &&
          node->url() != remote_entity.specifics.bookmark().url())) {
       // If local node and its remote node match are conflicting in node type or
       // URL, replace local GUID with a random GUID.
       // TODO(crbug.com/978430): Local GUIDs should also be reassigned if they
-      // match a remote originator_item_id.
+      // match a remote originator_client_item_id.
       ReplaceBookmarkNodeGUID(node, base::GenerateGUID(), bookmark_model);
       continue;
     }
@@ -440,7 +516,12 @@ const bookmarks::BookmarkNode* BookmarkModelMerger::FindMatchingLocalNode(
     return nullptr;
   }

-  return local_parent->children()[local_index].get();
+  // The child at |local_index| has matched by semantics, which also means it
+  // does not match by GUID to any other remote node.
+  const bookmarks::BookmarkNode* matching_local_node_by_semantics =
+      local_parent->children()[local_index].get();
+  DCHECK(!FindMatchingRemoteNodeByGUID(matching_local_node_by_semantics));
+  return matching_local_node_by_semantics;
 }

 const bookmarks::BookmarkNode*
@@ -457,21 +538,33 @@ BookmarkModelMerger::UpdateBookmarkNodeFromSpecificsIncludingGUID(
   const sync_pb::BookmarkSpecifics& specifics =
       remote_update_entity.specifics.bookmark();

-  // If the nodes were matched by GUID, we update the BookmarkNode semantics
-  // accordingly.
-  if (local_node->guid() == specifics.guid()) {
-    UpdateBookmarkNodeFromSpecifics(specifics, local_node, bookmark_model_,
-                                    favicon_service_);
-  }
-
-  // If the nodes were matched by semantics, the local GUID is replaced by its
-  // remote counterpart, unless it is empty, in which case we keep the local
-  // GUID unchanged.
-  if (specifics.guid().empty() || FindMatchingLocalNodeByGUID(remote_node)) {
-    return local_node;
+  // Update the local GUID if necessary for semantic matches (it's obviously not
+  // needed for GUID-based matches).
+  const bookmarks::BookmarkNode* possibly_replaced_local_node = local_node;
+  if (!specifics.guid().empty() && specifics.guid() != local_node->guid()) {
+    // If it's a semantic match, neither of the nodes should be involved in any
+    // GUID-based match.
+    DCHECK(!FindMatchingLocalNodeByGUID(remote_node));
+    DCHECK(!FindMatchingRemoteNodeByGUID(local_node));
+
+    possibly_replaced_local_node =
+        ReplaceBookmarkNodeGUID(local_node, specifics.guid(), bookmark_model_);
+
+    // Update |guid_to_match_map_| to avoid pointing to a deleted node. This
+    // should not be required in practice, because the algorithm processes each
+    // GUID once, but let's update nevertheless to avoid future issues.
+    const auto it =
+        guid_to_match_map_.find(possibly_replaced_local_node->guid());
+    if (it != guid_to_match_map_.end() && it->second.local_node == local_node) {
+      it->second.local_node = possibly_replaced_local_node;
+    }
   }

-  DCHECK(base::IsValidGUID(specifics.guid()));
-  return ReplaceBookmarkNodeGUID(local_node, specifics.guid(), bookmark_model_);
+  // Update all fields, where no-op changes are handled well.
+  UpdateBookmarkNodeFromSpecifics(specifics, possibly_replaced_local_node,
+                                  bookmark_model_, favicon_service_);
+
+  return possibly_replaced_local_node;
 }

 void BookmarkModelMerger::ProcessRemoteCreation(
@@ -576,20 +669,6 @@ void BookmarkModelMerger::ProcessLocalCreation(
   }
 }

-const bookmarks::BookmarkNode* BookmarkModelMerger::GetPermanentFolder(
-    const std::string& server_defined_unique_tag) const {
-  if (server_defined_unique_tag == kBookmarkBarTag) {
-    return bookmark_model_->bookmark_bar_node();
-  }
-  if (server_defined_unique_tag == kOtherBookmarksTag) {
-    return bookmark_model_->other_node();
-  }
-  if (server_defined_unique_tag == kMobileBookmarksTag) {
-    return bookmark_model_->mobile_node();
-  }
-  return nullptr;
-}
-
 size_t BookmarkModelMerger::FindMatchingChildBySemanticsStartingAt(
     const RemoteTreeNode& remote_node,
     const bookmarks::BookmarkNode* local_parent,
@@ -616,6 +695,7 @@ BookmarkModelMerger::FindMatchingRemoteNodeByGUID(
     return nullptr;
   }

+  DCHECK_EQ(it->second.local_node, local_node);
   return it->second.remote_node;
 }
@@ -628,6 +708,7 @@ const bookmarks::BookmarkNode* BookmarkModelMerger::FindMatchingLocalNodeByGUID(
     return nullptr;
   }

+  DCHECK_EQ(it->second.remote_node, &remote_node);
   return it->second.local_node;
 }
...
@@ -107,11 +107,6 @@ class BookmarkModelMerger {
   void ProcessLocalCreation(const bookmarks::BookmarkNode* parent,
                             size_t index);

-  // Gets the bookmark node corresponding to a permanent folder identified by
-  // |server_defined_unique_tag|.
-  const bookmarks::BookmarkNode* GetPermanentFolder(
-      const std::string& server_defined_unique_tag) const;
-
   // Looks for a local node under |local_parent| that matches |remote_node|,
   // starting at index |local_child_start_index|. First attempts to find a match
   // by GUID and otherwise attempts to find one by semantics. If no match is
...
@@ -23,6 +23,7 @@
 using testing::_;
 using testing::Eq;
+using testing::Ne;
 using testing::NotNull;
 using testing::UnorderedElementsAre;
@@ -64,6 +65,7 @@ std::unique_ptr<syncer::UpdateResponseData> CreateUpdateResponseData(
   auto data = std::make_unique<syncer::EntityData>();
   data->id = server_id;
+  data->originator_client_item_id = *guid;
   data->parent_id = parent_id;
   data->unique_position = unique_position.ToProto();
@@ -1255,4 +1257,149 @@ TEST(BookmarkModelMergerTest, ShouldIgnoreRemoteGUIDIfInvalidSpecifics) {
   EXPECT_THAT(tracker->GetEntityForBookmarkNode(bookmark), NotNull());
 }

+// Tests that the GUID-based matching algorithm does not match two remote nodes
+// with the same local node, even if the remote data contains duplicate GUIDs.
+TEST(BookmarkModelMergerTest, ShouldIgnoreRemoteDuplicateGUID) {
+  base::test::ScopedFeatureList override_features;
+  override_features.InitAndEnableFeature(switches::kMergeBookmarksUsingGUIDs);
+
+  const std::string kId1 = "Id1";
+  const std::string kId2 = "Id2";
+  const std::string kTitle1 = "Title1";
+  const std::string kTitle2 = "Title2";
+  const std::string kLocalTitle = "LocalTitle";
+  const std::string kUrl = "http://www.foo.com/";
+  const std::string kGuid = base::GenerateGUID();
+
+  std::unique_ptr<bookmarks::BookmarkModel> bookmark_model =
+      bookmarks::TestBookmarkClient::CreateModel();
+
+  // -------- The local model --------
+  // | - bookmark(kGuid/kUrl/kLocalTitle)
+  const bookmarks::BookmarkNode* bookmark_bar_node =
+      bookmark_model->bookmark_bar_node();
+  const bookmarks::BookmarkNode* bookmark = bookmark_model->AddURL(
+      /*parent=*/bookmark_bar_node, /*index=*/0, base::UTF8ToUTF16(kLocalTitle),
+      GURL(kUrl), nullptr, base::Time::Now(), kGuid);
+  ASSERT_TRUE(bookmark);
+  ASSERT_THAT(bookmark_bar_node->children(), ElementRawPointersAre(bookmark));
+
+  // -------- The remote model --------
+  // bookmark_bar
+  //  | - bookmark (kGuid/kUrl/kTitle1)
+  //  | - bookmark (kGuid/kUrl/kTitle2)
+  const std::string suffix = syncer::UniquePosition::RandomSuffix();
+  syncer::UniquePosition position1 =
+      syncer::UniquePosition::InitialPosition(suffix);
+  syncer::UniquePosition position2 =
+      syncer::UniquePosition::After(position1, suffix);
+
+  syncer::UpdateResponseDataList updates;
+  updates.push_back(CreateBookmarkBarNodeUpdateData());
+  updates.push_back(CreateUpdateResponseData(
+      /*server_id=*/kId1, /*parent_id=*/kBookmarkBarId, kTitle1,
+      /*url=*/kUrl,
+      /*is_folder=*/false, /*unique_position=*/position1,
+      /*guid=*/kGuid));
+  updates.push_back(CreateUpdateResponseData(
+      /*server_id=*/kId2, /*parent_id=*/kBookmarkBarId, kTitle2,
+      /*url=*/kUrl,
+      /*is_folder=*/false, /*unique_position=*/position2,
+      /*guid=*/kGuid));
+
+  // |originator_client_item_id| cannot itself be duplicated because
+  // ModelTypeWorker guarantees otherwise.
+  updates.back()->entity->originator_client_item_id = base::GenerateGUID();
+
+  std::unique_ptr<SyncedBookmarkTracker> tracker =
+      Merge(std::move(updates), bookmark_model.get());
+
+  // -------- The merged model --------
+  // | - bookmark (kGuid/kUrl/kTitle1)
+  // | - bookmark (<some-other-guid>/kUrl/kTitle2)
+
+  // Both remote nodes should be present in the merged tree.
+  ASSERT_EQ(bookmark_bar_node->children().size(), 2u);
+  const bookmarks::BookmarkNode* bookmark1 =
+      bookmark_model->bookmark_bar_node()->children()[0].get();
+  const bookmarks::BookmarkNode* bookmark2 =
+      bookmark_model->bookmark_bar_node()->children()[1].get();
+  EXPECT_THAT(bookmark1->guid(), Eq(kGuid));
+  EXPECT_THAT(bookmark2->guid(), Ne(kGuid));
+  EXPECT_THAT(tracker->GetEntityForBookmarkNode(bookmark1), NotNull());
+  EXPECT_THAT(tracker->GetEntityForBookmarkNode(bookmark2), NotNull());
+}
+
+// Same as previous test but in addition all nodes match semantically.
+TEST(BookmarkModelMergerTest, ShouldIgnoreRemoteDuplicateGUIDAndSemanticMatch) {
+  base::test::ScopedFeatureList override_features;
+  override_features.InitAndEnableFeature(switches::kMergeBookmarksUsingGUIDs);
+
+  const std::string kId1 = "Id1";
+  const std::string kId2 = "Id2";
+  const std::string kTitle = "Title";
+  const std::string kUrl = "http://www.foo.com/";
+  const std::string kGuid = base::GenerateGUID();
+
+  std::unique_ptr<bookmarks::BookmarkModel> bookmark_model =
+      bookmarks::TestBookmarkClient::CreateModel();
+
+  // -------- The local model --------
+  // | - bookmark(kGuid/kUrl/kTitle)
+  const bookmarks::BookmarkNode* bookmark_bar_node =
+      bookmark_model->bookmark_bar_node();
+  const bookmarks::BookmarkNode* bookmark = bookmark_model->AddURL(
+      /*parent=*/bookmark_bar_node, /*index=*/0, base::UTF8ToUTF16(kTitle),
+      GURL(kUrl), nullptr, base::Time::Now(), kGuid);
+  ASSERT_TRUE(bookmark);
+  ASSERT_THAT(bookmark_bar_node->children(), ElementRawPointersAre(bookmark));
+
+  // -------- The remote model --------
+  // bookmark_bar
+  //  | - bookmark (kGuid/kUrl/kTitle)
+  //  | - bookmark (kGuid/kUrl/kTitle)
+  const std::string suffix = syncer::UniquePosition::RandomSuffix();
+  syncer::UniquePosition position1 =
+      syncer::UniquePosition::InitialPosition(suffix);
+  syncer::UniquePosition position2 =
+      syncer::UniquePosition::After(position1, suffix);
+
+  syncer::UpdateResponseDataList updates;
+  updates.push_back(CreateBookmarkBarNodeUpdateData());
+  updates.push_back(CreateUpdateResponseData(
+      /*server_id=*/kId1, /*parent_id=*/kBookmarkBarId, kTitle,
+      /*url=*/kUrl,
+      /*is_folder=*/false, /*unique_position=*/position1,
+      /*guid=*/kGuid));
+  updates.push_back(CreateUpdateResponseData(
+      /*server_id=*/kId2, /*parent_id=*/kBookmarkBarId, kTitle,
+      /*url=*/kUrl,
+      /*is_folder=*/false, /*unique_position=*/position2,
+      /*guid=*/kGuid));
+
+  // |originator_client_item_id| cannot itself be duplicated because
+  // ModelTypeWorker guarantees otherwise.
+  updates.back()->entity->originator_client_item_id = base::GenerateGUID();
+
+  std::unique_ptr<SyncedBookmarkTracker> tracker =
+      Merge(std::move(updates), bookmark_model.get());
+
+  // -------- The merged model --------
+  // | - bookmark (kGuid/kUrl/kTitle)
+  // | - bookmark (<some-other-guid>/kUrl/kTitle)
+
+  // Both remote nodes should be present in the merged tree.
+  ASSERT_EQ(bookmark_bar_node->children().size(), 2u);
+  const bookmarks::BookmarkNode* bookmark1 =
+      bookmark_model->bookmark_bar_node()->children()[0].get();
+  const bookmarks::BookmarkNode* bookmark2 =
+      bookmark_model->bookmark_bar_node()->children()[1].get();
+  EXPECT_THAT(bookmark1->guid(), Eq(kGuid));
+  EXPECT_THAT(bookmark2->guid(), Ne(kGuid));
+  EXPECT_THAT(tracker->GetEntityForBookmarkNode(bookmark1), NotNull());
+  EXPECT_THAT(tracker->GetEntityForBookmarkNode(bookmark2), NotNull());
+}
+
 }  // namespace sync_bookmarks