diff --git a/sync/syncable/directory_backing_store.cc b/sync/syncable/directory_backing_store.cc
index 8d09515709e317..378b396b89d168 100644
--- a/sync/syncable/directory_backing_store.cc
+++ b/sync/syncable/directory_backing_store.cc
@@ -76,6 +76,46 @@ void BindFields(const EntryKernel& entry,
   }
 }
 
+// Helper function that loads a number of shareable fields of the
+// same type. The sharing criterion is based on comparison of
+// the serialized data. Only consecutive DB columns need to be compared
+// to cover all possible sharing combinations.
+template <typename TValue, typename TField>
+void UnpackProtoFields(sql::Statement* statement,
+                       EntryKernel* kernel,
+                       int* index,
+                       int end_index) {
+  const void* prev_blob = nullptr;
+  int prev_length = -1;
+  int prev_index = -1;
+
+  for (; *index < end_index; ++(*index)) {
+    int length = statement->ColumnByteLength(*index);
+    if (length == 0) {
+      // Skip this column and keep the default value in the kernel field.
+      continue;
+    }
+
+    const void* blob = statement->ColumnBlob(*index);
+    // According to sqlite3 documentation, the prev_blob pointer should remain
+    // valid until moving to the next row.
+    if (length == prev_length && memcmp(blob, prev_blob, length) == 0) {
+      // Serialized values are the same - share the value from |prev_index|
+      // field with the current field.
+      kernel->copy(static_cast<TField>(prev_index),
+                   static_cast<TField>(*index));
+    } else {
+      // Regular case - deserialize and copy the value to the field.
+      TValue value;
+      value.ParseFromArray(blob, length);
+      kernel->put(static_cast<TField>(*index), value);
+      prev_blob = blob;
+      prev_length = length;
+      prev_index = *index;
+    }
+  }
+}
+
 // The caller owns the returned EntryKernel*. Assumes the statement currently
 // points to a valid row in the metas table. Returns NULL to indicate that
 // it detected a corruption in the data on unpacking.
@@ -101,10 +141,8 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
     kernel->put(static_cast<StringField>(i), statement->ColumnString(i));
   }
-  for ( ; i < PROTO_FIELDS_END; ++i) {
-    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
-        statement->ColumnBlob(i), statement->ColumnByteLength(i));
-  }
+  UnpackProtoFields<sync_pb::EntitySpecifics, ProtoField>(
+      statement, kernel.get(), &i, PROTO_FIELDS_END);
   for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
     std::string temp;
     statement->ColumnBlobAsString(i, &temp);
@@ -118,10 +156,8 @@ scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
     kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
         UniquePosition::FromProto(proto);
   }
-  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
-    kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
-        statement->ColumnBlob(i), statement->ColumnByteLength(i));
-  }
+  UnpackProtoFields<sync_pb::AttachmentMetadata, AttachmentMetadataField>(
+      statement, kernel.get(), &i, ATTACHMENT_METADATA_FIELDS_END);
 
   // Sanity check on positions. We risk strange and rare crashes if our
   // assumptions about unique position values are broken.
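The helper above relies on two facts: SQLite keeps each column's blob stable until the statement moves to the next row, and EntryKernel::copy() (introduced in entry_kernel.h later in this patch) makes two fields alias a single deserialized object. As a compilable illustration of the same compare-the-bytes-then-share idea outside the sync codebase -- with sql::Statement replaced by a plain vector of serialized blobs and the proto types replaced by a std::string payload behind a std::shared_ptr, so none of these names are Chromium APIs -- a minimal sketch could look like this:

#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in for a deserialized proto message; the real helper parses
// sync_pb::EntitySpecifics or sync_pb::AttachmentMetadata instead.
using Value = std::string;
using ValuePtr = std::shared_ptr<const Value>;

// Turns one row of serialized blobs into per-field values, sharing a single
// deserialized object between consecutive fields whose bytes are identical.
std::vector<ValuePtr> UnpackSharedFields(const std::vector<std::string>& row) {
  std::vector<ValuePtr> fields(row.size());
  const std::string* prev_blob = nullptr;
  ValuePtr prev_value;
  for (size_t i = 0; i < row.size(); ++i) {
    const std::string& blob = row[i];
    if (blob.empty())
      continue;  // Keep the default (null) value, like the kernel field does.
    if (prev_blob != nullptr && blob.size() == prev_blob->size() &&
        std::memcmp(blob.data(), prev_blob->data(), blob.size()) == 0) {
      // Same serialized bytes as the previous column - share, don't reparse.
      fields[i] = prev_value;
    } else {
      // Different bytes - "deserialize" a fresh value.
      fields[i] = std::make_shared<Value>(blob);
      prev_blob = &blob;
      prev_value = fields[i];
    }
  }
  return fields;
}

int main() {
  std::vector<std::string> row = {"specifics-A", "specifics-A", "specifics-B"};
  std::vector<ValuePtr> fields = UnpackSharedFields(row);
  std::cout << std::boolalpha << (fields[0] == fields[1]) << "\n"  // true
            << (fields[1] == fields[2]) << "\n";                   // false
  return 0;
}

As in the patch, only adjacent columns are compared; this is enough exactly because the columns that can legitimately hold equal values (e.g. SPECIFICS and SERVER_SPECIFICS) sit next to each other, which is what the comment in UnpackProtoFields assumes.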
diff --git a/sync/syncable/directory_unittest.cc b/sync/syncable/directory_unittest.cc
index 735dca58e8c467..34e5af7891a142 100644
--- a/sync/syncable/directory_unittest.cc
+++ b/sync/syncable/directory_unittest.cc
@@ -1768,18 +1768,27 @@ TEST_F(SyncableDirectoryTest, MutableEntry_UpdateAttachmentId) {
   entry.PutId(TestIdFactory::FromNumber(-1));
   entry.PutAttachmentMetadata(attachment_metadata);
 
-  const sync_pb::AttachmentMetadata& entry_metadata =
-      entry.GetAttachmentMetadata();
-  ASSERT_EQ(2, entry_metadata.record_size());
-  ASSERT_FALSE(entry_metadata.record(0).is_on_server());
-  ASSERT_FALSE(entry_metadata.record(1).is_on_server());
-  ASSERT_FALSE(entry.GetIsUnsynced());
+  {
+    const sync_pb::AttachmentMetadata& entry_metadata =
+        entry.GetAttachmentMetadata();
+    ASSERT_EQ(2, entry_metadata.record_size());
+    ASSERT_FALSE(entry_metadata.record(0).is_on_server());
+    ASSERT_FALSE(entry_metadata.record(1).is_on_server());
+    ASSERT_FALSE(entry.GetIsUnsynced());
+  }
 
   entry.MarkAttachmentAsOnServer(attachment_id_proto);
 
-  ASSERT_TRUE(entry_metadata.record(0).is_on_server());
-  ASSERT_FALSE(entry_metadata.record(1).is_on_server());
-  ASSERT_TRUE(entry.GetIsUnsynced());
+  {
+    // Re-get entry_metadata because it is immutable in the directory and
+    // the entry_metadata reference has been invalidated by the
+    // MarkAttachmentAsOnServer call above.
+    const sync_pb::AttachmentMetadata& entry_metadata =
+        entry.GetAttachmentMetadata();
+    ASSERT_TRUE(entry_metadata.record(0).is_on_server());
+    ASSERT_FALSE(entry_metadata.record(1).is_on_server());
+    ASSERT_TRUE(entry.GetIsUnsynced());
+  }
 }
 
 // Verify that deleted entries with attachments will retain the attachments.
@@ -2043,6 +2052,60 @@ TEST_F(SyncableDirectoryTest, CatastrophicError) {
   ASSERT_EQ(2, unrecoverable_error_handler.invocation_count());
 }
 
+bool EntitySpecificsValuesAreSame(const sync_pb::EntitySpecifics& v1,
+                                  const sync_pb::EntitySpecifics& v2) {
+  return &v1 == &v2;
+}
+
+// Verifies that server and client specifics are shared when their values
+// are equal.
+TEST_F(SyncableDirectoryTest, SharingOfClientAndServerSpecifics) {
+  sync_pb::EntitySpecifics specifics1;
+  sync_pb::EntitySpecifics specifics2;
+  sync_pb::EntitySpecifics specifics3;
+  AddDefaultFieldValue(BOOKMARKS, &specifics1);
+  AddDefaultFieldValue(BOOKMARKS, &specifics2);
+  AddDefaultFieldValue(BOOKMARKS, &specifics3);
+  specifics1.mutable_bookmark()->set_url("foo");
+  specifics2.mutable_bookmark()->set_url("bar");
+  // specifics3 has the same URL as specifics1.
+  specifics3.mutable_bookmark()->set_url("foo");
+
+  WriteTransaction trans(FROM_HERE, UNITTEST, dir().get());
+  MutableEntry item(&trans, CREATE, BOOKMARKS, trans.root_id(), "item");
+  item.PutId(TestIdFactory::FromNumber(1));
+  item.PutBaseVersion(10);
+
+  // Verify sharing.
+  item.PutSpecifics(specifics1);
+  item.PutServerSpecifics(specifics1);
+  EXPECT_TRUE(EntitySpecificsValuesAreSame(item.GetSpecifics(),
+                                           item.GetServerSpecifics()));
+
+  // Verify that specifics are no longer shared.
+  item.PutServerSpecifics(specifics2);
+  EXPECT_FALSE(EntitySpecificsValuesAreSame(item.GetSpecifics(),
+                                            item.GetServerSpecifics()));
+
+  // Verify that specifics are shared again because specifics3 matches
+  // specifics1.
+  item.PutServerSpecifics(specifics3);
+  EXPECT_TRUE(EntitySpecificsValuesAreSame(item.GetSpecifics(),
+                                           item.GetServerSpecifics()));
+
+  // Verify that copying the same value back to SPECIFICS is still OK.
+  item.PutSpecifics(specifics3);
+  EXPECT_TRUE(EntitySpecificsValuesAreSame(item.GetSpecifics(),
+                                           item.GetServerSpecifics()));
+
+  // Verify sharing with BASE_SERVER_SPECIFICS.
+  EXPECT_FALSE(EntitySpecificsValuesAreSame(item.GetServerSpecifics(),
+                                            item.GetBaseServerSpecifics()));
+  item.PutBaseServerSpecifics(specifics3);
+  EXPECT_TRUE(EntitySpecificsValuesAreSame(item.GetServerSpecifics(),
+                                           item.GetBaseServerSpecifics()));
+}
+
 } // namespace syncable
 } // namespace syncer
diff --git a/sync/syncable/entry_kernel.cc b/sync/syncable/entry_kernel.cc
index c51b1a65ec87e6..52481f2f9d7ca3 100644
--- a/sync/syncable/entry_kernel.cc
+++ b/sync/syncable/entry_kernel.cc
@@ -14,7 +14,7 @@ namespace syncable {
 
 EntryKernel::EntryKernel() : dirty_(false) {
   // Everything else should already be default-initialized.
-  for (int i = INT64_FIELDS_BEGIN; i < INT64_FIELDS_END; ++i) {
+  for (int i = 0; i < INT64_FIELDS_COUNT; ++i) {
     int64_fields[i] = 0;
   }
 }
diff --git a/sync/syncable/entry_kernel.h b/sync/syncable/entry_kernel.h
index 3e0d2c7a65d926..6b627a46b8b9a6 100644
--- a/sync/syncable/entry_kernel.h
+++ b/sync/syncable/entry_kernel.h
@@ -13,9 +13,8 @@
 #include "sync/internal_api/public/base/model_type.h"
 #include "sync/internal_api/public/base/unique_position.h"
 #include "sync/internal_api/public/util/immutable.h"
-#include "sync/protocol/attachments.pb.h"
-#include "sync/protocol/sync.pb.h"
 #include "sync/syncable/metahandle_set.h"
+#include "sync/syncable/proto_value_ptr.h"
 #include "sync/syncable/syncable_id.h"
 #include "sync/util/time.h"
 
@@ -197,12 +196,12 @@ enum {
 struct SYNC_EXPORT_PRIVATE EntryKernel {
  private:
   std::string string_fields[STRING_FIELDS_COUNT];
-  sync_pb::EntitySpecifics specifics_fields[PROTO_FIELDS_COUNT];
+  EntitySpecificsPtr specifics_fields[PROTO_FIELDS_COUNT];
   int64 int64_fields[INT64_FIELDS_COUNT];
   base::Time time_fields[TIME_FIELDS_COUNT];
   Id id_fields[ID_FIELDS_COUNT];
   UniquePosition unique_position_fields[UNIQUE_POSITION_FIELDS_COUNT];
-  sync_pb::AttachmentMetadata
+  AttachmentMetadataPtr
       attachment_metadata_fields[ATTACHMENT_METADATA_FIELDS_COUNT];
   std::bitset<BIT_FIELDS_COUNT> bit_fields;
   std::bitset<BIT_TEMPS_COUNT> bit_temps;
@@ -269,15 +268,15 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
     string_fields[field - STRING_FIELDS_BEGIN] = value;
   }
   inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
-    specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
+    specifics_fields[field - PROTO_FIELDS_BEGIN].set_value(value);
   }
   inline void put(UniquePositionField field, const UniquePosition& value) {
     unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN] = value;
   }
   inline void put(AttachmentMetadataField field,
                   const sync_pb::AttachmentMetadata& value) {
-    attachment_metadata_fields[field - ATTACHMENT_METADATA_FIELDS_BEGIN] =
-        value;
+    attachment_metadata_fields[field - ATTACHMENT_METADATA_FIELDS_BEGIN]
+        .set_value(value);
   }
   inline void put(BitTemp field, bool value) {
     bit_temps[field - BIT_TEMPS_BEGIN] = value;
   }
@@ -312,14 +311,15 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
     return string_fields[field - STRING_FIELDS_BEGIN];
   }
   inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
-    return specifics_fields[field - PROTO_FIELDS_BEGIN];
+    return specifics_fields[field - PROTO_FIELDS_BEGIN].value();
   }
   inline const UniquePosition& ref(UniquePositionField field) const {
     return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
   }
   inline const sync_pb::AttachmentMetadata& ref(
       AttachmentMetadataField field) const {
-    return attachment_metadata_fields[field -
-                                      ATTACHMENT_METADATA_FIELDS_BEGIN];
+    return attachment_metadata_fields[field - ATTACHMENT_METADATA_FIELDS_BEGIN]
+        .value();
   }
   inline bool ref(BitTemp field) const {
     return bit_temps[field - BIT_TEMPS_BEGIN];
   }
@@ -329,18 +329,24 @@ struct SYNC_EXPORT_PRIVATE EntryKernel {
   inline std::string& mutable_ref(StringField field) {
     return string_fields[field - STRING_FIELDS_BEGIN];
   }
-  inline sync_pb::EntitySpecifics& mutable_ref(ProtoField field) {
-    return specifics_fields[field - PROTO_FIELDS_BEGIN];
-  }
   inline Id& mutable_ref(IdField field) {
     return id_fields[field - ID_FIELDS_BEGIN];
   }
   inline UniquePosition& mutable_ref(UniquePositionField field) {
     return unique_position_fields[field - UNIQUE_POSITION_FIELDS_BEGIN];
   }
-  inline sync_pb::AttachmentMetadata& mutable_ref(
-      AttachmentMetadataField field) {
-    return attachment_metadata_fields[field - ATTACHMENT_METADATA_FIELDS_BEGIN];
+
+  // Sharing data methods for ::google::protobuf::MessageLite derived types.
+  inline void copy(ProtoField src, ProtoField dest) {
+    DCHECK_NE(src, dest);
+    specifics_fields[dest - PROTO_FIELDS_BEGIN] =
+        specifics_fields[src - PROTO_FIELDS_BEGIN];
+  }
+
+  inline void copy(AttachmentMetadataField src, AttachmentMetadataField dest) {
+    DCHECK_NE(src, dest);
+    attachment_metadata_fields[dest - ATTACHMENT_METADATA_FIELDS_BEGIN] =
+        attachment_metadata_fields[src - ATTACHMENT_METADATA_FIELDS_BEGIN];
   }
 
   ModelType GetModelType() const;
diff --git a/sync/syncable/entry_kernel_unittest.cc b/sync/syncable/entry_kernel_unittest.cc
index 55563d5285d3c2..a49a03a4d47f2f 100644
--- a/sync/syncable/entry_kernel_unittest.cc
+++ b/sync/syncable/entry_kernel_unittest.cc
@@ -26,6 +26,61 @@ TEST_F(EntryKernelTest, ToValue) {
   }
 }
 
+bool ProtoFieldValuesEqual(const sync_pb::EntitySpecifics& v1,
+                           const sync_pb::EntitySpecifics& v2) {
+  return v1.SerializeAsString() == v2.SerializeAsString();
+}
+
+bool ProtoFieldValuesAreSame(const sync_pb::EntitySpecifics& v1,
+                             const sync_pb::EntitySpecifics& v2) {
+  return &v1 == &v2;
+}
+
+bool ProtoFieldValueIsDefault(const sync_pb::EntitySpecifics& v) {
+  return ProtoFieldValuesAreSame(v,
+                                 sync_pb::EntitySpecifics::default_instance());
+}
+
+// Tests default value, assignment, and sharing of proto fields.
+TEST_F(EntryKernelTest, ProtoFieldTest) {
+  EntryKernel kernel;
+
+  // Check default values.
+  EXPECT_TRUE(ProtoFieldValueIsDefault(kernel.ref(SPECIFICS)));
+  EXPECT_TRUE(ProtoFieldValueIsDefault(kernel.ref(SERVER_SPECIFICS)));
+  EXPECT_TRUE(ProtoFieldValueIsDefault(kernel.ref(BASE_SERVER_SPECIFICS)));
+
+  sync_pb::EntitySpecifics specifics;
+
+  // Assign an empty value and verify that the field still returns the
+  // default value.
+  kernel.put(SPECIFICS, specifics);
+  EXPECT_TRUE(ProtoFieldValueIsDefault(kernel.ref(SPECIFICS)));
+  EXPECT_TRUE(ProtoFieldValuesEqual(kernel.ref(SPECIFICS), specifics));
+
+  // Verify that the kernel holds a copy of the value assigned to it.
+  specifics.mutable_bookmark()->set_url("http://demo/");
+  EXPECT_FALSE(ProtoFieldValuesEqual(kernel.ref(SPECIFICS), specifics));
+
+  // Put the new value and verify the equality.
+  kernel.put(SPECIFICS, specifics);
+  EXPECT_TRUE(ProtoFieldValuesEqual(kernel.ref(SPECIFICS), specifics));
+  EXPECT_FALSE(ProtoFieldValueIsDefault(kernel.ref(SPECIFICS)));
+
+  // Copy the value between the fields and verify that exactly the same
+  // underlying value is shared.
+  kernel.copy(SPECIFICS, SERVER_SPECIFICS);
+  EXPECT_TRUE(ProtoFieldValuesEqual(kernel.ref(SERVER_SPECIFICS), specifics));
+  EXPECT_TRUE(ProtoFieldValuesAreSame(kernel.ref(SPECIFICS),
+                                      kernel.ref(SERVER_SPECIFICS)));
+
+  // Put the new value into SPECIFICS and verify that that stops sharing even
+  // though the values are still equal.
+  kernel.put(SPECIFICS, specifics);
+  EXPECT_FALSE(ProtoFieldValuesAreSame(kernel.ref(SPECIFICS),
+                                       kernel.ref(SERVER_SPECIFICS)));
+}
+
 } // namespace syncable
 } // namespace syncer
diff --git a/sync/syncable/model_neutral_mutable_entry.cc b/sync/syncable/model_neutral_mutable_entry.cc
index ca05bddf88d62c..bdf5299defa60e 100644
--- a/sync/syncable/model_neutral_mutable_entry.cc
+++ b/sync/syncable/model_neutral_mutable_entry.cc
@@ -345,8 +345,8 @@ void ModelNeutralMutableEntry::PutServerSpecifics(
   base_write_transaction_->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
-  if (kernel_->ref(SERVER_SPECIFICS).SerializeAsString() !=
-      value.SerializeAsString()) {
+  const std::string& serialized_value = value.SerializeAsString();
+  if (serialized_value != kernel_->ref(SERVER_SPECIFICS).SerializeAsString()) {
     if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
       // Remove ourselves from unapplied_update_metahandles with our
       // old server type.
@@ -358,7 +358,13 @@ void ModelNeutralMutableEntry::PutServerSpecifics(
       DCHECK_EQ(erase_count, 1u);
     }
 
-    kernel_->put(SERVER_SPECIFICS, value);
+    // Check for potential sharing - SERVER_SPECIFICS is often
+    // copied from SPECIFICS.
+    if (serialized_value == kernel_->ref(SPECIFICS).SerializeAsString()) {
+      kernel_->copy(SPECIFICS, SERVER_SPECIFICS);
+    } else {
+      kernel_->put(SERVER_SPECIFICS, value);
+    }
     kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
 
     if (kernel_->ref(IS_UNAPPLIED_UPDATE)) {
@@ -379,9 +385,17 @@ void ModelNeutralMutableEntry::PutBaseServerSpecifics(
   base_write_transaction_->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
-  if (kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()
-      != value.SerializeAsString()) {
-    kernel_->put(BASE_SERVER_SPECIFICS, value);
+  const std::string& serialized_value = value.SerializeAsString();
+  if (serialized_value !=
+      kernel_->ref(BASE_SERVER_SPECIFICS).SerializeAsString()) {
+    // Check for potential sharing - BASE_SERVER_SPECIFICS is often
+    // copied from SERVER_SPECIFICS.
+    if (serialized_value ==
+        kernel_->ref(SERVER_SPECIFICS).SerializeAsString()) {
+      kernel_->copy(SERVER_SPECIFICS, BASE_SERVER_SPECIFICS);
+    } else {
+      kernel_->put(BASE_SERVER_SPECIFICS, value);
+    }
     kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
   }
 }
@@ -403,10 +417,17 @@ void ModelNeutralMutableEntry::PutServerAttachmentMetadata(
     const sync_pb::AttachmentMetadata& value) {
   DCHECK(kernel_);
   base_write_transaction_->TrackChangesTo(kernel_);
-
-  if (kernel_->ref(SERVER_ATTACHMENT_METADATA).SerializeAsString() !=
-      value.SerializeAsString()) {
-    kernel_->put(SERVER_ATTACHMENT_METADATA, value);
+  const std::string& serialized_value = value.SerializeAsString();
+  if (serialized_value !=
+      kernel_->ref(SERVER_ATTACHMENT_METADATA).SerializeAsString()) {
+    // Check for potential sharing - SERVER_ATTACHMENT_METADATA is often
+    // copied from ATTACHMENT_METADATA.
+    if (serialized_value ==
+        kernel_->ref(ATTACHMENT_METADATA).SerializeAsString()) {
+      kernel_->copy(ATTACHMENT_METADATA, SERVER_ATTACHMENT_METADATA);
+    } else {
+      kernel_->put(SERVER_ATTACHMENT_METADATA, value);
+    }
     kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
   }
 }
diff --git a/sync/syncable/mutable_entry.cc b/sync/syncable/mutable_entry.cc
index 0b5ee552bc7677..8e33a67a98f134 100644
--- a/sync/syncable/mutable_entry.cc
+++ b/sync/syncable/mutable_entry.cc
@@ -222,9 +222,16 @@ void MutableEntry::PutSpecifics(const sync_pb::EntitySpecifics& value) {
   write_transaction()->TrackChangesTo(kernel_);
   // TODO(ncarter): This is unfortunately heavyweight. Can we do
   // better?
-  if (kernel_->ref(SPECIFICS).SerializeAsString() !=
-      value.SerializeAsString()) {
-    kernel_->put(SPECIFICS, value);
+  const std::string& serialized_value = value.SerializeAsString();
+  if (serialized_value != kernel_->ref(SPECIFICS).SerializeAsString()) {
+    // Check for potential sharing - SPECIFICS is often
+    // copied from SERVER_SPECIFICS.
+    if (serialized_value ==
+        kernel_->ref(SERVER_SPECIFICS).SerializeAsString()) {
+      kernel_->copy(SERVER_SPECIFICS, SPECIFICS);
+    } else {
+      kernel_->put(SPECIFICS, value);
+    }
     kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
   }
 }
@@ -256,15 +263,22 @@ bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
 }
 
 void MutableEntry::PutAttachmentMetadata(
-    const sync_pb::AttachmentMetadata& attachment_metadata) {
+    const sync_pb::AttachmentMetadata& value) {
   DCHECK(kernel_);
   write_transaction()->TrackChangesTo(kernel_);
-  if (kernel_->ref(ATTACHMENT_METADATA).SerializeAsString() !=
-      attachment_metadata.SerializeAsString()) {
+  const std::string& serialized_value = value.SerializeAsString();
+  if (serialized_value !=
+      kernel_->ref(ATTACHMENT_METADATA).SerializeAsString()) {
     dir()->UpdateAttachmentIndex(GetMetahandle(),
-                                 kernel_->ref(ATTACHMENT_METADATA),
-                                 attachment_metadata);
-    kernel_->put(ATTACHMENT_METADATA, attachment_metadata);
+                                 kernel_->ref(ATTACHMENT_METADATA), value);
+    // Check for potential sharing - ATTACHMENT_METADATA is often
+    // copied from SERVER_ATTACHMENT_METADATA.
+    if (serialized_value ==
+        kernel_->ref(SERVER_ATTACHMENT_METADATA).SerializeAsString()) {
+      kernel_->copy(SERVER_ATTACHMENT_METADATA, ATTACHMENT_METADATA);
+    } else {
+      kernel_->put(ATTACHMENT_METADATA, value);
+    }
     kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
   }
 }
@@ -274,8 +288,8 @@ void MutableEntry::MarkAttachmentAsOnServer(
   DCHECK(kernel_);
   DCHECK(!attachment_id.unique_id().empty());
   write_transaction()->TrackChangesTo(kernel_);
-  sync_pb::AttachmentMetadata& attachment_metadata =
-      kernel_->mutable_ref(ATTACHMENT_METADATA);
+  sync_pb::AttachmentMetadata attachment_metadata =
+      kernel_->ref(ATTACHMENT_METADATA);
   for (int i = 0; i < attachment_metadata.record_size(); ++i) {
     sync_pb::AttachmentMetadataRecord* record =
         attachment_metadata.mutable_record(i);
     if (record->id().unique_id() != attachment_id.unique_id())
       continue;
     record->set_is_on_server(true);
   }
+  kernel_->put(ATTACHMENT_METADATA, attachment_metadata);
   kernel_->mark_dirty(&dir()->kernel()->dirty_metahandles);
   MarkForSyncing(this);
 }
diff --git a/sync/syncable/mutable_entry.h b/sync/syncable/mutable_entry.h
index 6f010e6e8787c6..f26842e19cb237 100644
--- a/sync/syncable/mutable_entry.h
+++ b/sync/syncable/mutable_entry.h
@@ -66,8 +66,7 @@ class SYNC_EXPORT_PRIVATE MutableEntry : public ModelNeutralMutableEntry {
   // ID to put the node in first position.
   bool PutPredecessor(const Id& predecessor_id);
 
-  void PutAttachmentMetadata(
-      const sync_pb::AttachmentMetadata& attachment_metadata);
+  void PutAttachmentMetadata(const sync_pb::AttachmentMetadata& value);
 
   // Update attachment metadata for |attachment_id| to indicate that this
   // attachment has been uploaded to the sync server.
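The new field types, EntitySpecificsPtr and AttachmentMetadataPtr, come from sync/syncable/proto_value_ptr.h, which is not part of this diff. As a rough, hypothetical sketch of the semantics the rest of the patch depends on -- value() falls back to a default instance, set_value() stores a private copy (an empty message collapses back to the default), and copying the wrapper shares one underlying object, which is what EntryKernel::copy() amounts to -- the wrapper could be approximated like this; the names FakeSpecifics and SharedProtoValue are illustrative only, not the real header:

#include <cassert>
#include <memory>
#include <string>

// Minimal stand-in for a generated protobuf message; just enough interface
// for the wrapper below. The real code stores sync_pb::EntitySpecifics and
// sync_pb::AttachmentMetadata here.
class FakeSpecifics {
 public:
  static const FakeSpecifics& default_instance() {
    static const FakeSpecifics kDefault;
    return kDefault;
  }
  bool IsEmpty() const { return payload_.empty(); }
  void set_payload(const std::string& payload) { payload_ = payload; }

 private:
  std::string payload_;
};

// Approximation of the shared proto value the new EntryKernel fields rely on.
template <typename T>
class SharedProtoValue {
 public:
  // Returns the stored value, or the type's default instance when unset.
  const T& value() const { return value_ ? *value_ : T::default_instance(); }

  // Stores a private copy of |value|; an empty message resets to the default.
  void set_value(const T& value) {
    if (value.IsEmpty())
      value_.reset();
    else
      value_ = std::make_shared<const T>(value);
  }

  // The implicitly generated copy operations alias the same underlying
  // object, which is what EntryKernel::copy() boils down to in this sketch.

 private:
  std::shared_ptr<const T> value_;
};

int main() {
  SharedProtoValue<FakeSpecifics> specifics;
  SharedProtoValue<FakeSpecifics> server_specifics;
  // Unset fields report the shared default instance.
  assert(&specifics.value() == &FakeSpecifics::default_instance());

  FakeSpecifics bookmark;
  bookmark.set_payload("bookmark: http://demo/");
  specifics.set_value(bookmark);

  // Copying the wrapper shares the value instead of reparsing or copying it.
  server_specifics = specifics;
  assert(&specifics.value() == &server_specifics.value());

  // A fresh set_value() stops the sharing even though the values are equal.
  server_specifics.set_value(bookmark);
  assert(&specifics.value() != &server_specifics.value());
  return 0;
}

The main() above mirrors the expectations of EntryKernelTest.ProtoFieldTest and SyncableDirectoryTest.SharingOfClientAndServerSpecifics: sharing is established by copy(), verified by address equality, and broken again by the next put()/set_value().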