Make Directory's code style a little more consistent.
This is a cleanup-only/documentation change, no behavior changes.

Add comments explaining how Directory locking works.

Move protected methods to private (there was no reason for them to be
protected).

Move private data members below private method declarations.

For methods that take a ScopedKernelLock, always pass it by const reference
and place it before any non-const pointer parameters (see the sketch below).

Give ClearDirtyMetahandles a ScopedKernelLock parameter so it's clear
that the caller must be holding a lock.

BUG=

Review URL: https://codereview.chromium.org/585663002

Cr-Commit-Position: refs/heads/master@{#296045}
maniscalco authored and Commit bot committed Sep 22, 2014
1 parent 3d148e7 commit edb7809
Showing 2 changed files with 87 additions and 79 deletions.
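
To make the locking convention concrete, here is a minimal, self-contained sketch. The names and types below (the mutex-based ScopedKernelLock, MarkDirty, SaveChanges, the int entry values) are simplified stand-ins for illustration, not the real sync/syncable API: public methods construct the lock and delegate to private overloads that take it first, by const reference, while ClearDirtyMetahandles-style helpers accept the lock purely as evidence that the caller already holds it.

// directory_locking_sketch.cc -- hypothetical, simplified illustration only.
#include <cstdint>
#include <cstdio>
#include <map>
#include <mutex>
#include <set>

// Stand-in for syncable's ScopedKernelLock (the real one takes the Directory
// itself). It is an RAII guard: constructing it acquires the kernel mutex and
// destroying it releases the mutex.
class ScopedKernelLock {
 public:
  explicit ScopedKernelLock(std::mutex& kernel_mutex) : guard_(kernel_mutex) {}

 private:
  std::lock_guard<std::mutex> guard_;
};

class Directory {
 public:
  // Public form: acquires the kernel lock, then delegates to the locked form.
  int GetEntryByHandle(int64_t metahandle) {
    ScopedKernelLock lock(kernel_mutex_);
    return GetEntryByHandle(lock, metahandle);
  }

  void MarkDirty(int64_t metahandle, int value) {
    ScopedKernelLock lock(kernel_mutex_);
    entries_[metahandle] = value;
    dirty_metahandles_.insert(metahandle);
  }

  void SaveChanges() {
    ScopedKernelLock lock(kernel_mutex_);
    std::printf("flushing %zu dirty handles\n", dirty_metahandles_.size());
    ClearDirtyMetahandles(lock);  // Passing |lock| shows the lock is held.
  }

 private:
  // Locked form: the const-reference ScopedKernelLock comes before the other
  // parameters. It is never used; its only job is to make the "caller must
  // hold the kernel lock" requirement part of the signature.
  int GetEntryByHandle(const ScopedKernelLock& lock, int64_t metahandle) {
    (void)lock;
    std::map<int64_t, int>::const_iterator it = entries_.find(metahandle);
    return it == entries_.end() ? -1 : it->second;
  }

  // Same idea as the ClearDirtyMetahandles change in this commit: the lock
  // parameter documents the locking requirement.
  void ClearDirtyMetahandles(const ScopedKernelLock& lock) {
    (void)lock;
    dirty_metahandles_.clear();
  }

  // Private data members below the private method declarations, mirroring the
  // ordering this commit adopts.
  std::mutex kernel_mutex_;
  std::map<int64_t, int> entries_;
  std::set<int64_t> dirty_metahandles_;
};

int main() {
  Directory dir;
  dir.MarkDirty(42, 7);
  std::printf("entry 42 -> %d\n", dir.GetEntryByHandle(42));
  dir.SaveChanges();
  return 0;
}

Note that the lock parameter is a convention rather than an enforcement mechanism: it puts the locking requirement in the signature, but nothing verifies that the guard actually covers this particular directory's mutex.
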
61 changes: 30 additions & 31 deletions sync/syncable/directory.cc
@@ -162,7 +162,7 @@ void Directory::InitializeIndices(MetahandlesMap* handles_map) {
kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
kernel_->ids_map[entry->ref(ID).value()] = entry;
DCHECK(!entry->is_dirty());
AddToAttachmentIndex(metahandle, entry->ref(ATTACHMENT_METADATA), lock);
AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
}
}

@@ -225,11 +225,11 @@ void Directory::OnUnrecoverableError(const BaseTransaction* trans,

EntryKernel* Directory::GetEntryById(const Id& id) {
ScopedKernelLock lock(this);
return GetEntryById(id, &lock);
return GetEntryById(lock, id);
}

EntryKernel* Directory::GetEntryById(const Id& id,
ScopedKernelLock* const lock) {
EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
const Id& id) {
DCHECK(kernel_);
// Find it in the in memory ID index.
IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
@@ -262,11 +262,11 @@ EntryKernel* Directory::GetEntryByServerTag(const string& tag) {

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
ScopedKernelLock lock(this);
return GetEntryByHandle(metahandle, &lock);
return GetEntryByHandle(lock, metahandle);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
ScopedKernelLock* lock) {
EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
int64 metahandle) {
// Look up in memory
MetahandlesMap::iterator found =
kernel_->metahandles_map.find(metahandle);
@@ -342,13 +342,12 @@ int Directory::GetPositionIndex(

bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
ScopedKernelLock lock(this);
return InsertEntry(trans, entry, &lock);
return InsertEntry(lock, trans, entry);
}

bool Directory::InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry,
ScopedKernelLock* lock) {
DCHECK(NULL != lock);
bool Directory::InsertEntry(const ScopedKernelLock& lock,
BaseWriteTransaction* trans,
EntryKernel* entry) {
if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
return false;

@@ -379,7 +378,7 @@ bool Directory::InsertEntry(BaseWriteTransaction* trans,
}
}
AddToAttachmentIndex(
entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), *lock);
lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

// Should NEVER be created with a client tag or server tag.
if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
@@ -397,7 +396,7 @@ bool Directory::ReindexId(BaseWriteTransaction* trans,
EntryKernel* const entry,
const Id& new_id) {
ScopedKernelLock lock(this);
if (NULL != GetEntryById(new_id, &lock))
if (NULL != GetEntryById(lock, new_id))
return false;

{
@@ -427,9 +426,9 @@ bool Directory::ReindexParentId(BaseWriteTransaction* trans,
}

void Directory::RemoveFromAttachmentIndex(
const ScopedKernelLock& lock,
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata,
const ScopedKernelLock& lock) {
const sync_pb::AttachmentMetadata& attachment_metadata) {
for (int i = 0; i < attachment_metadata.record_size(); ++i) {
AttachmentIdUniqueId unique_id =
attachment_metadata.record(i).id().unique_id();
@@ -445,9 +444,9 @@ void Directory::RemoveFromAttachmentIndex(
}

void Directory::AddToAttachmentIndex(
const ScopedKernelLock& lock,
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata,
const ScopedKernelLock& lock) {
const sync_pb::AttachmentMetadata& attachment_metadata) {
for (int i = 0; i < attachment_metadata.record_size(); ++i) {
AttachmentIdUniqueId unique_id =
attachment_metadata.record(i).id().unique_id();
@@ -467,8 +466,8 @@ void Directory::UpdateAttachmentIndex(
const sync_pb::AttachmentMetadata& old_metadata,
const sync_pb::AttachmentMetadata& new_metadata) {
ScopedKernelLock lock(this);
RemoveFromAttachmentIndex(metahandle, old_metadata, lock);
AddToAttachmentIndex(metahandle, new_metadata, lock);
RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
AddToAttachmentIndex(lock, metahandle, new_metadata);
}

void Directory::GetMetahandlesByAttachmentId(
@@ -492,7 +491,7 @@ bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
kernel_->transaction_mutex.AssertAcquired();
kernel_->dirty_metahandles.clear();
}
@@ -538,7 +537,7 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
// clear dirty flags.
for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
i != kernel_->dirty_metahandles.end(); ++i) {
EntryKernel* entry = GetEntryByHandle(*i, &lock);
EntryKernel* entry = GetEntryByHandle(lock, *i);
if (!entry)
continue;
// Skip over false positives; it happens relatively infrequently.
@@ -551,7 +550,7 @@ void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
// in a moment, and it unnecessarily complicates iteration.
entry->clear_dirty(NULL);
}
ClearDirtyMetahandles();
ClearDirtyMetahandles(lock);

// Set purged handles.
DCHECK(snapshot->metahandles_to_purge.empty());
@@ -628,7 +627,7 @@ bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
(&trans)))
return false;
RemoveFromAttachmentIndex(
entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA), lock);
lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));

delete entry;
}
@@ -687,10 +686,10 @@ void Directory::UnapplyEntry(EntryKernel* entry) {
// update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

void Directory::DeleteEntry(bool save_to_journal,
void Directory::DeleteEntry(const ScopedKernelLock& lock,
bool save_to_journal,
EntryKernel* entry,
EntryKernelSet* entries_to_journal,
const ScopedKernelLock& lock) {
EntryKernelSet* entries_to_journal) {
int64 handle = entry->ref(META_HANDLE);
ModelType server_type = GetModelTypeFromSpecifics(
entry->ref(SERVER_SPECIFICS));
@@ -720,7 +719,7 @@ void Directory::DeleteEntry(bool save_to_journal,
kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
DCHECK_EQ(1u, num_erased);
}
RemoveFromAttachmentIndex(handle, entry->ref(ATTACHMENT_METADATA), lock);
RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));

if (save_to_journal) {
entries_to_journal->insert(entry);
@@ -800,7 +799,7 @@ bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
types_to_journal.Has(server_type)) &&
(delete_journal_->IsDeleteJournalEnabled(local_type) ||
delete_journal_->IsDeleteJournalEnabled(server_type));
DeleteEntry(save_to_journal, entry, &entries_to_journal, lock);
DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
}
}

@@ -841,7 +840,7 @@ bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,

for (Metahandles::iterator it = children.begin(); it != children.end();
++it) {
EntryKernel* entry = GetEntryByHandle(*it, &lock);
EntryKernel* entry = GetEntryByHandle(lock, *it);
if (!entry)
continue;
if (entry->ref(BASE_VERSION) > 1)
@@ -1497,7 +1496,7 @@ void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
const std::vector<int64>::const_iterator end = metahandles.end();
// For all of this type's entries...
for (; iter != end; ++iter) {
EntryKernel* entry = GetEntryByHandle(*iter, &lock);
EntryKernel* entry = GetEntryByHandle(lock, *iter);
DCHECK(entry);
const sync_pb::AttachmentMetadata metadata =
entry->ref(ATTACHMENT_METADATA);
105 changes: 57 additions & 48 deletions sync/syncable/directory.h
@@ -420,29 +420,6 @@ class SYNC_EXPORT Directory {
ModelType type,
AttachmentIdSet* id_set);

protected: // for friends, mainly used by Entry constructors
virtual EntryKernel* GetEntryByHandle(int64 handle);
virtual EntryKernel* GetEntryByHandle(int64 metahandle,
ScopedKernelLock* lock);
virtual EntryKernel* GetEntryById(const Id& id);
EntryKernel* GetEntryByServerTag(const std::string& tag);
virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_id);
bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_parent_id);
// Update the attachment index for |metahandle| removing it from the index
// under |old_metadata| entries and add it under |new_metadata| entries.
void UpdateAttachmentIndex(const int64 metahandle,
const sync_pb::AttachmentMetadata& old_metadata,
const sync_pb::AttachmentMetadata& new_metadata);
void ClearDirtyMetahandles();

DirOpenResult OpenImpl(
const std::string& name,
DirectoryChangeDelegate* delegate,
const WeakHandle<TransactionObserver>& transaction_observer);

private:
struct Kernel {
// |delegate| must not be NULL. |transaction_observer| must be
@@ -543,9 +520,57 @@ class SYNC_EXPORT Directory {
const WeakHandle<TransactionObserver> transaction_observer;
};

// These private versions expect the kernel lock to already be held
// before calling.
EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);
// You'll notice that some of these methods have two forms. One that takes a
// ScopedKernelLock and one that doesn't. The general pattern is that those
// without a ScopedKernelLock parameter construct one internally before
// calling the form that takes one.

virtual EntryKernel* GetEntryByHandle(int64 handle);
virtual EntryKernel* GetEntryByHandle(const ScopedKernelLock& lock,
int64 metahandle);

virtual EntryKernel* GetEntryById(const Id& id);
virtual EntryKernel* GetEntryById(const ScopedKernelLock& lock, const Id& id);

EntryKernel* GetEntryByServerTag(const std::string& tag);
virtual EntryKernel* GetEntryByClientTag(const std::string& tag);

// For new entry creation only
bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);
bool InsertEntry(const ScopedKernelLock& lock,
BaseWriteTransaction* trans,
EntryKernel* entry);

bool ReindexId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_id);

bool ReindexParentId(BaseWriteTransaction* trans, EntryKernel* const entry,
const Id& new_parent_id);

// Update the attachment index for |metahandle| removing it from the index
// under |old_metadata| entries and add it under |new_metadata| entries.
void UpdateAttachmentIndex(const int64 metahandle,
const sync_pb::AttachmentMetadata& old_metadata,
const sync_pb::AttachmentMetadata& new_metadata);

// Remove each of |metahandle|'s attachment ids from index_by_attachment_id.
void RemoveFromAttachmentIndex(
const ScopedKernelLock& lock,
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata);

// Add each of |metahandle|'s attachment ids to the index_by_attachment_id.
void AddToAttachmentIndex(
const ScopedKernelLock& lock,
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata);

void ClearDirtyMetahandles(const ScopedKernelLock& lock);

DirOpenResult OpenImpl(
const std::string& name,
DirectoryChangeDelegate* delegate,
const WeakHandle<TransactionObserver>& transaction_observer);

// A helper that implements the logic of checking tree invariants.
bool CheckTreeInvariants(syncable::BaseTransaction* trans,
Expand All @@ -571,11 +596,6 @@ class SYNC_EXPORT Directory {
// processed |snapshot| failed, for example, due to no disk space.
void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);

// For new entry creation only
bool InsertEntry(BaseWriteTransaction* trans,
EntryKernel* entry, ScopedKernelLock* lock);
bool InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry);

// Used by CheckTreeInvariants
void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
bool SafeToPurgeFromMemory(WriteTransaction* trans,
@@ -588,27 +608,16 @@ class SYNC_EXPORT Directory {
std::deque<const OrderedChildSet*>* child_sets) const;

// Append the handles of the children of |parent_id| to |result|.
void AppendChildHandles(
const ScopedKernelLock& lock,
const Id& parent_id, Directory::Metahandles* result);
void AppendChildHandles(const ScopedKernelLock& lock,
const Id& parent_id,
Directory::Metahandles* result);

// Helper methods used by PurgeDisabledTypes.
void UnapplyEntry(EntryKernel* entry);
void DeleteEntry(bool save_to_journal,
void DeleteEntry(const ScopedKernelLock& lock,
bool save_to_journal,
EntryKernel* entry,
EntryKernelSet* entries_to_journal,
const ScopedKernelLock& lock);

// Remove each of |metahandle|'s attachment ids from index_by_attachment_id.
void RemoveFromAttachmentIndex(
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata,
const ScopedKernelLock& lock);
// Add each of |metahandle|'s attachment ids to the index_by_attachment_id.
void AddToAttachmentIndex(
const int64 metahandle,
const sync_pb::AttachmentMetadata& attachment_metadata,
const ScopedKernelLock& lock);
EntryKernelSet* entries_to_journal);

// A private version of the public GetMetaHandlesOfType for when you already
// have a ScopedKernelLock.
