// Imports a single replicated item: syncs it through the replication store,
// queues any links that could not be resolved yet, and registers every item
// read during the sync (keyed by ID) for later subgraph assembly.
// A failure is logged and journaled but does not abort the overall run.
private void ImportItem(ReplicatedItem item)
{
    try
    {
        var syncResult = _repstore.SyncItem(item);

        // Links whose targets are not imported yet are retried later.
        _stillUnresolved.AddRange(syncResult.UnresolvedLinks);

        // Index all items read during this sync by their ID.
        foreach (var readItem in syncResult.ReadItems)
        {
            _newItems[readItem.ID] = readItem;
        }
    }
    catch (Exception e)
    {
        _logger.Error("IMPORT Error: " + e.Message, e);
        _journal.Log(e.Message);
    }
}
/// <summary>
/// Performs a full replication pass: imports remote items, resolves links
/// among the newly imported items, commits new pages, restores remaining
/// links against the local repository, removes remotely-deleted items
/// (slave) and exports local-only items (master).
/// NOTE(review): the method name is misspelled ("Syncronize"); renaming
/// would break callers, so it is left as-is.
/// </summary>
/// <param name="force">Unused inside this method — TODO confirm whether it
/// should bypass the lock checks or can be removed.</param>
/// <returns>
/// The journal's affected-item count; 0 when replication is skipped
/// (instance is neither master nor slave, write-lock contention, or read
/// locks present); -1 when no lock can be established or a slave sees zero
/// remote items.
/// </returns>
// perform a full synchronization
public int Syncronize(bool force = false)
{
    // Only configured master or slave instances participate.
    if (!(IsMaster || IsSlave)) return 0;

    _journal = new ReplicationJournal();

    // Serialize whole passes within this process.
    lock (_syncLock)
    {
        // Skip the pass entirely if a writer currently holds the write lock.
        if (!_replicationWriteLockManager.IsLocked)
        {
            _stillUnresolved = new List<UnresolvedLink>();
            _newItems = new Dictionary<int, ContentItem>();
#if DEBUG2
            (_persister.Repository as XmlContentItemRepository).SetReadOnly(true);
#endif
            try
            {
                if (IsMaster)
                {
                    // mark intent to lock the replication for write
                    if (_replicationWriteLockManager.Lock() == false) return 0;

                    // Try again later if it is read locked.
                    // NOTE(review): this early return leaves the write lock
                    // held — the unlock near the end of this method is
                    // skipped. Presumably the lock manager expires or
                    // tolerates re-locking; verify, otherwise the master can
                    // block itself on subsequent passes.
                    if (_replicationReadLockManager.IsLocked)
                    {
                        _logger.Info("Read locks exist. Waiting for the next replication scheduled interval.");
                        return 0;
                    }
                }

                // Slaves announce an in-progress read via a read lock, unless
                // a writer is already active.
                if (IsSlave && !_replicationWriteLockManager.IsLocked) _replicationReadLockManager.Lock();

                var localItems = GetLocalItems();
                // NOTE(review): OrderBy is deferred — 'remote' is enumerated
                // several times below (Any, All inside Where, Parallel.ForEach);
                // confirm GetItems() returns a materialized collection, or
                // each enumeration re-fetches/re-sorts.
                var remote = _repstore.GetItems().OrderBy(i => i.PublishedDateUtc);

                // An empty remote store on a slave is treated as an error
                // rather than triggering a mass local delete.
                if (IsSlave && !remote.Any())
                {
                    _logger.ErrorFormat("NO REMOTE ITEMS on Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);
                    return -1; // never sync down to zero
                }

                _logger.InfoFormat("Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);

                // get a list of remotely deleted items and remove from local working copy
                // (O(local x remote) scan — acceptable only for modest item counts)
                var itemsToRemove = (IsSlave) ?
                    localItems.List.Where(l => remote.All(r => r.ID != l.ID)).ToList() : new List<ContentItem>();
                itemsToRemove.ForEach(localItems.Remove);

                // perform main sychronization and download in parallel - check need import / delete (and remove all localItems that have been handled)
                Parallel.ForEach(remote, replicatedItem => SyncOneItem(replicatedItem, localItems));

                // DO THE PUZZLE: combine imported items into new subgraph
                // first resolve links withing the new items (typically non-page items) - requires no additional local saving
                // ResolveLinkedItems(_stillUnresolved, (i => ? _newItems[i] : null));
                // (iterates a snapshot so links can be removed while walking)
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    if (!_newItems.ContainsKey(unresolvedLink.ReferencedItemID)) continue;
                    unresolvedLink.Setter(_newItems[unresolvedLink.ReferencedItemID]);
                    _stillUnresolved.Remove(unresolvedLink);
                }
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // "COMMIT" phase: persist the newly imported page items.
                foreach (var item in _newItems.Values)
                {
                    if (item.IsPage) // TODO check whether saving pages is sufficient
                    {
                        _logger.Info("new page " + item);
                        // _persister.Save(item); // TODO check persister vs repository saving
                        _persister.Repository.SaveOrUpdate(item);
                        //if (_indexer != null) _indexer.Update(item); // only for pages -> otherwise slows everything down -> move outside of lock
                    }
                }

                // RESTORE ALL REMAINING LINKS: resolve the leftovers against
                // items already present in the local repository.
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    var item = _persister.Repository.Get(unresolvedLink.ReferencedItemID);
                    if (item == null) continue;
                    unresolvedLink.Setter(item);
                    _persister.Repository.SaveOrUpdate(unresolvedLink.Item); // ensure proper link, e.g. ParentID
                    _persister.Repository.SaveOrUpdate(item); // this item was not imported, needs local saving
                    _stillUnresolved.Remove(unresolvedLink);
                }

                if (_stillUnresolved.Count > 0)
                {
                    // now something is really bad - typically an indicator of missing inner nodes
                    _journal.Log("UNRESOLVED count is " + _stillUnresolved.Count);
                    // mitigation - delete all item that cannot be linked during this run
                    // in hope for success in an upcoming run
                    foreach (var unresolvedLink in _stillUnresolved.ToArray())
                    {
                        _journal.Log("REMOVING unlinkable item {0} -> {1}", unresolvedLink.Item, unresolvedLink.ReferencedItemID);
                        DeleteLocal(unresolvedLink.Item);
                    }
                }

                //var root = _finder.AllOfType<IRootPage>().Cast<ContentItem>().SingleOrDefault();
                //if (root != null)
                //{
                //    ContentVersion.ReorderBySortOrderRecursive(root);
                //    UpdateTrailsRecursive(root); // TrailTracker
                //}

                // Delete local files that have been removed from remote
                if (IsSlave) SlaveRemoveDeletedItems(itemsToRemove);

                // PHASE 3 - check need export (assumes localItems was updated above to have only items not in remote storage)
                if (IsMaster) Parallel.ForEach(localItems.List, ExportOneItem);
            }
            catch (Exception ex)
            {
                _logger.Error("Replication failed", ex);
                _journal.Log(ex.Message);
#if DEBUG
                // NOTE(review): rethrowing skips the write-lock release below
                // in DEBUG builds — confirm this is intended.
                throw;
#endif
            }
            finally
            {
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // The slave's read lock is always released, even on failure.
                if(IsSlave) _replicationReadLockManager.Unlock();
            }

            // NOTE(review): this unlock sits outside the finally block, so the
            // master's write lock survives the early returns above — verify
            // the lock manager tolerates that (e.g. expiring lock files).
            if (IsMaster) _replicationWriteLockManager.Unlock();

            _security.ScopeEnabled = true;
            WriteLog(_journal);
            if (_journal.AffectedCount > 0)
            {
                _logger.InfoFormat("Synchronize Ended {0} {1}: affected count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), _journal.AffectedCount, 0);
                if (IsSlave && _flushable != null)
                {
                    _flushable.Flush(); // Master doesn't change so just flush Slave
                }
            }
        }
        else
        {
            _logger.WarnFormat("Unable to establish a lock for synchronization. Skipping.");
            return -1;
        }
    }
    return _journal.AffectedCount;
}
/// <summary>
/// Performs a full replication pass: imports remote items, resolves links
/// among the newly imported items, commits new pages, restores remaining
/// links against the local repository, removes remotely-deleted items
/// (slave) and exports local-only items (master).
/// NOTE(review): this appears to be a second, reformatted copy of the
/// Syncronize method defined earlier in this file — two identically-named
/// public methods with the same signature cannot coexist in one class;
/// confirm which copy is current and drop the other.
/// NOTE(review): the method name is misspelled ("Syncronize"); renaming
/// would break callers, so it is left as-is.
/// </summary>
/// <param name="force">Unused inside this method — TODO confirm whether it
/// should bypass the lock checks or can be removed.</param>
/// <returns>
/// The journal's affected-item count; 0 when replication is skipped
/// (instance is neither master nor slave, write-lock contention, or read
/// locks present); -1 when no lock can be established or a slave sees zero
/// remote items.
/// </returns>
// perform a full synchronization
public int Syncronize(bool force = false)
{
    // Only configured master or slave instances participate.
    if (!(IsMaster || IsSlave))
    {
        return(0);
    }

    _journal = new ReplicationJournal();

    // Serialize whole passes within this process.
    lock (_syncLock)
    {
        // Skip the pass entirely if a writer currently holds the write lock.
        if (!_replicationWriteLockManager.IsLocked)
        {
            _stillUnresolved = new List <UnresolvedLink>();
            _newItems = new Dictionary <int, ContentItem>();
#if DEBUG2
            (_persister.Repository as XmlContentItemRepository).SetReadOnly(true);
#endif
            try
            {
                if (IsMaster)
                {
                    // mark intent to lock the replication for write
                    if (_replicationWriteLockManager.Lock() == false)
                    {
                        return(0);
                    }

                    // Try again later if it is read locked.
                    // NOTE(review): this early return leaves the write lock
                    // held — the unlock near the end of this method is
                    // skipped. Presumably the lock manager expires or
                    // tolerates re-locking; verify, otherwise the master can
                    // block itself on subsequent passes.
                    if (_replicationReadLockManager.IsLocked)
                    {
                        _logger.Info("Read locks exist. Waiting for the next replication scheduled interval.");
                        return(0);
                    }
                }

                // Slaves announce an in-progress read via a read lock, unless
                // a writer is already active.
                if (IsSlave && !_replicationWriteLockManager.IsLocked)
                {
                    _replicationReadLockManager.Lock();
                }

                var localItems = GetLocalItems();
                // NOTE(review): OrderBy is deferred — 'remote' is enumerated
                // several times below (Any, All inside Where, Parallel.ForEach);
                // confirm GetItems() returns a materialized collection, or
                // each enumeration re-fetches/re-sorts.
                var remote = _repstore.GetItems().OrderBy(i => i.PublishedDateUtc);

                // An empty remote store on a slave is treated as an error
                // rather than triggering a mass local delete.
                if (IsSlave && !remote.Any())
                {
                    _logger.ErrorFormat("NO REMOTE ITEMS on Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);
                    return(-1); // never sync down to zero
                }

                _logger.InfoFormat("Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);

                // get a list of remotely deleted items and remove from local working copy
                // (O(local x remote) scan — acceptable only for modest item counts)
                var itemsToRemove = (IsSlave) ?
                    localItems.List.Where(l => remote.All(r => r.ID != l.ID)).ToList() : new List <ContentItem>();
                itemsToRemove.ForEach(localItems.Remove);

                // perform main sychronization and download in parallel - check need import / delete (and remove all localItems that have been handled)
                Parallel.ForEach(remote, replicatedItem => SyncOneItem(replicatedItem, localItems));

                // DO THE PUZZLE: combine imported items into new subgraph
                // first resolve links withing the new items (typically non-page items) - requires no additional local saving
                // ResolveLinkedItems(_stillUnresolved, (i => ? _newItems[i] : null));
                // (iterates a snapshot so links can be removed while walking)
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    if (!_newItems.ContainsKey(unresolvedLink.ReferencedItemID))
                    {
                        continue;
                    }
                    unresolvedLink.Setter(_newItems[unresolvedLink.ReferencedItemID]);
                    _stillUnresolved.Remove(unresolvedLink);
                }
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // "COMMIT" phase: persist the newly imported page items.
                foreach (var item in _newItems.Values)
                {
                    if (item.IsPage) // TODO check whether saving pages is sufficient
                    {
                        _logger.Info("new page " + item);
                        // _persister.Save(item); // TODO check persister vs repository saving
                        _persister.Repository.SaveOrUpdate(item);
                        //if (_indexer != null) _indexer.Update(item); // only for pages -> otherwise slows everything down -> move outside of lock
                    }
                }

                // RESTORE ALL REMAINING LINKS: resolve the leftovers against
                // items already present in the local repository.
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    var item = _persister.Repository.Get(unresolvedLink.ReferencedItemID);
                    if (item == null)
                    {
                        continue;
                    }
                    unresolvedLink.Setter(item);
                    _persister.Repository.SaveOrUpdate(unresolvedLink.Item); // ensure proper link, e.g. ParentID
                    _persister.Repository.SaveOrUpdate(item); // this item was not imported, needs local saving
                    _stillUnresolved.Remove(unresolvedLink);
                }

                if (_stillUnresolved.Count > 0)
                {
                    // now something is really bad - typically an indicator of missing inner nodes
                    _journal.Log("UNRESOLVED count is " + _stillUnresolved.Count);
                    // mitigation - delete all item that cannot be linked during this run
                    // in hope for success in an upcoming run
                    foreach (var unresolvedLink in _stillUnresolved.ToArray())
                    {
                        _journal.Log("REMOVING unlinkable item {0} -> {1}", unresolvedLink.Item, unresolvedLink.ReferencedItemID);
                        DeleteLocal(unresolvedLink.Item);
                    }
                }

                //var root = _finder.AllOfType<IRootPage>().Cast<ContentItem>().SingleOrDefault();
                //if (root != null)
                //{
                //    ContentVersion.ReorderBySortOrderRecursive(root);
                //    UpdateTrailsRecursive(root); // TrailTracker
                //}

                // Delete local files that have been removed from remote
                if (IsSlave)
                {
                    SlaveRemoveDeletedItems(itemsToRemove);
                }

                // PHASE 3 - check need export (assumes localItems was updated above to have only items not in remote storage)
                if (IsMaster)
                {
                    Parallel.ForEach(localItems.List, ExportOneItem);
                }
            }
            catch (Exception ex)
            {
                _logger.Error("Replication failed", ex);
                _journal.Log(ex.Message);
#if DEBUG
                // NOTE(review): rethrowing skips the write-lock release below
                // in DEBUG builds — confirm this is intended.
                throw;
#endif
            }
            finally
            {
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // The slave's read lock is always released, even on failure.
                if (IsSlave)
                {
                    _replicationReadLockManager.Unlock();
                }
            }

            // NOTE(review): this unlock sits outside the finally block, so the
            // master's write lock survives the early returns above — verify
            // the lock manager tolerates that (e.g. expiring lock files).
            if (IsMaster)
            {
                _replicationWriteLockManager.Unlock();
            }

            _security.ScopeEnabled = true;
            WriteLog(_journal);
            if (_journal.AffectedCount > 0)
            {
                _logger.InfoFormat("Synchronize Ended {0} {1}: affected count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), _journal.AffectedCount, 0);
                if (IsSlave && _flushable != null)
                {
                    _flushable.Flush(); // Master doesn't change so just flush Slave
                }
            }
        }
        else
        {
            _logger.WarnFormat("Unable to establish a lock for synchronization. Skipping.");
            return(-1);
        }
    }
    return(_journal.AffectedCount);
}