/// <summary>
/// Writes the replication journal summary to a log file on the shared file
/// system. The file name encodes the host FQDN, the node role (M/S) and a UTC
/// timestamp so concurrent nodes never collide. If the remote write fails, the
/// journal content is dumped to the application log so it is not lost.
/// </summary>
/// <param name="journal">The journal produced by the last replication run.</param>
private void WriteLog(ReplicationJournal journal)
{
    // Nothing was affected -> no log file is written (same guard as before,
    // hoisted so we do not build streams for nothing).
    if (journal.AffectedCount <= 0)
    {
        return;
    }

    var log = journal.ToString();
    using (var ms = new MemoryStream())
    using (var sw = new StreamWriter(ms))
    {
        sw.Write(log);
        sw.Flush();
        ms.Position = 0;
        var name = String.Format("{0}.{1}.{2}.log",
            SerializationUtility.GetLocalhostFqdn(),
            (IsMaster) ? "M" : "S",
            DateTime.UtcNow.ToString("yyMMdd_HHmmss"));
        try
        {
            _fileSystem.WriteFile(Path.Combine(_replicationLogPath, name), ms);
        }
        catch (Exception e)
        {
            // BUGFIX: message said "remote blog file"; also pass the exception
            // object so the stack trace is preserved (same Error(string, Exception)
            // overload this class already uses in Syncronize).
            _logger.Error("Could not write remote log file", e);
            // Fall back to the application log so the journal is not lost.
            _logger.Info(log);
        }
    }
}
/// <summary>
/// Creates (or replaces) a blob-backed file at the given container-relative path.
/// Also materializes empty "folder entry" placeholder blobs for every ancestor
/// directory, sets content type / cache headers, and stamps replication metadata.
/// </summary>
/// <param name="path">Relative path of the file inside the storage root.</param>
/// <param name="lastWriteTime">Optional original last-write timestamp recorded as blob metadata.</param>
/// <returns>An IStorageFile wrapper around the newly created (initially empty) blob.</returns>
public IStorageFile CreateFile(string path, DateTime?lastWriteTime = null)
{
    EnsurePathIsRelative(path);
    if (Container.BlobExists(String.Concat(_root, path)))
    {
        // MAB: replace semantics - delete the file if it already exists
        // instead of failing (the original throw is kept below for reference).
        DeleteFile(path);
        //throw new ArgumentException("File " + path + " already exists");
    }
    // create all folder entries in the hierarchy: walk up the path one segment
    // at a time and ensure a FolderEntry placeholder blob exists for each level.
    int lastIndex;
    var localPath = path;
    while ((lastIndex = localPath.LastIndexOf('/')) > 0)
    {
        localPath = localPath.Substring(0, lastIndex);
        var folder = Container.GetBlockBlobReference(String.Concat(_root, Combine(localPath, FolderEntry)));
        // Opening and immediately disposing the write stream creates a zero-length blob.
        folder.OpenWrite().Dispose();
    }
    var blob = Container.GetBlockBlobReference(String.Concat(_root, path));
    var contentType = GetContentType(path);
    if (!String.IsNullOrWhiteSpace(contentType))
    {
        blob.Properties.ContentType = contentType;
        if (IsCacheable(contentType))
        {
            // Static/cacheable content: allow public caching for 30 minutes.
            blob.Properties.CacheControl = "max-age=1800, public";
        }
    }
    // Record which cluster node uploaded the blob (consumed by replication).
    blob.Metadata.Add("UploadedFromNode", SerializationUtility.GetLocalhostFqdn());
    if (lastWriteTime.HasValue)
    {
        // NOTE(review): passes the nullable DateTime? directly, not .Value -
        // assumes ToUniversalString accepts a nullable; confirm against
        // SerializationUtility.
        blob.Metadata.Add("LastWriteTime", SerializationUtility.ToUniversalString(lastWriteTime));
    }
    // Upload an empty payload so the blob (and its metadata/properties) is
    // committed immediately; actual content is written later via the returned wrapper.
    using (var memoryStream = new MemoryStream(new byte[0]))
    {
        blob.UploadFromStream(memoryStream);
    }
    return(new AzureBlobFileStorage(blob, _absoluteRoot));
}
// Builds the full path of this node's lock file: <LockPath>/<prefix><host-fqdn>.
// The FQDN suffix makes the lock file name unique per cluster node.
protected override string GenerateLockFullPath()
{
    var lockFileName = LockFileNamePrefix + SerializationUtility.GetLocalhostFqdn();
    return Path.Combine(LockPath, lockFileName);
}
/// <summary>
/// Performs a full synchronization pass against the remote replication store.
/// Masters export local changes; slaves import remote changes (including
/// deletions). Returns the number of affected items, 0 when replication is
/// skipped (not configured, write-locked, or read locks pending), or -1 on a
/// lock/sanity failure.
/// </summary>
/// <param name="force">Not referenced in this body - TODO confirm intended use.</param>
public int Syncronize(bool force = false)
{
    // Node participates in replication only when configured as master or slave.
    if (!(IsMaster || IsSlave))
    {
        return(0);
    }
    _journal = new ReplicationJournal();
    lock (_syncLock)
    {
        if (!_replicationWriteLockManager.IsLocked)
        {
            _stillUnresolved = new List <UnresolvedLink>();
            _newItems = new Dictionary <int, ContentItem>();
#if DEBUG2
            (_persister.Repository as XmlContentItemRepository).SetReadOnly(true);
#endif
            try
            {
                if (IsMaster)
                {
                    // mark intent to lock the replication for write
                    if (_replicationWriteLockManager.Lock() == false)
                    {
                        return(0);
                    }
                    // Try again later if it is read locked.
                    if (_replicationReadLockManager.IsLocked)
                    {
                        _logger.Info("Read locks exist. Waiting for the next replication scheduled interval.");
                        return(0);
                    }
                }
                if (IsSlave && !_replicationWriteLockManager.IsLocked)
                {
                    _replicationReadLockManager.Lock();
                }
                var localItems = GetLocalItems();
                var remote = _repstore.GetItems().OrderBy(i => i.PublishedDateUtc);
                // Safety valve: an empty remote store would make a slave delete
                // its entire local content, so bail out instead.
                if (IsSlave && !remote.Any())
                {
                    _logger.ErrorFormat("NO REMOTE ITEMS on Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);
                    return(-1); // never sync down to zero
                }
                _logger.InfoFormat("Synchronize {0} {1}: local items count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);
                // get a list of remotely deleted items and remove from local working copy
                var itemsToRemove = (IsSlave) ? localItems.List.Where(l => remote.All(r => r.ID != l.ID)).ToList() : new List <ContentItem>();
                itemsToRemove.ForEach(localItems.Remove);
                // perform main synchronization and download in parallel - check need
                // import / delete (and remove all localItems that have been handled)
                Parallel.ForEach(remote, replicatedItem => SyncOneItem(replicatedItem, localItems));
                // DO THE PUZZLE: combine imported items into new subgraph.
                // First resolve links within the new items (typically non-page items)
                // - requires no additional local saving.
                // ResolveLinkedItems(_stillUnresolved, (i => ? _newItems[i] : null));
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    if (!_newItems.ContainsKey(unresolvedLink.ReferencedItemID))
                    {
                        continue;
                    }
                    unresolvedLink.Setter(_newItems[unresolvedLink.ReferencedItemID]);
                    _stillUnresolved.Remove(unresolvedLink);
                }
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // "COMMIT" phase
                foreach (var item in _newItems.Values)
                {
                    if (item.IsPage) // TODO check whether saving pages is sufficient
                    {
                        _logger.Info("new page " + item);
                        // _persister.Save(item); // TODO check persister vs repository saving
                        _persister.Repository.SaveOrUpdate(item);
                        //if (_indexer != null) _indexer.Update(item); // only for pages -> otherwise slows everything down -> move outside of lock
                    }
                }
                // RESTORE ALL REMAINING LINKS: references to items that already
                // existed locally (not part of this import batch).
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    var item = _persister.Repository.Get(unresolvedLink.ReferencedItemID);
                    if (item == null)
                    {
                        continue;
                    }
                    unresolvedLink.Setter(item);
                    _persister.Repository.SaveOrUpdate(unresolvedLink.Item); // ensure proper link, e.g. ParentID
                    _persister.Repository.SaveOrUpdate(item); // this item was not imported, needs local saving
                    _stillUnresolved.Remove(unresolvedLink);
                }
                if (_stillUnresolved.Count > 0)
                {
                    // now something is really bad - typically an indicator of missing inner nodes
                    _journal.Log("UNRESOLVED count is " + _stillUnresolved.Count);
                    // mitigation - delete all items that cannot be linked during this run
                    // in hope for success in an upcoming run
                    foreach (var unresolvedLink in _stillUnresolved.ToArray())
                    {
                        _journal.Log("REMOVING unlinkable item {0} -> {1}", unresolvedLink.Item, unresolvedLink.ReferencedItemID);
                        DeleteLocal(unresolvedLink.Item);
                    }
                }
                //var root = _finder.AllOfType<IRootPage>().Cast<ContentItem>().SingleOrDefault();
                //if (root != null)
                //{
                //    ContentVersion.ReorderBySortOrderRecursive(root);
                //    UpdateTrailsRecursive(root); // TrailTracker
                //}
                // Delete local files that have been removed from remote
                if (IsSlave)
                {
                    SlaveRemoveDeletedItems(itemsToRemove);
                }
                // PHASE 3 - check need export (assumes localItems was updated above
                // to have only items not in remote storage)
                if (IsMaster)
                {
                    Parallel.ForEach(localItems.List, ExportOneItem);
                }
            }
            catch (Exception ex)
            {
                _logger.Error("Replication failed", ex);
                _journal.Log(ex.Message);
#if DEBUG
                // Rethrow in debug builds to surface the failure immediately.
                throw;
#endif
            }
            finally
            {
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                if (IsSlave)
                {
                    _replicationReadLockManager.Unlock();
                }
            }
            // NOTE(review): the master's write lock is released OUTSIDE the finally
            // block, so the DEBUG rethrow above would leave it held - confirm
            // whether this unlock should move into finally.
            if (IsMaster)
            {
                _replicationWriteLockManager.Unlock();
            }
            _security.ScopeEnabled = true;
            WriteLog(_journal);
            if (_journal.AffectedCount > 0)
            {
                _logger.InfoFormat("Synchronize Ended {0} {1}: affected count: {2} instances {3}", IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), _journal.AffectedCount, 0);
                if (IsSlave && _flushable != null)
                {
                    _flushable.Flush(); // Master doesn't change so just flush Slave
                }
            }
        }
        else
        {
            _logger.WarnFormat("Unable to establish a lock for synchronization. Skipping.");
            return(-1);
        }
    }
    return(_journal.AffectedCount);
}
// The lock file payload identifies the owning node by its fully qualified host name.
protected virtual string GenerateLockFileContents()
{
    return SerializationUtility.GetLocalhostFqdn();
}