public void Dispose()
{
    if (_target == null)
    {
        return;
    }
    _target.Flush();
    _target = null;
}

void IDisposable.Dispose()
{
    if (_target == null)
    {
        return;
    }
    _target.Flush();
    _target = null;
}
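// Usage sketch (hypothetical): both Dispose variants above flush the wrapped
// _target exactly once and make repeated disposal a safe no-op.
// "FlushingWriter" and "stream" are assumed names for illustration only.
IDisposable writer = new FlushingWriter(stream);
writer.Dispose(); // flushes _target, then sets it to null
writer.Dispose(); // no-op: _target is already null, so the guard returns early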
public static bool Flush(int millisecondsTimeout)
{
    IFlushable flushableRepository = LoggerManager.GetRepository(Assembly.GetCallingAssembly()) as IFlushable;
    if (flushableRepository == null)
    {
        return false;
    }
    return flushableRepository.Flush(millisecondsTimeout);
}
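// Usage sketch: flushing buffered log4net appenders before process exit via the
// static helper above (assumed here to be log4net's LogManager.Flush; adjust if
// your log4net version exposes it differently).
using System;
using System.Threading;
using log4net;

public static class ShutdownExample
{
    public static void FlushBeforeExit()
    {
        // Give asynchronous/buffering appenders up to five seconds to drain.
        bool flushed = LogManager.Flush(5000);
        if (!flushed)
        {
            Console.Error.WriteLine("Not all appenders confirmed the flush in time.");
        }

        // Timeout.Infinite (-1) waits indefinitely instead.
        LogManager.Flush(Timeout.Infinite);
    }
}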
public bool Flush(int millisecondsTimeout)
{
    if (millisecondsTimeout < -1)
    {
        throw new ArgumentOutOfRangeException("millisecondsTimeout", "Timeout must be -1 (Timeout.Infinite) or non-negative");
    }

    // Assume success until one of the appenders fails
    bool result = true;

    // Use DateTime.UtcNow rather than a System.Diagnostics.Stopwatch for compatibility with .NET 1.x
    DateTime startTimeUtc = DateTime.UtcNow;

    // Do buffering appenders first. These may be forwarding to other appenders.
    foreach (Appender.IAppender appender in GetAppenders())
    {
        IFlushable flushable = appender as IFlushable;
        if (flushable == null)
        {
            continue;
        }
        if (appender is Appender.BufferingAppenderSkeleton)
        {
            int timeout = GetWaitTime(startTimeUtc, millisecondsTimeout);
            if (!flushable.Flush(timeout))
            {
                result = false;
            }
        }
    }

    // Do non-buffering appenders.
    foreach (Appender.IAppender appender in GetAppenders())
    {
        IFlushable flushable = appender as IFlushable;
        if (flushable == null)
        {
            continue;
        }
        if (!(appender is Appender.BufferingAppenderSkeleton))
        {
            int timeout = GetWaitTime(startTimeUtc, millisecondsTimeout);
            if (!flushable.Flush(timeout))
            {
                result = false;
            }
        }
    }

    return result;
}
/// <summary>
/// Flushes all configured Appenders that implement <see cref="T:log4net.Appender.IFlushable" />.
/// </summary>
/// <param name="millisecondsTimeout">The maximum time in milliseconds to wait for logging events from asynchronous appenders to be flushed,
/// or <see cref="F:System.Threading.Timeout.Infinite" /> to wait indefinitely.</param>
/// <returns><c>True</c> if all logging events were flushed successfully, else <c>false</c>.</returns>
public bool Flush(int millisecondsTimeout)
{
    if (millisecondsTimeout < -1)
    {
        throw new ArgumentOutOfRangeException("millisecondsTimeout", "Timeout must be -1 (Timeout.Infinite) or non-negative");
    }
    bool result = true;
    DateTime utcNow = DateTime.UtcNow;

    // Flush buffering appenders first; they may forward to other appenders.
    IAppender[] appenders = GetAppenders();
    foreach (IAppender appender in appenders)
    {
        IFlushable flushable = appender as IFlushable;
        if (flushable != null && appender is BufferingAppenderSkeleton)
        {
            int waitTime = GetWaitTime(utcNow, millisecondsTimeout);
            if (!flushable.Flush(waitTime))
            {
                result = false;
            }
        }
    }

    // Then flush the remaining (non-buffering) appenders.
    appenders = GetAppenders();
    foreach (IAppender appender2 in appenders)
    {
        IFlushable flushable2 = appender2 as IFlushable;
        if (flushable2 != null && !(appender2 is BufferingAppenderSkeleton))
        {
            int waitTime2 = GetWaitTime(utcNow, millisecondsTimeout);
            if (!flushable2.Flush(waitTime2))
            {
                result = false;
            }
        }
    }
    return result;
}
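// Neither Flush variant above shows the private GetWaitTime helper they both
// call. A minimal sketch of what it presumably computes - the portion of the
// overall timeout budget left for the next appender's Flush call - assuming
// that -1 (Timeout.Infinite) and 0 pass through unchanged; the actual log4net
// implementation may differ.
private static int GetWaitTime(DateTime startTimeUtc, int millisecondsTimeout)
{
    if (millisecondsTimeout == Timeout.Infinite || millisecondsTimeout == 0)
    {
        return millisecondsTimeout;
    }

    int elapsed = (int)(DateTime.UtcNow - startTimeUtc).TotalMilliseconds;
    int remaining = millisecondsTimeout - elapsed;
    return remaining < 0 ? 0 : remaining; // never hand a negative timeout to Flush
}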
// Perform a full synchronization.
public int Syncronize(bool force = false)
{
    if (!(IsMaster || IsSlave))
    {
        return 0;
    }

    _journal = new ReplicationJournal();
    lock (_syncLock)
    {
        if (!_replicationWriteLockManager.IsLocked)
        {
            _stillUnresolved = new List<UnresolvedLink>();
            _newItems = new Dictionary<int, ContentItem>();
#if DEBUG2
            (_persister.Repository as XmlContentItemRepository).SetReadOnly(true);
#endif
            try
            {
                if (IsMaster)
                {
                    // Mark intent to lock the replication for write.
                    if (_replicationWriteLockManager.Lock() == false)
                    {
                        return 0;
                    }

                    // Try again later if it is read locked.
                    if (_replicationReadLockManager.IsLocked)
                    {
                        _logger.Info("Read locks exist. Waiting for the next replication scheduled interval.");
                        return 0;
                    }
                }

                if (IsSlave && !_replicationWriteLockManager.IsLocked)
                {
                    _replicationReadLockManager.Lock();
                }

                var localItems = GetLocalItems();
                var remote = _repstore.GetItems().OrderBy(i => i.PublishedDateUtc);

                if (IsSlave && !remote.Any())
                {
                    _logger.ErrorFormat("NO REMOTE ITEMS on Synchronize {0} {1}: local items count: {2} instances {3}",
                        IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);
                    return -1; // never sync down to zero
                }

                _logger.InfoFormat("Synchronize {0} {1}: local items count: {2} instances {3}",
                    IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), localItems.Count(), 0);

                // Get a list of remotely deleted items and remove them from the local working copy.
                var itemsToRemove = IsSlave
                    ? localItems.List.Where(l => remote.All(r => r.ID != l.ID)).ToList()
                    : new List<ContentItem>();
                itemsToRemove.ForEach(localItems.Remove);

                // Perform the main synchronization and download in parallel - check need import / delete
                // (and remove all localItems that have been handled).
                Parallel.ForEach(remote, replicatedItem => SyncOneItem(replicatedItem, localItems));

                // DO THE PUZZLE: combine imported items into a new subgraph.
                // First resolve links within the new items (typically non-page items) - requires no additional local saving.
                // ResolveLinkedItems(_stillUnresolved, (i => ? _newItems[i] : null));
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    if (!_newItems.ContainsKey(unresolvedLink.ReferencedItemID))
                    {
                        continue;
                    }
                    unresolvedLink.Setter(_newItems[unresolvedLink.ReferencedItemID]);
                    _stillUnresolved.Remove(unresolvedLink);
                }
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                // "COMMIT" phase
                foreach (var item in _newItems.Values)
                {
                    if (item.IsPage) // TODO check whether saving pages is sufficient
                    {
                        _logger.Info("new page " + item);
                        // _persister.Save(item); // TODO check persister vs repository saving
                        _persister.Repository.SaveOrUpdate(item);
                        //if (_indexer != null) _indexer.Update(item); // only for pages -> otherwise slows everything down -> move outside of lock
                    }
                }

                // RESTORE ALL REMAINING LINKS
                foreach (var unresolvedLink in _stillUnresolved.ToArray())
                {
                    var item = _persister.Repository.Get(unresolvedLink.ReferencedItemID);
                    if (item == null)
                    {
                        continue;
                    }
                    unresolvedLink.Setter(item);
                    _persister.Repository.SaveOrUpdate(unresolvedLink.Item); // ensure proper link, e.g. ParentID
                    _persister.Repository.SaveOrUpdate(item); // this item was not imported, needs local saving
                    _stillUnresolved.Remove(unresolvedLink);
                }

                if (_stillUnresolved.Count > 0)
                {
                    // Now something is really bad - typically an indicator of missing inner nodes.
                    _journal.Log("UNRESOLVED count is " + _stillUnresolved.Count);

                    // Mitigation - delete all items that cannot be linked during this run,
                    // in the hope of success in an upcoming run.
                    foreach (var unresolvedLink in _stillUnresolved.ToArray())
                    {
                        _journal.Log("REMOVING unlinkable item {0} -> {1}", unresolvedLink.Item, unresolvedLink.ReferencedItemID);
                        DeleteLocal(unresolvedLink.Item);
                    }
                }

                //var root = _finder.AllOfType<IRootPage>().Cast<ContentItem>().SingleOrDefault();
                //if (root != null)
                //{
                //    ContentVersion.ReorderBySortOrderRecursive(root);
                //    UpdateTrailsRecursive(root); // TrailTracker
                //}

                // Delete local files that have been removed from remote.
                if (IsSlave)
                {
                    SlaveRemoveDeletedItems(itemsToRemove);
                }

                // PHASE 3 - check need export (assumes localItems was updated above to have only items not in remote storage).
                if (IsMaster)
                {
                    Parallel.ForEach(localItems.List, ExportOneItem);
                }
            }
            catch (Exception ex)
            {
                _logger.Error("Replication failed", ex);
                _journal.Log(ex.Message);
#if DEBUG
                throw;
#endif
            }
            finally
            {
#if DEBUG2
                (_persister.Repository as XmlContentItemRepository).SetReadOnly(false);
#endif
                if (IsSlave)
                {
                    _replicationReadLockManager.Unlock();
                }
            }

            if (IsMaster)
            {
                _replicationWriteLockManager.Unlock();
            }

            _security.ScopeEnabled = true;
            WriteLog(_journal);

            if (_journal.AffectedCount > 0)
            {
                _logger.InfoFormat("Synchronize Ended {0} {1}: affected count: {2} instances {3}",
                    IsMaster ? "Master" : "Slave", SerializationUtility.GetLocalhostFqdn(), _journal.AffectedCount, 0);
                if (IsSlave && _flushable != null)
                {
                    _flushable.Flush(); // Master doesn't change so just flush Slave
                }
            }
        }
        else
        {
            _logger.WarnFormat("Unable to establish a lock for synchronization. Skipping.");
            return -1;
        }
    }

    return _journal.AffectedCount;
}
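// The link-fixup phases in Syncronize imply an UnresolvedLink shape carrying
// the referencing item, the target item's ID, and a setter delegate that
// patches the reference once the target exists locally. A minimal sketch under
// that assumption - only Item, ReferencedItemID, and Setter appear in the code
// above; everything else here is illustrative.
public class UnresolvedLink
{
    public UnresolvedLink(ContentItem item, int referencedItemId, Action<ContentItem> setter)
    {
        Item = item;
        ReferencedItemID = referencedItemId;
        Setter = setter;
    }

    // The already-imported item that holds the dangling reference.
    public ContentItem Item { get; private set; }

    // ID of the item the reference should point to once it is available.
    public int ReferencedItemID { get; private set; }

    // Callback that writes the resolved item into the referencing property.
    public Action<ContentItem> Setter { get; private set; }
}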