/// <summary>
/// Parses all Index files in the provided directory.
/// <para></para>
/// Providing the ConfigContainer will filter indices to just those used in the CDNConfig.
/// </summary>
/// <param name="directory">Directory the archives are located</param>
/// <param name="configContainer">The Configs for the repo</param>
/// <param name="useParallelism">Enables parallel processing</param>
public void Open(string directory, Configs.ConfigContainer configContainer = null, bool useParallelism = false)
{
    IsRemote = false;
    _indices.Clear();
    _sourceDirectory = directory;
    _useParallelism = useParallelism;

    if (!Directory.Exists(directory))
        throw new ArgumentException("Directory not found", paramName: nameof(directory));

    var indices = Directory.EnumerateFiles(directory, "*.index", SearchOption.AllDirectories);

    // filter the indices to just this version's
    if (configContainer != null)
    {
        var applicableIndices = GetRequiredIndices(configContainer);
        indices = indices.Where(x => applicableIndices.Contains(Path.GetFileNameWithoutExtension(x)));
    }

    ParallelOptions options = new ParallelOptions() { MaxDegreeOfParallelism = useParallelism ? -1 : 1 };
    Parallel.ForEach(indices, options, index => _indices.Add(new IndexFile(index)));
}
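// A hedged usage sketch for Open above. The host type name "IndexContainer" is an
// assumption for illustration only; the snippet does not show which class the method
// belongs to, so this stays in comment form rather than claiming a real API:
//
//   var container = new IndexContainer();
//   container.Open(@"C:\archives", configContainer: null, useParallelism: true);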
protected override void Dispose()
{
    var exceptionAggregator = new ExceptionAggregator(Log, "Could not dispose of IndexingExecuter");

    foreach (var pendingTask in pendingTasks)
    {
        exceptionAggregator.Execute(pendingTask.Wait);
    }
    pendingTasks.Clear();

    exceptionAggregator.Execute(prefetchingBehavior.Dispose);

    if (indexingCompletedEvent != null)
        exceptionAggregator.Execute(indexingCompletedEvent.Dispose);

    if (indexingSemaphore != null)
        exceptionAggregator.Execute(indexingSemaphore.Dispose);

    exceptionAggregator.ThrowIfNeeded();

    indexingCompletedEvent = null;
    indexingSemaphore = null;
}
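// ExceptionAggregator, used above and in several snippets below, collects failures from
// individual dispose calls and rethrows them together at the end, so one failing resource
// does not prevent the rest from being released. A minimal sketch of that contract,
// with semantics inferred from the call sites -- an assumption, not the actual library
// type (the real one also accepts a logger, as seen above):
using System;
using System.Collections.Generic;

public class ExceptionAggregator
{
    private readonly List<Exception> _exceptions = new List<Exception>();
    private readonly string _message;

    public ExceptionAggregator(string message)
    {
        _message = message;
    }

    // Runs the action, recording rather than propagating any failure.
    public void Execute(Action action)
    {
        try
        {
            action();
        }
        catch (Exception e)
        {
            _exceptions.Add(e);
        }
    }

    // Rethrows everything recorded so far as a single AggregateException.
    public void ThrowIfNeeded()
    {
        if (_exceptions.Count > 0)
            throw new AggregateException(_message, _exceptions);
    }
}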
private static void CleanupPointers(IntPtr ignore)
{
    var oldData = allocatedPointers.ToArray();
    allocatedPointers.Clear();

    foreach (var ptr in oldData)
    {
        try
        {
            Util.FreeSerializePointer(ptr);
        }
        catch
        {
            // TODO: big error...
        }
    }

    var cleanup = cleanupFunctions.ToArray();
    cleanupFunctions.Clear();

    foreach (var clean in cleanup)
    {
        try
        {
            clean();
        }
        catch
        {
            // TODO: pity... probably log it
        }
    }

    handleToStructPointer.Clear();
}
public override async Task<bool> Shutdown()
{
    try
    {
        var tasks = new List<Task>();
        foreach (var channel in ConnectionGroup)
        {
            tasks.Add(channel.CloseAsync());
        }

        var all = Task.WhenAll(tasks);
        await all;

        var server = ServerChannel?.CloseAsync() ?? TaskEx.Completed;
        await server;

        return all.IsCompleted && server.IsCompleted;
    }
    finally
    {
        // free all of the connection objects we were holding onto
        ConnectionGroup.Clear();

#pragma warning disable 4014
        // shutting down the worker groups can take up to 10 seconds each. Let that happen asynchronously.
        _clientEventLoopGroup.ShutdownGracefullyAsync();
        _serverEventLoopGroup.ShutdownGracefullyAsync();
#pragma warning restore 4014
    }
}
private static void ReprocessZipFile()
{
    Debug.Assert(_zipFilePath != null);

    // Make sure to keep the relative order between the flags, this
    // code runs in multiple threads
    if (ZipFileProcessingHappening.Raise() == false)
        return;

    ZipFileInitialized.Lower();
    ZipFileEntries.Clear();

    try
    {
        using (var fileStream = SafeFileStream.Create(_zipFilePath, FileMode.Open, FileAccess.Read, FileShare.Read))
        using (var zipArchive = new ZipArchive(fileStream, ZipArchiveMode.Read, false))
        {
            foreach (var entry in zipArchive.Entries)
            {
                ZipFileEntries.Add(entry.FullName);
            }
        }
    }
    catch (Exception)
    {
        // Suppressing this exception is reasonable: there are many
        // reasons for which the file may not be available right now.
        // The watcher will let us know whenever we can try again.
    }

    ZipFileInitialized.RaiseOrDie();
    ZipFileProcessingHappening.LowerOrDie();
}
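// A minimal sketch of the raise/lower flag primitive the method above relies on
// (ZipFileProcessingHappening, ZipFileInitialized). The semantics are inferred from
// the call sites -- Raise() returns false when the flag was already raised -- and are
// an assumption, not the actual library type:
using System;
using System.Threading;

public sealed class InterlockedFlag
{
    private int _state; // 0 = lowered, 1 = raised

    // Returns true only for the caller that transitions the flag from lowered to raised.
    public bool Raise() => Interlocked.CompareExchange(ref _state, 1, 0) == 0;

    // Returns true only for the caller that transitions the flag from raised to lowered.
    public bool Lower() => Interlocked.CompareExchange(ref _state, 0, 1) == 1;

    // Variants that treat a failed transition as a programming error.
    public void RaiseOrDie()
    {
        if (!Raise())
            throw new InvalidOperationException("Flag was already raised.");
    }

    public void LowerOrDie()
    {
        if (!Lower())
            throw new InvalidOperationException("Flag was already lowered.");
    }
}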
// private
private void ReculculateNodeList()
{
    if (boundsChanged)
    {
        root = new QuadLeaf<T>(0.0d, 0.0d, width, height);
        removedSet.Clear();

        foreach (T node in objects)
        {
            changedSet.Remove(node);
            addedSet.Remove(node);
            root.Add(node);
        }
    }
    else
    {
        // Drain each pending set. Removing entries from a set while enumerating
        // that same set throws in .NET, so apply the whole set first and clear it
        // afterwards.
        foreach (T node in addedSet)
            root.Add(node);
        addedSet.Clear();

        foreach (T node in changedSet)
            root.Move(node);
        changedSet.Clear();

        foreach (T node in removedSet)
            root.Remove(node);
        removedSet.Clear();
    }
}
public virtual void Dispose()
{
    GC.SuppressFinalize(this);

    var exceptionAggregator = new ExceptionAggregator("Could not dispose test");

    foreach (var store in CreatedStores)
    {
        exceptionAggregator.Execute(store.Dispose);
    }
    CreatedStores.Clear();

    if (_localServer != null)
    {
        if (_doNotReuseServer)
        {
            exceptionAggregator.Execute(() =>
            {
                _localServer.Dispose();
                _localServer = null;
                RemoveUsedPort(NonReusedServerPort);
                RemoveUsedPort(NonReusedTcpServerPort);
            });
        }

        exceptionAggregator.ThrowIfNeeded();
    }
}
private void OnSystemDocumentChange(DocumentChangeNotification notification)
{
    if (!notification.Key.Equals(Constants.Replication.DocumentReplicationConfiguration, StringComparison.OrdinalIgnoreCase))
        return;

    if (_log.IsInfoEnabled)
        _log.Info("System document change detected. Starting and stopping outgoing replication threads.");

    // prevent reconnecting to a destination that we shouldn't in case we have flaky network
    _reconnectQueue.Clear();

    foreach (var instance in _outgoing)
    {
        instance.Failed -= OnOutgoingSendingFailed;
        instance.SuccessfulTwoWaysCommunication -= OnOutgoingSendingSucceeded;
        instance.Dispose();
    }

    _outgoing.Clear();
    _outgoingFailureInfo.Clear();

    InitializeOutgoingReplications();
    InitializeResolvers();

    if (_log.IsInfoEnabled)
        _log.Info($"Replication configuration was changed: {notification.Key}");
}
public void should_clear_itself()
{
    var setToClear = new ConcurrentSet<int>(Enumerable.Range(1, 5));
    setToClear.Clear();
    setToClear.Count.ShouldEqual(0);
}
protected override void Dispose(ExceptionAggregator exceptionAggregator)
{
    foreach (var store in CreatedStores)
    {
        exceptionAggregator.Execute(store.Dispose);
    }
    CreatedStores.Clear();
}
public override async Task FlushAsync(CancellationToken cancellationToken)
{
    if (_activeWriteTasks.Count > 0)
    {
        await Task.WhenAll(_activeWriteTasks).ConfigureAwait(false);
        _activeWriteTasks.Clear();
    }
}
public void Dispose()
{
    foreach (var systemNotification in systemOnDocumentChangeNotifications)
    {
        systemDatabase.Notifications.OnDocumentChange -= systemNotification;
    }
    systemOnDocumentChangeNotifications.Clear();
}
public void ClearCollection()
{
    var set = new ConcurrentSet<int> { 0, 4, 2, 5 };
    set.Clear();
    CollectionAssert.AreEqual(set, new int[] { });
}
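// The Clear() tests in this section exercise a ConcurrentSet<T>. A minimal sketch of
// such a set, backed by ConcurrentDictionary<T, byte> -- a common construction, offered
// here as an assumption rather than the implementation actually under test:
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;

public class ConcurrentSet<T> : IEnumerable<T>
{
    private readonly ConcurrentDictionary<T, byte> _inner = new ConcurrentDictionary<T, byte>();

    public ConcurrentSet() { }

    public ConcurrentSet(IEnumerable<T> items)
    {
        foreach (var item in items)
            Add(item);
    }

    public int Count => _inner.Count;

    // TryAdd makes Add a no-op for duplicates, giving set semantics.
    public bool Add(T item) => _inner.TryAdd(item, 0);

    public bool Remove(T item) => _inner.TryRemove(item, out _);

    public void Clear() => _inner.Clear();

    public IEnumerator<T> GetEnumerator() => _inner.Keys.GetEnumerator();

    IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
}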
private void OnConnectionDisconnected(ConnectionDisconnectedEvent _)
{
    lock (syncLock)
    {
        isStarted = false;
        internalConsumers.Clear();
        internalConsumerFactory.OnDisconnected();
    }
}
/// <summary>
/// Dispose all of the monitoring clients
/// </summary>
public void DisposeAll()
{
    // snapshot first, then clear, so disposal does not race with concurrent adds
    var clients = _activeClients.ToArray();
    _activeClients.Clear();

    foreach (var client in clients)
    {
        client.Dispose();
    }
}
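// The snapshot-then-clear pattern above still leaves a small window: an item added
// between ToArray() and Clear() would be cleared but never disposed. If the field is a
// concurrent collection, a drain loop closes that window. A sketch of the alternative,
// assuming a ConcurrentBag host -- the snippet does not show the field's actual type:
using System;
using System.Collections.Concurrent;

public static class DisposalHelper
{
    // Drains the bag item by item so nothing added mid-shutdown is lost.
    public static void DrainAndDispose<T>(ConcurrentBag<T> items) where T : IDisposable
    {
        while (items.TryTake(out var item))
            item.Dispose();
    }
}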
/// <summary>
/// Updates modified data indices and writes enqueued files to archives
/// <para>Note: IndexFile saving is limited to new entries if the container was opened remotely</para>
/// </summary>
/// <param name="directory">Directory to save the archives to</param>
/// <param name="configContainer">The Configs for the repo</param>
public void Save(string directory, Configs.ConfigContainer configContainer = null)
{
    bool sameDirectory = directory.Equals(_sourceDirectory, StringComparison.OrdinalIgnoreCase);

    // save altered Data archive indices
    if (!IsRemote)
    {
        foreach (var index in DataIndices)
        {
            if (index.IsGroupIndex)
                continue;

            if (index.RequiresSave)
            {
                // save the index file and blob
                string prevBlob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", _sourceDirectory);
                index.Write(directory, configContainer);
                index.WriteBlob(directory, prevBlob);
            }
            else if (!sameDirectory)
            {
                // copy the index file and blob
                string oldblob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", _sourceDirectory);
                string newblob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", directory, true);
                File.Copy(oldblob, newblob);
                File.Copy(oldblob + ".index", newblob + ".index");
            }
        }
    }

    // prevent duplicated entries
    var duplicates = QueuedEntries.Keys.Where(k => GetIndexFileAndEntry(IndexType.Data, k, out _) != null).ToArray();
    foreach (var key in duplicates)
        QueuedEntries.Remove(key);

    // create any new archive indices
    var partitions = EnumerablePartitioner.ConcreteBatch(QueuedEntries.Values, ArchiveDataSize, (x) => x.EBlock.CompressedSize);
    foreach (var entries in partitions)
    {
        IndexFile index = new IndexFile(IndexType.Data);
        index.Add(entries);
        index.Write(directory, configContainer);
        index.WriteBlob(directory);
    }

    // reload indices
    _indices.Clear();
    Open(directory, useParallelism: _useParallelism);
}
/// <summary>
/// Updates modified data indices and writes enqueued files to archives
/// </summary>
/// <param name="directory">Directory to save the archives to</param>
/// <param name="configContainer">The Configs for the repo</param>
public void Save(string directory, Configs.ConfigContainer configContainer = null)
{
    bool sameDirectory = directory.Equals(_sourceDirectory, StringComparison.OrdinalIgnoreCase);

    // save altered Data archive indices
    foreach (var index in DataIndices)
    {
        if (index.IsGroupIndex)
            continue;

        if (index.RequiresSave)
        {
            // save the index file and blob
            string prevBlob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", _sourceDirectory);
            index.Write(directory, configContainer);
            index.WriteBlob(directory, prevBlob);
        }
        else if (!sameDirectory)
        {
            // copy the index file and blob
            string oldblob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", _sourceDirectory);
            string newblob = Helpers.GetCDNPath(index.Checksum.ToString(), "data", directory, true);
            File.Copy(oldblob, newblob);
            File.Copy(oldblob + ".index", newblob + ".index");
        }
    }

    // create any new archive indices
    var partitions = EnumerablePartitioner.ConcreteBatch(_fileQueue.Values, ArchiveDataSize, (x) => x.EBlock.CompressedSize);
    foreach (var entries in partitions)
    {
        IndexFile index = new IndexFile(IndexType.Data);
        index.Add(entries);
        index.Write(directory, configContainer);
        index.WriteBlob(directory);
    }

    // TODO 1. verify if this is required 2. fix
    // compute the Data Index Group hash
    //GenerateIndexGroup(directory, configContainer);

    // reload indices
    _indices.Clear();
    Open(directory, useParallelism: _useParallelism);
}
private void HandleSystemDocumentChange(DocumentChangeNotification notification)
{
    if (ShouldReloadConfiguration(notification.Key))
    {
        foreach (var replication in Replications)
            replication.Dispose();

        Replications.Clear();
        LoadConfigurations();

        if (_logger.IsInfoEnabled)
            _logger.Info($"Replication configuration was changed: {notification.Key}");
    }
}
public override Task<bool> Shutdown()
{
    return Task.Run(() =>
    {
        try
        {
            foreach (var channel in ConnectionGroup)
            {
                channel.StopReceive();
                channel.Dispose();
            }
        }
        finally
        {
            // free all of the connection objects we were holding onto
            ConnectionGroup.Clear();
        }

        return true;
    });
}
public void Clear()
{
    bool didClean = false;

    lock (_clearLock)
    {
        if (_references != null && _references.Count > 0)
        {
            _references.Clear();
            didClean = true;
        }

        if (_reusableBitmaps != null && _reusableBitmaps.Count > 0)
        {
            _reusableBitmaps.Clear();
            didClean = true;
        }

        if (Size() > 0)
        {
            EvictAll();
            didClean = true;
        }

        if (didClean)
        {
            // Force immediate garbage collection
            System.GC.Collect();
        }
    }

    if (didClean)
    {
        // Can't use minilogger here, we would have too many dependencies
        System.Diagnostics.Debug.WriteLine("ImageCache cleared and forcing immediate garbage collection.");
    }
}
protected virtual void Dispose(bool disposing)
{
    if (disposing)
    {
        // Release owned files first.
        if (_openFiles != null)
        {
            foreach (var of in _openFiles)
            {
                of.Dispose();
            }
            _openFiles.Clear();
            _openFiles = null;
        }

        // Release the underlying handle.
        if (_handle != null && !_handle.IsInvalid)
        {
            _handle.Close();
            _handle = null;
        }
    }
}
internal void Clear()
{
    Regions.Clear();
    dirtyRegions.Clear();
    recentRegions.Clear();
}
public void Dispose()
{
    subscriber.Unsubscribe(notificationChannel, NotificationHandler);
    handlers.Clear();
}
protected override void Dispose()
{
    System.Threading.Tasks.Task.WaitAll(futureIndexBatches.Select(ObserveDiscardedTask).ToArray());
    futureIndexBatches.Clear();
}
public static void ClearTargets()
{
    Targets.Clear();
}
private void OnConnectionDisconnected(ConnectionDisconnectedEvent _)
{
    internalConsumerFactory.OnDisconnected();
    internalConsumers.Clear();
}
public Task DisposeAsync()
{
    if (disposed)
        return new CompletedTask();

    disposed = true;

    OnCompletedNotification();
    subscribers.Clear();

    if (putDocumentsObserver != null)
        putDocumentsObserver.Dispose();

    if (endedBulkInsertsObserver != null)
        endedBulkInsertsObserver.Dispose();

    if (dataSubscriptionReleasedObserver != null)
        dataSubscriptionReleasedObserver.Dispose();

    cts.Cancel();

    newDocuments.Set();
    anySubscriber.Set();

    changes.ConnectionStatusChanged -= ChangesApiConnectionChanged;

    foreach (var task in new[] { pullingTask, startPullingTask })
    {
        if (task == null)
            continue;

        switch (task.Status)
        {
            case TaskStatus.RanToCompletion:
            case TaskStatus.Canceled:
                break;
            default:
                try
                {
                    task.Wait();
                }
                catch (AggregateException ae)
                {
                    if (ae.InnerException is OperationCanceledException == false &&
                        ae.InnerException is ObjectDisposedException == false)
                    {
                        throw;
                    }
                }
                break;
        }
    }

    if (IsConnectionClosed)
        return new CompletedTask();

    return CloseSubscription();
}
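// The task-draining loop above waits on the pulling tasks while treating cancellation
// and disposal faults as expected shutdown noise, propagating anything else. A compact
// helper expressing the same policy as a sketch -- the name and static-class packaging
// are assumptions for illustration, not part of the original code:
using System;
using System.Threading.Tasks;

public static class TaskDrain
{
    // Waits for a shutdown-era task; swallows cancellation/disposal faults only.
    public static void WaitIgnoringShutdownFaults(Task task)
    {
        if (task == null)
            return;

        if (task.Status == TaskStatus.RanToCompletion || task.Status == TaskStatus.Canceled)
            return;

        try
        {
            task.Wait();
        }
        catch (AggregateException ae) when (
            ae.InnerException is OperationCanceledException ||
            ae.InnerException is ObjectDisposedException)
        {
            // expected while tearing down the subscription; ignore
        }
    }
}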