public override void Remove(string[] keys, WorkContext context)
{
    DeletionBatchInfo deletionBatchInfo = null;
    try
    {
        deletionBatchInfo = context.ReportDeletionBatchStarted(PublicName, keys.Length);

        context.TransactionalStorage.Batch(actions =>
        {
            // Time the storage commit that wraps this batch and record it once the commit completes.
            var storageCommitDuration = new Stopwatch();

            actions.BeforeStorageCommit += storageCommitDuration.Start;

            actions.AfterStorageCommit += () =>
            {
                storageCommitDuration.Stop();
                deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
            };

            var reduceKeyAndBuckets = new Dictionary<ReduceKeyAndBucket, int>();

            // Delete the mapped results for every document key, collecting the affected reduce keys and buckets.
            var deleteMappedResultsDuration = new Stopwatch();
            using (StopwatchScope.For(deleteMappedResultsDuration))
            {
                if (actions.MapReduce.HasMappedResultsForIndex(indexId))
                {
                    foreach (var key in keys)
                    {
                        actions.MapReduce.DeleteMappedResultsForDocumentId(key, indexId, reduceKeyAndBuckets);
                        context.CancellationToken.ThrowIfCancellationRequested();
                    }
                }
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_DeleteMappedResultsForDocumentId, deleteMappedResultsDuration.ElapsedMilliseconds));

            actions.MapReduce.UpdateRemovedMapReduceStats(indexId, reduceKeyAndBuckets, context.CancellationToken);

            // Re-schedule reductions for every reduce key touched by the deletion.
            var scheduleReductionsDuration = new Stopwatch();
            using (StopwatchScope.For(scheduleReductionsDuration))
            {
                foreach (var reduceKeyAndBucket in reduceKeyAndBuckets)
                {
                    actions.MapReduce.ScheduleReductions(indexId, 0, reduceKeyAndBucket.Key);
                    context.CancellationToken.ThrowIfCancellationRequested();
                }
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Reduce_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
        });
    }
    finally
    {
        if (deletionBatchInfo != null)
        {
            context.ReportDeletionBatchCompleted(deletionBatchInfo);
        }
    }
}
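// The timing pattern above relies on StopwatchScope.For returning an IDisposable that
// runs the stopwatch for the lifetime of the using block. Below is a minimal sketch of
// such a helper; the name and behavior are inferred from the call sites (assuming the
// System and System.Diagnostics namespaces), not taken from RavenDB's implementation,
// so the type is deliberately named StopwatchScopeSketch to mark it as hypothetical.
internal static class StopwatchScopeSketch
{
    public static IDisposable For(Stopwatch stopwatch)
    {
        // Start timing now; the returned scope stops the stopwatch when disposed.
        // Because Stopwatch.Start resumes rather than resets, ElapsedMilliseconds
        // accumulates across multiple using blocks over the same stopwatch, which is
        // how indexUpdateTriggersDuration is reused twice in the method below.
        stopwatch.Start();
        return new StopOnDispose(stopwatch);
    }

    private sealed class StopOnDispose : IDisposable
    {
        private readonly Stopwatch stopwatch;

        public StopOnDispose(Stopwatch stopwatch)
        {
            this.stopwatch = stopwatch;
        }

        public void Dispose()
        {
            stopwatch.Stop();
        }
    }
}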
public override void Remove(string[] keys, WorkContext context)
{
    DeletionBatchInfo deletionBatchInfo = null;
    try
    {
        deletionBatchInfo = context.ReportDeletionBatchStarted(PublicName, keys.Length);

        Write((writer, analyzer, stats) =>
        {
            var indexUpdateTriggersDuration = new Stopwatch();
            stats.Operation = IndexingWorkStats.Status.Ignore;

            if (logIndexing.IsDebugEnabled)
            {
                logIndexing.Debug(() => string.Format("Deleting ({0}) from {1}", string.Join(", ", keys), PublicName));
            }

            // Create a batcher for each index update trigger and notify it of every deleted entry.
            List<AbstractIndexUpdateTriggerBatcher> batchers;
            using (StopwatchScope.For(indexUpdateTriggersDuration))
            {
                batchers = context.IndexUpdateTriggers.Select(x => x.CreateBatcher(indexId))
                    .Where(x => x != null)
                    .ToList();

                keys.Apply(key => InvokeOnIndexEntryDeletedOnAllBatchers(batchers, new Term(Constants.DocumentIdFieldName, key.ToLowerInvariant())));
            }

            // Remove the Lucene documents for the deleted keys.
            var deleteDocumentsDuration = new Stopwatch();
            using (StopwatchScope.For(deleteDocumentsDuration))
            {
                writer.DeleteDocuments(keys.Select(k => new Term(Constants.DocumentIdFieldName, k.ToLowerInvariant())).ToArray());
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_Documents, deleteDocumentsDuration.ElapsedMilliseconds));

            // Dispose the batchers, logging and recording any errors without aborting the batch.
            using (StopwatchScope.For(indexUpdateTriggersDuration))
            {
                batchers.ApplyAndIgnoreAllErrors(
                    e =>
                    {
                        logIndexing.WarnException("Failed to dispose on index update trigger in " + PublicName, e);
                        context.AddError(indexId, PublicName, null, e, "Dispose Trigger");
                    },
                    batcher => batcher.Dispose());
            }

            deletionBatchInfo.PerformanceStats.Add(PerformanceStats.From(IndexingOperation.Delete_IndexUpdateTriggers, indexUpdateTriggersDuration.ElapsedMilliseconds));

            return new IndexedItemsInfo(GetLastEtagFromStats())
            {
                ChangedDocs = keys.Length,
                DeletedKeys = keys
            };
        }, deletionBatchInfo.PerformanceStats);
    }
    finally
    {
        if (deletionBatchInfo != null)
        {
            context.ReportDeletionBatchCompleted(deletionBatchInfo);
        }
    }
}
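// ApplyAndIgnoreAllErrors above disposes every batcher even when some of them throw.
// Below is a minimal sketch of such an extension method; the signature is inferred from
// the call site and is illustrative, not RavenDB's actual source. It would need to live
// in a top-level static class, assuming System and System.Collections.Generic.
internal static class ApplyAndIgnoreAllErrorsSketch
{
    public static void ApplyAndIgnoreAllErrors<T>(this IEnumerable<T> items, Action<Exception> onError, Action<T> action)
    {
        foreach (var item in items)
        {
            try
            {
                action(item);
            }
            catch (Exception e)
            {
                // Report the failure to the caller's handler and keep going, so one
                // faulty trigger batcher cannot prevent the others from being disposed.
                onError(e);
            }
        }
    }
}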