public void Execute()
{
    using (LogContext.WithDatabase(context.DatabaseName))
    {
        Init();

        var name = GetType().Name;
        var workComment = "WORK BY " + name;

        bool isIdle = false;
        while (ShouldRun)
        {
            bool foundWork;
            try
            {
                bool onlyFoundIdleWork;
                foundWork = ExecuteIndexing(isIdle, out onlyFoundIdleWork);
                if (foundWork && onlyFoundIdleWork == false)
                {
                    isIdle = false;
                }

                int runs = 32;
                // we want to drain all of the pending tasks before the next run
                // but we don't want to halt indexing completely
                while (context.RunIndexing && runs-- > 0)
                {
                    if (ExecuteTasks() == false)
                    {
                        break;
                    }
                    foundWork = true;
                }
            }
            catch (OutOfMemoryException oome)
            {
                foundWork = true;
                HandleOutOfMemoryException(oome);
            }
            catch (AggregateException ae)
            {
                foundWork = true;
                var actual = ae.ExtractSingleInnerException();
                var oome = actual as OutOfMemoryException;
                if (oome == null)
                {
                    if (TransactionalStorageHelper.IsOutOfMemoryException(actual))
                    {
                        autoTuner.HandleOutOfMemory();
                    }

                    Log.ErrorException("Failed to execute indexing", ae);
                }
                else
                {
                    HandleOutOfMemoryException(oome);
                }
            }
            catch (OperationCanceledException)
            {
                Log.Info("Got rude cancellation of indexing as a result of shutdown, aborting current indexing run");
                return;
            }
            catch (Exception e)
            {
                foundWork = true; // we want to keep on trying, anyway, not wait for the timeout or more work
                Log.ErrorException("Failed to execute indexing", e);
                if (TransactionalStorageHelper.IsOutOfMemoryException(e))
                {
                    autoTuner.HandleOutOfMemory();
                }
            }

            if (foundWork == false && context.RunIndexing)
            {
                isIdle = context.WaitForWork(context.Configuration.TimeToWaitBeforeRunningIdleIndexes, ref workCounter, () =>
                {
                    try
                    {
                        FlushIndexes();
                    }
                    catch (Exception e)
                    {
                        Log.WarnException("Could not flush indexes properly", e);
                    }

                    try
                    {
                        CleanupPrefetchers();
                    }
                    catch (Exception e)
                    {
                        Log.WarnException("Could not cleanup prefetchers properly", e);
                    }
                }, name);
            }
            else // notify the tasks executer that it has work to do
            {
                context.ShouldNotifyAboutWork(() => workComment);
                context.NotifyAboutWork();
            }
        }

        Dispose();
    }
}
private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
{
    currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index);

    IndexingPerformanceStats performanceResult = null;
    var wasOutOfMemory = false;
    var wasOperationCanceled = false;
    try
    {
        transactionalStorage.Batch(actions =>
        {
            performanceResult = IndexDocuments(actions, batchForIndex, token);
        });

        // This can be null if IndexDocuments fails to execute and the exception is caught.
        if (performanceResult != null)
        {
            performanceResult.RunCompleted();
        }
    }
    catch (OperationCanceledException)
    {
        wasOperationCanceled = true;
        throw;
    }
    catch (Exception e)
    {
        var exception = e;
        var aggregateException = exception as AggregateException;
        if (aggregateException != null)
        {
            exception = aggregateException.ExtractSingleInnerException();
        }

        if (TransactionalStorageHelper.IsWriteConflict(exception))
        {
            return null;
        }

        Log.WarnException(string.Format("Failed to index documents for index: {0}", batchForIndex.Index.PublicName), exception);

        wasOutOfMemory = TransactionalStorageHelper.IsOutOfMemoryException(exception);
        if (wasOutOfMemory == false)
        {
            context.AddError(batchForIndex.IndexId, batchForIndex.Index.PublicName, null, exception);
        }
    }
    finally
    {
        if (performanceResult != null)
        {
            performanceResult.OnCompleted = null;
        }

        if (Log.IsDebugEnabled)
        {
            Log.Debug("After indexing {0} documents, the new last etag is: {1} for {2}",
                batchForIndex.Batch.Docs.Count, lastEtag, batchForIndex.Index.PublicName);
        }

        if (wasOutOfMemory == false && wasOperationCanceled == false)
        {
            transactionalStorage.Batch(actions =>
            {
                // whether or not we succeeded in indexing, we have to update this,
                // because otherwise we keep trying to re-index failed documents
                actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified);
            });
        }
        else if (wasOutOfMemory)
        {
            HandleOutOfMemory(batchForIndex);
        }

        Index _;
        currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
    }

    return performanceResult;
}
public static void AddAlert(this DocumentDatabase self, Alert alert)
{
    var tries = 0;
    while (true)
    {
        using (self.TransactionalStorage.DisableBatchNesting())
        using (var putSerialLock = self.DocumentLock.TryLock(25))
        {
            if (putSerialLock == null)
            {
                continue;
            }

            AlertsDocument alertsDocument;
            var alertsDoc = self.Documents.Get(Constants.RavenAlerts, null);
            RavenJObject metadata;
            Etag etag;
            if (alertsDoc == null)
            {
                etag = Etag.Empty;
                alertsDocument = new AlertsDocument();
                metadata = new RavenJObject();
            }
            else
            {
                etag = alertsDoc.Etag;
                alertsDocument = alertsDoc.DataAsJson.JsonDeserialization<AlertsDocument>() ?? new AlertsDocument();
                metadata = alertsDoc.Metadata;
            }

            var withSameUnique = alertsDocument.Alerts.FirstOrDefault(alert1 => alert1.UniqueKey == alert.UniqueKey);
            if (withSameUnique != null)
            {
                // preserve when the existing alert with the same key was last dismissed
                alert.LastDismissedAt = withSameUnique.LastDismissedAt;
                alertsDocument.Alerts.Remove(withSameUnique);
            }

            alertsDocument.Alerts.Add(alert);
            var document = RavenJObject.FromObject(alertsDocument);
            document.Remove("Id");

            try
            {
                self.Documents.Put(Constants.RavenAlerts, etag, document, metadata, null);
                return;
            }
            catch (ConcurrencyException)
            {
                // another writer updated the alerts document, try again...
            }
            catch (Exception e)
            {
                if (TransactionalStorageHelper.IsOutOfMemoryException(e))
                {
                    if (tries++ < MaxTries)
                    {
                        Thread.Sleep(11);
                        continue;
                    }

                    Logger.WarnException("Couldn't save alerts document due to " +
                        $"{self.TransactionalStorage.FriendlyName} out of memory exception", e);
                    return;
                }

                throw;
            }
        }
    }
}
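For reference, a minimal usage sketch of the extension method above. The caller name, alert text, and alert level are illustrative assumptions; only Alert members that already appear in these examples are used. Reusing the same UniqueKey on each call replaces the existing alert while preserving its LastDismissedAt.

// Hypothetical caller sketch: raise (or refresh) a named alert on a database instance.
private static void ReportIndexingProblem(DocumentDatabase database, Exception error)
{
    database.AddAlert(new Alert
    {
        AlertLevel = AlertLevel.Error,
        CreatedAt = DateTime.UtcNow,
        Title = "Indexing problem",
        UniqueKey = "Indexing problem", // same key on every call, so the alert is updated rather than duplicated
        Message = error.ToString(),
        Exception = error.Message
    });
}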
private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
{
    if (currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index) == false)
    {
        Log.Error("Entered handle indexing with index {0} inside currentlyProcessedIndexes", batchForIndex.Index.PublicName);
        batchForIndex.SignalIndexingComplete();
        return null;
    }

    IndexingPerformanceStats performanceResult = null;
    var wasOutOfMemory = false;
    var wasOperationCanceled = false;
    try
    {
        transactionalStorage.Batch(actions =>
        {
            performanceResult = IndexDocuments(actions, batchForIndex, token);
        });

        // This can be null if IndexDocuments fails to execute and the exception is caught.
        if (performanceResult != null)
            performanceResult.RunCompleted();
    }
    catch (OperationCanceledException)
    {
        wasOperationCanceled = true;
        throw;
    }
    catch (Exception e)
    {
        var exception = e;
        var aggregateException = exception as AggregateException;
        if (aggregateException != null)
            exception = aggregateException.ExtractSingleInnerException();

        if (TransactionalStorageHelper.IsWriteConflict(exception))
            return null;

        Log.WarnException(string.Format("Failed to index documents for index: {0}", batchForIndex.Index.PublicName), exception);

        wasOutOfMemory = TransactionalStorageHelper.IsOutOfMemoryException(exception);
        if (wasOutOfMemory == false)
            context.AddError(batchForIndex.IndexId, batchForIndex.Index.PublicName, null, exception);
    }
    finally
    {
        if (performanceResult != null)
        {
            performanceResult.OnCompleted = null;
        }

        Index _;
        if (Log.IsDebugEnabled)
        {
            Log.Debug("After indexing {0} documents, the new last etag is: {1} for {2}",
                batchForIndex.Batch.Docs.Count, lastEtag, batchForIndex.Index.PublicName);
        }

        try
        {
            if (wasOutOfMemory == false && wasOperationCanceled == false)
            {
                bool keepTrying = true;
                for (int i = 0; i < 10 && keepTrying; i++)
                {
                    keepTrying = false;
                    transactionalStorage.Batch(actions =>
                    {
                        try
                        {
                            // whether or not we succeeded in indexing, we have to update this,
                            // because otherwise we keep trying to re-index failed documents
                            actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified);
                        }
                        catch (Exception e)
                        {
                            if (actions.IsWriteConflict(e))
                            {
                                keepTrying = true;
                                return;
                            }
                            throw;
                        }
                    });

                    if (keepTrying)
                        Thread.Sleep(11);
                }
            }
            else if (wasOutOfMemory)
            {
                HandleOutOfMemory(batchForIndex);
            }
        }
        finally
        {
            currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
            batchForIndex.SignalIndexingComplete();
            batchForIndex.Index.IsMapIndexingInProgress = false;
        }
    }

    return performanceResult;
}
protected bool HandleIfOutOfMemory(Exception exception, OutOfMemoryDetails details)
{
    try
    {
        var ae = exception as AggregateException;
        if (ae == null)
        {
            if (exception is OutOfMemoryException)
            {
                HandleSystemOutOfMemoryException(exception);
                return true;
            }

            if (TransactionalStorageHelper.IsOutOfMemoryException(exception))
            {
                HandleRavenOutOfMemoryException(exception, details);
                return true;
            }

            return false;
        }

        var isSystemOutOfMemory = false;
        var isRavenOutOfMemoryException = false;
        Exception oome = null;
        Exception ravenOutOfMemoryException = null;

        foreach (var innerException in ae.Flatten().InnerExceptions)
        {
            if (innerException is OutOfMemoryException)
            {
                isSystemOutOfMemory = true;
                oome = innerException;
            }

            if (TransactionalStorageHelper.IsOutOfMemoryException(innerException))
            {
                isRavenOutOfMemoryException = true;
                ravenOutOfMemoryException = innerException;
            }

            if (isSystemOutOfMemory && isRavenOutOfMemoryException)
            {
                break;
            }
        }

        if (isSystemOutOfMemory)
        {
            HandleSystemOutOfMemoryException(oome);
        }

        if (isRavenOutOfMemoryException)
        {
            HandleRavenOutOfMemoryException(ravenOutOfMemoryException, details);
        }

        return isRavenOutOfMemoryException;
    }
    catch (Exception e)
    {
        const string error = "Couldn't execute out of memory exception handler";
        Log.WarnException(error, e);
        context.Database.AddAlert(new Alert
        {
            AlertLevel = AlertLevel.Error,
            CreatedAt = DateTime.UtcNow,
            Title = error,
            UniqueKey = error,
            Message = e.ToString(),
            Exception = e.Message
        });

        return false;
    }
}
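The boolean result is what lets a call site decide whether an exception was absorbed. A hypothetical call-site sketch follows; ExecuteBatch and details are assumed names standing in for the caller's own work and its OutOfMemoryDetails instance, and are not part of the code above.

// Hypothetical call-site sketch: only swallow the exception when HandleIfOutOfMemory
// reports that it recognized and handled an out-of-memory condition; otherwise rethrow.
try
{
    ExecuteBatch(); // stand-in for the actual indexing/storage work
}
catch (Exception e)
{
    if (HandleIfOutOfMemory(e, details) == false)
        throw;
}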