Example #1
        private IndexingPerformanceStats IndexDocuments(IStorageActionsAccessor actions, IndexingBatchForIndex indexingBatchForIndex, CancellationToken token)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexingBatchForIndex.IndexId);

            if (viewGenerator == null)
            {
                return null; // index was deleted, probably
            }
            var batch = indexingBatchForIndex.Batch;

            IndexingPerformanceStats performanceStats = null;

            try
            {
                if (Log.IsDebugEnabled)
                {
                    string ids;
                    if (batch.Ids.Count < 256)
                    {
                        ids = string.Join(",", batch.Ids);
                    }
                    else
                    {
                        ids = string.Join(", ", batch.Ids.Take(128)) + " ... " + string.Join(", ", batch.Ids.Skip(batch.Ids.Count - 128));
                    }
                    Log.Debug("Indexing {0} documents for index: {1}. ({2})", batch.Docs.Count, indexingBatchForIndex.Index.PublicName, ids);
                }
                context.CancellationToken.ThrowIfCancellationRequested();
                token.ThrowIfCancellationRequested();

                performanceStats = context.IndexStorage.Index(indexingBatchForIndex.IndexId, viewGenerator, batch, context, actions, batch.DateTime ?? DateTime.MinValue, token);
            }
            catch (OperationCanceledException)
            {
                throw;
            }
            catch (Exception e)
            {
                if (actions.IsWriteConflict(e))
                {
                    return null;
                }

                Log.WarnException(string.Format("Failed to index documents for index: {0}", indexingBatchForIndex.Index.PublicName), e);
                context.AddError(indexingBatchForIndex.IndexId, indexingBatchForIndex.Index.PublicName, null, e);
            }

            return performanceStats;
        }
Example #2
		private IEnumerable<IndexingBatchForIndex> FilterIndexes(IList<IndexToWorkOn> indexesToWorkOn, List<JsonDocument> jsonDocs, Etag highestETagInBatch)
		{
			var last = jsonDocs.Last();

			Debug.Assert(last.Etag != null);
			Debug.Assert(last.LastModified != null);

			var lastEtag = last.Etag;
			var lastModified = last.LastModified.Value;

			var documentRetriever = new DocumentRetriever(null, context.ReadTriggers, context.Database.InFlightTransactionalState);

			var filteredDocs =
				BackgroundTaskExecuter.Instance.Apply(context, jsonDocs, doc =>
				{
					var filteredDoc = documentRetriever.ExecuteReadTriggers(doc, null, ReadOperation.Index);
					return filteredDoc == null ? new
					{
						Doc = doc,
						Json = (object)new FilteredDocument(doc)
					} : new
					{
						Doc = filteredDoc,
						Json = JsonToExpando.Convert(doc.ToJson())
					};
				});

			Log.Debug("After read triggers executed, {0} documents remained", filteredDocs.Count);

			var results = new IndexingBatchForIndex[indexesToWorkOn.Count];
			var actions = new Action<IStorageActionsAccessor>[indexesToWorkOn.Count];

			BackgroundTaskExecuter.Instance.ExecuteAll(context, indexesToWorkOn, (indexToWorkOn, i) =>
			{
				var indexName = indexToWorkOn.IndexName;
				var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexName);
				if (viewGenerator == null)
					return; // probably deleted

				var batch = new IndexingBatch(highestETagInBatch);

				foreach (var item in filteredDocs)
				{
					if (prefetchingBehavior.FilterDocuments(item.Doc) == false)
						continue;

					// have we already indexed this document in this index?
					var etag = item.Doc.Etag;
					if (etag == null)
						continue;

					// is the Raven-Entity-Name a match for the things the index executes on?
					if (viewGenerator.ForEntityNames.Count != 0 &&
						viewGenerator.ForEntityNames.Contains(item.Doc.Metadata.Value<string>(Constants.RavenEntityName)) == false)
					{
						continue;
					}

					batch.Add(item.Doc, item.Json, prefetchingBehavior.ShouldSkipDeleteFromIndex(item.Doc));

					if (batch.DateTime == null)
						batch.DateTime = item.Doc.LastModified;
					else
						batch.DateTime = batch.DateTime > item.Doc.LastModified
											 ? item.Doc.LastModified
											 : batch.DateTime;
				}

				if (batch.Docs.Count == 0)
				{
					Log.Debug("All documents have been filtered for {0}, no indexing will be performed, updating to {1}, {2}", indexName,
							  lastEtag, lastModified);
					// we use it this way to batch all the updates together
					actions[i] = accessor => accessor.Indexing.UpdateLastIndexed(indexName, lastEtag, lastModified);
					return;
				}
				if (Log.IsDebugEnabled)
				{
					Log.Debug("Going to index {0} documents in {1}: ({2})", batch.Ids.Count, indexToWorkOn, string.Join(", ", batch.Ids));
				}
				results[i] = new IndexingBatchForIndex
				{
					Batch = batch,
					IndexName = indexToWorkOn.IndexName,
					Index = indexToWorkOn.Index,
					LastIndexedEtag = indexToWorkOn.LastIndexedEtag
				};

			});

			transactionalStorage.Batch(actionsAccessor =>
			{
				foreach (var action in actions)
				{
					if (action != null)
						action(actionsAccessor);
				}
			});

			return results.Where(x => x != null);
		}
Example #3
        private IEnumerable<IndexingBatchForIndex> FilterIndexes(IList<IndexToWorkOn> indexesToWorkOn, List<JsonDocument> jsonDocs, Etag highestETagInBatch)
        {
            var last = jsonDocs.Last();

            Debug.Assert(last.Etag != null);
            Debug.Assert(last.LastModified != null);

            var lastEtag     = last.Etag;
            var lastModified = last.LastModified.Value;

            var documentRetriever = new DocumentRetriever(null, null, context.ReadTriggers, context.Database.InFlightTransactionalState);

            var filteredDocs =
                BackgroundTaskExecuter.Instance.Apply(context, jsonDocs, doc =>
            {
                var filteredDoc = documentRetriever.ExecuteReadTriggers(doc, null, ReadOperation.Index);
                return filteredDoc == null ? new
                {
                    Doc = doc,
                    Json = (object)new FilteredDocument(doc)
                } : new
                {
                    Doc = filteredDoc,
                    Json = JsonToExpando.Convert(doc.ToJson())
                };
            });

            Log.Debug("After read triggers executed, {0} documents remained", filteredDocs.Count);

            var results = new IndexingBatchForIndex[indexesToWorkOn.Count];
            var actions = new Action<IStorageActionsAccessor>[indexesToWorkOn.Count];

            BackgroundTaskExecuter.Instance.ExecuteAll(context, indexesToWorkOn, (indexToWorkOn, i) =>
            {
                var indexName     = indexToWorkOn.Index.PublicName;
                var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexName);
                if (viewGenerator == null)
                {
                    return; // probably deleted
                }
                var batch = new IndexingBatch(highestETagInBatch);

                foreach (var item in filteredDocs)
                {
                    if (defaultPrefetchingBehavior.FilterDocuments(item.Doc) == false)
                    {
                        continue;
                    }

                    // have we already indexed this document in this index?
                    var etag = item.Doc.Etag;
                    if (etag == null)
                    {
                        continue;
                    }

                    // is the Raven-Entity-Name a match for the things the index executes on?
                    if (viewGenerator.ForEntityNames.Count != 0 &&
                        viewGenerator.ForEntityNames.Contains(item.Doc.Metadata.Value<string>(Constants.RavenEntityName)) == false)
                    {
                        continue;
                    }

                    batch.Add(item.Doc, item.Json, defaultPrefetchingBehavior.ShouldSkipDeleteFromIndex(item.Doc));

                    if (batch.DateTime == null)
                    {
                        batch.DateTime = item.Doc.LastModified;
                    }
                    else
                    {
                        batch.DateTime = batch.DateTime > item.Doc.LastModified
                            ? item.Doc.LastModified
                            : batch.DateTime;
                    }
                }

                if (batch.Docs.Count == 0)
                {
                    Log.Debug("All documents have been filtered for {0}, no indexing will be performed, updating to {1}, {2}", indexName,
                              lastEtag, lastModified);
                    // we use it this way to batch all the updates together
                    actions[i] = accessor =>
                    {
                        accessor.Indexing.UpdateLastIndexed(indexToWorkOn.Index.indexId, lastEtag, lastModified);

                        accessor.AfterStorageCommit += () =>
                        {
                            indexToWorkOn.Index.EnsureIndexWriter();
                            indexToWorkOn.Index.Flush(lastEtag);
                        };
                    };
                    return;
                }
                if (Log.IsDebugEnabled)
                {
                    Log.Debug("Going to index {0} documents in {1}: ({2})", batch.Ids.Count, indexToWorkOn, string.Join(", ", batch.Ids));
                }
                results[i] = new IndexingBatchForIndex
                {
                    Batch           = batch,
                    IndexId         = indexToWorkOn.IndexId,
                    Index           = indexToWorkOn.Index,
                    LastIndexedEtag = indexToWorkOn.LastIndexedEtag
                };
            });

            transactionalStorage.Batch(actionsAccessor =>
            {
                foreach (var action in actions)
                {
                    if (action != null)
                    {
                        action(actionsAccessor);
                    }
                }
            });

            return results.Where(x => x != null);
        }
Example #4
        private IndexingPerformanceStats IndexDocuments(IStorageActionsAccessor actions, IndexingBatchForIndex indexingBatchForIndex, CancellationToken token)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexingBatchForIndex.IndexId);

            if (viewGenerator == null)
            {
                return null; // index was deleted, probably
            }
            var batch = indexingBatchForIndex.Batch;

            if (Log.IsDebugEnabled)
            {
                string ids;
                if (batch.Ids.Count < 256)
                {
                    ids = string.Join(",", batch.Ids);
                }
                else
                {
                    ids = string.Join(", ", batch.Ids.Take(128)) + " ... " + string.Join(", ", batch.Ids.Skip(batch.Ids.Count - 128));
                }
                Log.Debug("Indexing {0} documents for index: {1}. ({2})", batch.Docs.Count, indexingBatchForIndex.Index.PublicName, ids);
            }

            token.ThrowIfCancellationRequested();

            return context.IndexStorage.Index(indexingBatchForIndex.IndexId, viewGenerator, batch, context, actions, batch.DateTime ?? DateTime.MinValue, token);
        }
Example #5
		private void HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified)
		{
			try
			{
				transactionalStorage.Batch(actions => IndexDocuments(actions, batchForIndex.IndexName, batchForIndex.Batch));
			}
			catch (Exception e)
			{
				Log.Warn("Failed to index " + batchForIndex.IndexName, e);
			}
			finally
			{
				if (Log.IsDebugEnabled)
				{
					Log.Debug("After indexing {0} documents, the new last etag for is: {1} for {2}",
							  batchForIndex.Batch.Docs.Count,
							  lastEtag,
							  batchForIndex.IndexName);
				}

				transactionalStorage.Batch(actions =>
					// whether we succeeded in indexing or not, we have to update this
					// because otherwise we keep trying to re-index failed documents
										   actions.Indexing.UpdateLastIndexed(batchForIndex.IndexName, lastEtag, lastModified));
			}
		}
Example #6
        private IndexingPerformanceStats IndexDocuments(IStorageActionsAccessor actions, IndexingBatchForIndex indexingBatchForIndex, CancellationToken token)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexingBatchForIndex.IndexId);
            if (viewGenerator == null)
                return null; // index was deleted, probably

            var batch = indexingBatchForIndex.Batch;

            if (Log.IsDebugEnabled)
            {
                string ids;
                if (batch.Ids.Count < 256)
                    ids = string.Join(",", batch.Ids);
                else
                {
                    ids = string.Join(", ", batch.Ids.Take(128)) + " ... " + string.Join(", ", batch.Ids.Skip(batch.Ids.Count - 128));
                }
                Log.Debug("Indexing {0} documents for index: {1}. ({2})", batch.Docs.Count, indexingBatchForIndex.Index.PublicName, ids);
            }

            token.ThrowIfCancellationRequested();

            return context.IndexStorage.Index(indexingBatchForIndex.IndexId, viewGenerator, batch, context, actions, batch.DateTime ?? DateTime.MinValue, token);
        }
Example #7
        private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
        {
            currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index);

            IndexingPerformanceStats performanceResult = null;
            var wasOutOfMemory       = false;
            var wasOperationCanceled = false;

            try
            {
                transactionalStorage.Batch(actions =>
                {
                    performanceResult = IndexDocuments(actions, batchForIndex, token);
                });

                // This can be null if IndexDocuments fails to execute and the exception is caught.
                if (performanceResult != null)
                {
                    performanceResult.RunCompleted();
                }
            }
            catch (OperationCanceledException)
            {
                wasOperationCanceled = true;
                throw;
            }
            catch (Exception e)
            {
                var exception          = e;
                var aggregateException = exception as AggregateException;
                if (aggregateException != null)
                {
                    exception = aggregateException.ExtractSingleInnerException();
                }

                if (TransactionalStorageHelper.IsWriteConflict(exception))
                {
                    return null;
                }

                Log.WarnException(string.Format("Failed to index documents for index: {0}", batchForIndex.Index.PublicName), exception);

                wasOutOfMemory = TransactionalStorageHelper.IsOutOfMemoryException(exception);

                if (wasOutOfMemory == false)
                {
                    context.AddError(batchForIndex.IndexId, batchForIndex.Index.PublicName, null, exception);
                }
            }
            finally
            {
                if (performanceResult != null)
                {
                    performanceResult.OnCompleted = null;
                }

                if (Log.IsDebugEnabled)
                {
                    Log.Debug("After indexing {0} documents, the new last etag for is: {1} for {2}",
                              batchForIndex.Batch.Docs.Count,
                              lastEtag,
                              batchForIndex.Index.PublicName);
                }

                if (wasOutOfMemory == false && wasOperationCanceled == false)
                {
                    transactionalStorage.Batch(actions =>
                    {
                        // whether we succeeded in indexing or not, we have to update this
                        // because otherwise we keep trying to re-index failed documents
                        actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified);
                    });
                }
                else if (wasOutOfMemory)
                {
                    HandleOutOfMemory(batchForIndex);
                }

                Index _;
                currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
            }

            return performanceResult;
        }
Example #8
        private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
        {
            currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index);

            IndexingPerformanceStats performanceResult = null;
            var wasOutOfMemory = false;
            var wasOperationCanceled = false;
            try
            {
                transactionalStorage.Batch(actions =>
                {
                    performanceResult = IndexDocuments(actions, batchForIndex, token);
                });

                // This can be null if IndexDocuments fails to execute and the exception is caught.
                if (performanceResult != null)
                    performanceResult.RunCompleted();
            }
            catch (OperationCanceledException)
            {
                wasOperationCanceled = true;
                throw;
            }
            catch (Exception e)
            {
                var exception = e;
                var aggregateException = exception as AggregateException;
                if (aggregateException != null)
                    exception = aggregateException.ExtractSingleInnerException();

                if (TransactionalStorageHelper.IsWriteConflict(exception))
                    return null;

                Log.WarnException(string.Format("Failed to index documents for index: {0}", batchForIndex.Index.PublicName), exception);

                wasOutOfMemory = TransactionalStorageHelper.IsOutOfMemoryException(exception);

                if (wasOutOfMemory == false)
                    context.AddError(batchForIndex.IndexId, batchForIndex.Index.PublicName, null, exception);
            }
            finally
            {
                if (performanceResult != null)
                {                    
                    performanceResult.OnCompleted = null;
                }				

                if (Log.IsDebugEnabled)
                {
                    Log.Debug("After indexing {0} documents, the new last etag for is: {1} for {2}",
                              batchForIndex.Batch.Docs.Count,
                              lastEtag,
                              batchForIndex.Index.PublicName);
                }

                if (wasOutOfMemory == false && wasOperationCanceled == false)
                {
                    transactionalStorage.Batch(actions =>
                    {
                        // whether we succeeded in indexing or not, we have to update this
                        // because otherwise we keep trying to re-index failed documents
                        actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified);
                    });
                }
                else if (wasOutOfMemory)
                    HandleOutOfMemory(batchForIndex);

                Index _;
                currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
            }

            return performanceResult;
        }
Example #9
        private void HandleOutOfMemory(IndexingBatchForIndex batchForIndex)
        {
            transactionalStorage.Batch(actions =>
            {
                var instance = context.IndexStorage.GetIndexInstance(batchForIndex.IndexId);
                if (instance == null)
                {
                    return;
                }

                Log.Error("Disabled index '{0}'. Reason: out of memory.", instance.PublicName);

                string configurationKey = null;
                if (string.Equals(context.Database.TransactionalStorage.FriendlyName, InMemoryRavenConfiguration.VoronTypeName, StringComparison.OrdinalIgnoreCase))
                {
                    configurationKey = Constants.Voron.MaxScratchBufferSize;
                }
                else if (string.Equals(context.Database.TransactionalStorage.FriendlyName, InMemoryRavenConfiguration.EsentTypeName, StringComparison.OrdinalIgnoreCase))
                {
                    configurationKey = Constants.Esent.MaxVerPages;
                }

                Debug.Assert(configurationKey != null);

                actions.Indexing.SetIndexPriority(batchForIndex.IndexId, IndexingPriority.Disabled);
                context.Database.AddAlert(new Alert
                {
                    AlertLevel = AlertLevel.Error,
                    CreatedAt = SystemTime.UtcNow,
                    Title = string.Format("Index '{0}' was disabled", instance.PublicName),
                    UniqueKey = string.Format("Index '{0}' was disabled", instance.IndexId),
                    Message = string.Format("Out of memory exception occurred in storage during indexing process for index '{0}'. As a result of this action, index changed state to disabled. Try increasing '{1}' value in configuration.", instance.PublicName, configurationKey)
                });
                instance.Priority = IndexingPriority.Disabled;
            });
        }
Example #10
        public void IndexPrecomputedBatch(PrecomputedIndexingBatch precomputedBatch, CancellationToken token)
        {
            token.ThrowIfCancellationRequested();

            context.MetricsCounters.IndexedPerSecond.Mark(precomputedBatch.Documents.Count);

            var indexToWorkOn = new IndexToWorkOn
            {
                Index = precomputedBatch.Index,
                IndexId = precomputedBatch.Index.indexId,
                LastIndexedEtag = Etag.Empty
            };

            using (LogContext.WithDatabase(context.DatabaseName))
            using (MapIndexingInProgress(new List<IndexToWorkOn> { indexToWorkOn }))
            {
                IndexingBatchForIndex indexingBatchForIndex;
                if (precomputedBatch.Documents.Count > 0)
                {
                    indexingBatchForIndex =
                        FilterIndexes(
                            new List<IndexToWorkOn> { indexToWorkOn },
                            precomputedBatch.Documents,
                            precomputedBatch.LastIndexed)
                        .FirstOrDefault();
                }
                else
                {
                    indexingBatchForIndex = new IndexingBatchForIndex
                    {
                        Batch = new IndexingBatch(precomputedBatch.LastIndexed),
                        Index = precomputedBatch.Index,
                        IndexId = precomputedBatch.Index.indexId,
                        LastIndexedEtag = precomputedBatch.LastIndexed
                    };
                }

                if (indexingBatchForIndex == null)
                    return;

                IndexingBatchInfo batchInfo = null;
                IndexingPerformanceStats performance = null;
                try
                {
                    batchInfo = context.ReportIndexingBatchStarted(precomputedBatch.Documents.Count, -1, new List<string>
                    {
                        indexToWorkOn.Index.PublicName
                    });

                    batchInfo.BatchType = BatchType.Precomputed;

                    if (Log.IsDebugEnabled)
                    {
                        Log.Debug("Going to index precomputed documents for a new index {0}. Count of precomputed docs {1}",
                            precomputedBatch.Index.PublicName, precomputedBatch.Documents.Count);
                    }

                    performance = HandleIndexingFor(indexingBatchForIndex, precomputedBatch.LastIndexed, precomputedBatch.LastModified, token);
                }
                finally
                {
                    if (batchInfo != null)
                    {
                        if (performance != null)
                            batchInfo.PerformanceStats.TryAdd(indexingBatchForIndex.Index.PublicName, performance);

                        context.ReportIndexingBatchCompleted(batchInfo);
                    }
                }
            }

            indexReplacer.ReplaceIndexes(new[] { indexToWorkOn.IndexId });
        }
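
A minimal sketch of how a caller might hand precomputed documents to IndexPrecomputedBatch. Only the members read above (Documents, Index, LastIndexed, LastModified) are assumed; the IndexExistingDocuments wrapper, the indexingExecuter field, and the object-initializer construction of PrecomputedIndexingBatch are illustrative and not taken from the snippets.

        // Illustrative only: member names follow the usages in IndexPrecomputedBatch above;
        // the wrapper method and the indexingExecuter field are assumptions.
        private void IndexExistingDocuments(Index index, List<JsonDocument> documents, CancellationToken token)
        {
            if (documents.Count == 0)
                return;

            var lastDoc = documents.Last();

            var precomputedBatch = new PrecomputedIndexingBatch
            {
                Index = index,
                Documents = documents,
                LastIndexed = lastDoc.Etag,                              // highest etag covered by this batch
                LastModified = lastDoc.LastModified ?? SystemTime.UtcNow
            };

            indexingExecuter.IndexPrecomputedBatch(precomputedBatch, token);
        }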
Example #11
        public void IndexPrecomputedBatch(PrecomputedIndexingBatch precomputedBatch, CancellationToken token)
        {
            token.ThrowIfCancellationRequested();

            context.MetricsCounters.IndexedPerSecond.Mark(precomputedBatch.Documents.Count);

            var indexToWorkOn = new IndexToWorkOn
            {
                Index           = precomputedBatch.Index,
                IndexId         = precomputedBatch.Index.indexId,
                LastIndexedEtag = Etag.Empty
            };

            using (LogContext.WithDatabase(context.DatabaseName))
                using (MapIndexingInProgress(new List<IndexToWorkOn> { indexToWorkOn }))
                {
                    IndexingBatchForIndex indexingBatchForIndex;
                    if (precomputedBatch.Documents.Count > 0)
                    {
                        indexingBatchForIndex =
                            FilterIndexes(
                                new List<IndexToWorkOn> { indexToWorkOn },
                                precomputedBatch.Documents,
                                precomputedBatch.LastIndexed)
                            .FirstOrDefault();
                    }
                    else
                    {
                        indexingBatchForIndex = new IndexingBatchForIndex
                        {
                            Batch           = new IndexingBatch(precomputedBatch.LastIndexed),
                            Index           = precomputedBatch.Index,
                            IndexId         = precomputedBatch.Index.indexId,
                            LastIndexedEtag = precomputedBatch.LastIndexed
                        };
                    }

                    if (indexingBatchForIndex == null)
                    {
                        return;
                    }

                    IndexingBatchInfo        batchInfo   = null;
                    IndexingPerformanceStats performance = null;
                    try
                    {
                        batchInfo = context.ReportIndexingBatchStarted(precomputedBatch.Documents.Count, -1, new List<string>
                        {
                            indexToWorkOn.Index.PublicName
                        });

                        batchInfo.BatchType = BatchType.Precomputed;

                        if (Log.IsDebugEnabled)
                        {
                            Log.Debug("Going to index precomputed documents for a new index {0}. Count of precomputed docs {1}",
                                      precomputedBatch.Index.PublicName, precomputedBatch.Documents.Count);
                        }

                        performance = HandleIndexingFor(indexingBatchForIndex, precomputedBatch.LastIndexed, precomputedBatch.LastModified, token);
                    }
                    finally
                    {
                        if (batchInfo != null)
                        {
                            if (performance != null)
                            {
                                batchInfo.PerformanceStats.TryAdd(indexingBatchForIndex.Index.PublicName, performance);
                            }

                            context.ReportIndexingBatchCompleted(batchInfo);
                        }
                    }
                }

            indexReplacer.ReplaceIndexes(new[] { indexToWorkOn.IndexId });
        }
Example #12
        private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
        {
            if (currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index) == false)
            {
                Log.Error("Entered handle indexing with index {0} inside currentlyProcessedIndexes", batchForIndex.Index.PublicName);
                batchForIndex.SignalIndexingComplete();
                return null;
            }
            IndexingPerformanceStats performanceResult = null;
            var wasOutOfMemory = false;
            var wasOperationCanceled = false;
            try
            {
                transactionalStorage.Batch(actions => { performanceResult = IndexDocuments(actions, batchForIndex, token); });


                // This can be null if IndexDocuments fails to execute and the exception is caught.
                if (performanceResult != null)
                    performanceResult.RunCompleted();
            }
            catch (OperationCanceledException)
            {
                wasOperationCanceled = true;
                throw;
            }
            catch (Exception e)
            {
                var exception = e;
                var aggregateException = exception as AggregateException;
                if (aggregateException != null)
                    exception = aggregateException.ExtractSingleInnerException();

                if (TransactionalStorageHelper.IsWriteConflict(exception))
                    return null;

                Log.WarnException(string.Format("Failed to index documents for index: {0}", batchForIndex.Index.PublicName), exception);

                wasOutOfMemory = TransactionalStorageHelper.IsOutOfMemoryException(exception);

                if (wasOutOfMemory == false)
                    context.AddError(batchForIndex.IndexId, batchForIndex.Index.PublicName, null, exception);
            }
            finally
            {
                if (performanceResult != null)
                {                    
                    performanceResult.OnCompleted = null;
                }				

                Index _;
                if (Log.IsDebugEnabled)
                {
                    Log.Debug("After indexing {0} documents, the new last etag for is: {1} for {2}",
                              batchForIndex.Batch.Docs.Count,
                              lastEtag,
                              batchForIndex.Index.PublicName);
                }

                try
                {
                    if (wasOutOfMemory == false && wasOperationCanceled == false)
                    {
                        bool keepTrying = true;

                        for (int i = 0; i < 10 && keepTrying; i++)
                        {
                            keepTrying = false;
                            transactionalStorage.Batch(actions =>
                            {
                                try
                                {
                                    // whether we succeeded in indexing or not, we have to update this
                                    // because otherwise we keep trying to re-index failed documents
                                    actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified);
                                }
                                catch (Exception e)
                                {
                                    if (actions.IsWriteConflict(e))
                                    {
                                        keepTrying = true;
                                        return;
                                    }
                                    throw;
                                }
                            });

                            if (keepTrying)
                                Thread.Sleep(11);
                        }
                    }
                    else if (wasOutOfMemory)
                        HandleOutOfMemory(batchForIndex);
                }
                finally
                {
                    currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
                    batchForIndex.SignalIndexingComplete();
                    batchForIndex.Index.IsMapIndexingInProgress = false;
                }
            }

            return performanceResult;
        }
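
The finally block above retries UpdateLastIndexed for up to ten attempts whenever the storage reports a write conflict. Pulled out on its own, the same pattern might look like the helper below; the RetryOnWriteConflict name and the maxAttempts parameter are illustrative and do not appear in the snippets.

        // Illustrative helper (not from the snippets): run a storage action and retry
        // a bounded number of times while the accessor reports a write conflict.
        private void RetryOnWriteConflict(Action<IStorageActionsAccessor> work, int maxAttempts = 10)
        {
            for (var attempt = 0; attempt < maxAttempts; attempt++)
            {
                var conflicted = false;

                transactionalStorage.Batch(actions =>
                {
                    try
                    {
                        work(actions);
                    }
                    catch (Exception e)
                    {
                        if (actions.IsWriteConflict(e))
                        {
                            conflicted = true; // retry in a fresh storage batch
                            return;
                        }
                        throw;
                    }
                });

                if (conflicted == false)
                    return;

                Thread.Sleep(11); // same short back-off as in the example above
            }
        }

With such a helper, the retry loop above would reduce to a single call: RetryOnWriteConflict(actions => actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified));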
Example #13
		private void IndexDocuments(IStorageActionsAccessor actions, IndexingBatchForIndex indexingBatchForIndex)
		{
			var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexingBatchForIndex.IndexId);
			if (viewGenerator == null)
				return; // index was deleted, probably

			var batch = indexingBatchForIndex.Batch;
			try
			{
				if (Log.IsDebugEnabled)
				{
					string ids;
					if (batch.Ids.Count < 256)
						ids = string.Join(",", batch.Ids);
					else
					{
						ids = string.Join(", ", batch.Ids.Take(128)) + " ... " + string.Join(", ", batch.Ids.Skip(batch.Ids.Count - 128));
					}
					Log.Debug("Indexing {0} documents for index: {1}. ({2})", batch.Docs.Count, indexingBatchForIndex.Index.PublicName, ids);
				}
				context.CancellationToken.ThrowIfCancellationRequested();

				
				context.IndexStorage.Index(indexingBatchForIndex.IndexId, viewGenerator, batch, context, actions, batch.DateTime ?? DateTime.MinValue);
			}
			catch (OperationCanceledException)
			{
				throw;
			}
			catch (Exception e)
			{
				if (actions.IsWriteConflict(e))
					return;
				Log.WarnException(string.Format("Failed to index documents for index: {0}", indexingBatchForIndex.Index.PublicName), e);
				context.AddError(indexingBatchForIndex.IndexId, indexingBatchForIndex.Index.PublicName, null, e.Message);
			}
		}
Example #14
		private IndexingPerformanceStats HandleIndexingFor(IndexingBatchForIndex batchForIndex, Etag lastEtag, DateTime lastModified, CancellationToken token)
		{
			currentlyProcessedIndexes.TryAdd(batchForIndex.IndexId, batchForIndex.Index);

			IndexingPerformanceStats performanceResult = null;

			try
			{
				transactionalStorage.Batch(actions =>
				{
					performanceResult = IndexDocuments(actions, batchForIndex, token);
				});

				// This can be null if IndexDocuments fails to execute and the exception is caught.
				if (performanceResult != null)
					performanceResult.RunCompleted();
			}
			catch (Exception e)
			{
				Log.WarnException("Failed to index " + batchForIndex.Index.PublicName, e);
			}
			finally
			{
				if (performanceResult != null)
				{
					performanceResult.OnCompleted = null;
				}

				if (Log.IsDebugEnabled)
				{
					Log.Debug("After indexing {0} documents, the new last etag for is: {1} for {2}",
							  batchForIndex.Batch.Docs.Count,
							  lastEtag,
							  batchForIndex.Index.PublicName);
				}

				transactionalStorage.Batch(actions =>
					// whether we succeeded in indexing or not, we have to update this
					// because otherwise we keep trying to re-index failed documents
					actions.Indexing.UpdateLastIndexed(batchForIndex.IndexId, lastEtag, lastModified));

				Index _;
				currentlyProcessedIndexes.TryRemove(batchForIndex.IndexId, out _);
			}

			return performanceResult;
		}
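
Taken together, the examples above imply a caller of roughly this shape: FilterIndexes turns one page of documents into per-index batches, and HandleIndexingFor indexes each batch and advances the last-indexed etag. The sketch below is inferred only from the signatures shown here; the ExecuteIndexingOnBatch name and the plain sequential loop are assumptions, not the project's actual scheduling code.

		// Rough sketch of a caller, inferred from the signatures above; the method
		// name and the sequential loop are assumptions, not the real scheduler.
		private void ExecuteIndexingOnBatch(IList<IndexToWorkOn> indexesToWorkOn, List<JsonDocument> jsonDocs, Etag highestETagInBatch, CancellationToken token)
		{
			if (jsonDocs.Count == 0)
				return;

			var last = jsonDocs.Last();
			var lastEtag = last.Etag;
			var lastModified = last.LastModified.Value;

			// FilterIndexes builds one IndexingBatchForIndex per index that still has work to do
			foreach (var batchForIndex in FilterIndexes(indexesToWorkOn, jsonDocs, highestETagInBatch))
			{
				token.ThrowIfCancellationRequested();

				// indexes the batch inside a storage transaction and updates the last-indexed etag
				HandleIndexingFor(batchForIndex, lastEtag, lastModified, token);
			}
		}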