public override void HandleDelete(Tombstone tombstone, string collection, Lazy <IndexWriteOperation> writer, TransactionOperationContext indexContext, IndexingStatsScope stats) { throw new NotSupportedException($"Index {Name} is an in-memory implementation of a faulty index", _e); }
private AggregationResult AggregateBatchResults(List <BlittableJsonReaderObject> aggregationBatch, TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token) { AggregationResult result; try { result = AggregateOn(aggregationBatch, indexContext, stats, token); } finally { aggregationBatch.Clear(); } return(result); }
protected abstract AggregationResult AggregateOn(List <BlittableJsonReaderObject> aggregationBatch, TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token);
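// The two members above follow a common pattern: the concrete reducer implements AggregateOn, while
// AggregateBatchResults guarantees the shared batch list is cleared even when aggregation throws, so a
// retried batch never re-reduces stale blittables. A minimal, self-contained sketch of that pattern
// (illustrative only; the type and method names below are not part of the original source):
using System;
using System.Collections.Generic;

static class BatchingPatternSketch
{
    // Aggregates the batch and always clears it, mirroring AggregateBatchResults above.
    public static TResult DrainBatch<TItem, TResult>(List<TItem> batch, Func<List<TItem>, TResult> aggregate)
    {
        try
        {
            return aggregate(batch);
        }
        finally
        {
            batch.Clear(); // a failed aggregation must not leave stale items for the next batch
        }
    }
}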
public bool Execute(DocumentsOperationContext databaseContext, TransactionOperationContext indexContext, Lazy<IndexWriteOperation> writeOperation, IndexingStatsScope stats, CancellationToken token)
{
    var maxTimeForDocumentTransactionToRemainOpen = Debugger.IsAttached == false
        ? _configuration.MaxTimeForDocumentTransactionToRemainOpen.AsTimeSpan
        : TimeSpan.FromMinutes(15);

    var moreWorkFound = false;
    var totalProcessedCount = 0;

    foreach (var collection in _index.Collections)
    {
        using (var collectionStats = stats.For("Collection_" + collection))
        {
            var lastMappedEtag = _indexStorage.ReadLastIndexedEtag(indexContext.Transaction, collection);

            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"Executing map for '{_index.Name}'. Collection: {collection} LastMappedEtag: {lastMappedEtag:#,#;;0}.");
            }

            var inMemoryStats = _index.GetStats(collection);
            var lastEtag = lastMappedEtag;
            var resultsCount = 0;
            var pageSize = int.MaxValue;

            var sw = new Stopwatch();
            var keepRunning = true;
            var lastCollectionEtag = -1L;

            while (keepRunning)
            {
                using (databaseContext.OpenReadTransaction())
                {
                    sw.Restart();

                    if (lastCollectionEtag == -1)
                    {
                        lastCollectionEtag = _index.GetLastDocumentEtagInCollection(databaseContext, collection);
                    }

                    var documents = GetDocumentsEnumerator(databaseContext, collection, lastEtag, pageSize);

                    using (var docsEnumerator = _index.GetMapEnumerator(documents, collection, indexContext, collectionStats, _index.Type))
                    {
                        while (true)
                        {
                            if (docsEnumerator.MoveNext(out IEnumerable mapResults) == false)
                            {
                                collectionStats.RecordMapCompletedReason("No more documents to index");
                                keepRunning = false;
                                break;
                            }

                            token.ThrowIfCancellationRequested();

                            var current = docsEnumerator.Current;
                            totalProcessedCount++;
                            collectionStats.RecordMapAttempt();
                            stats.RecordDocumentSize(current.Data.Size);

                            if (_logger.IsInfoEnabled && totalProcessedCount % 8192 == 0)
                            {
                                _logger.Info($"Executing map for '{_index.Name}'. Processed count: {totalProcessedCount:#,#;;0} etag: {lastEtag:#,#;;0}.");
                            }

                            lastEtag = current.Etag;
                            inMemoryStats.UpdateLastEtag(lastEtag, isTombstone: false);

                            try
                            {
                                var numberOfResults = _index.HandleMap(current.LowerId, current.Id, mapResults, writeOperation, indexContext, collectionStats);
                                resultsCount += numberOfResults;
                                collectionStats.RecordMapSuccess();
                                _index.MapsPerSec.MarkSingleThreaded(numberOfResults);
                            }
                            catch (Exception e) when (e.IsIndexError())
                            {
                                docsEnumerator.OnError();
                                _index.ErrorIndexIfCriticalException(e);

                                collectionStats.RecordMapError();

                                if (_logger.IsInfoEnabled)
                                {
                                    _logger.Info($"Failed to execute mapping function on '{current.Id}' for '{_index.Name}'.", e);
                                }

                                collectionStats.AddMapError(current.Id, $"Failed to execute mapping function on {current.Id}. " + $"Exception: {e}");
                            }

                            if (CanContinueBatch(databaseContext, indexContext, collectionStats, writeOperation, lastEtag, lastCollectionEtag, totalProcessedCount) == false)
                            {
                                keepRunning = false;
                                break;
                            }

                            if (totalProcessedCount >= pageSize)
                            {
                                keepRunning = false;
                                break;
                            }

                            if (MaybeRenewTransaction(databaseContext, sw, _configuration, ref maxTimeForDocumentTransactionToRemainOpen))
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if (lastMappedEtag == lastEtag)
            {
                // the last mapped etag hasn't changed
                continue;
            }

            moreWorkFound = true;

            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"Executed map for '{_index.Name}' index and '{collection}' collection. Got {resultsCount:#,#;;0} map results in {collectionStats.Duration.TotalMilliseconds:#,#;;0} ms.");
            }

            if (_index.Type.IsMap())
            {
                _index.SaveLastState();
                _indexStorage.WriteLastIndexedEtag(indexContext.Transaction, collection, lastEtag);
            }
            else
            {
                _mapReduceContext.ProcessedDocEtags[collection] = lastEtag;
            }
        }
    }

    return moreWorkFound;
}
public bool CanContinueBatch(DocumentsOperationContext documentsContext, TransactionOperationContext indexingContext, IndexingStatsScope stats, long currentEtag, long maxEtag, int count) { throw new NotSupportedException(); }
public void Set(ActionType actionType, string collection, Reference reference, string itemId, long lastIndexedParentEtag, TransactionOperationContext indexContext)
{
    var dictionary = GetDictionary(actionType);
    var referencedItemId = (string)reference.Key;

    indexContext.Transaction.InnerTransaction.LowLevelTransaction.AfterCommitWhenNewReadTransactionsPrevented += _ =>
    {
        // we update this only after the transaction was committed
        dictionary[collection] = new ReferenceState(referencedItemId, reference.Etag, itemId, lastIndexedParentEtag);

#if DEBUG
        if (_setCollections.Add((actionType, collection)) == false)
        {
            throw new InvalidOperationException($"Double set of collection {collection} of action type {actionType}");
        }
#endif
    };

#if DEBUG
    indexContext.Transaction.InnerTransaction.LowLevelTransaction.OnDispose += _ =>
    {
        _setCollections.Remove((actionType, collection));
    };
#endif
}
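// Set above defers the in-memory reference-state update to an "after commit" callback, so the dictionary
// only ever reflects state that was actually persisted. A minimal stand-in for that "apply side effects
// only after commit" idea (illustrative only; CommitScope is not a RavenDB type):
using System;
using System.Collections.Generic;

sealed class CommitScope
{
    private readonly List<Action> _afterCommit = new List<Action>();

    // Register a side effect that must only run once the transaction has committed.
    public void OnCommitted(Action action) => _afterCommit.Add(action);

    public void Commit()
    {
        // persist the transaction here, then run the deferred updates
        foreach (var action in _afterCommit)
            action();
        _afterCommit.Clear();
    }
}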
protected Query GetLuceneQuery(DocumentsOperationContext context, QueryMetadata metadata, QueryExpression whereExpression, BlittableJsonReaderObject parameters, Analyzer analyzer, QueryBuilderFactories factories)
{
    Query documentQuery;

    if (string.IsNullOrEmpty(metadata.QueryText))
    {
        if (_logger.IsInfoEnabled)
        {
            _logger.Info($"Issuing query on index {_indexName} for all documents");
        }

        documentQuery = new MatchAllDocsQuery();
    }
    else
    {
        if (_logger.IsInfoEnabled)
        {
            _logger.Info($"Issuing query on index {_indexName} for: {metadata.Query}");
        }

        // RavenPerFieldAnalyzerWrapper searchAnalyzer = null;
        try
        {
            //_persistance._a
            //searchAnalyzer = parent.CreateAnalyzer(new LowerCaseKeywordAnalyzer(), toDispose, true);
            //searchAnalyzer = parent.AnalyzerGenerators.Aggregate(searchAnalyzer, (currentAnalyzer, generator) =>
            //{
            //    Analyzer newAnalyzer = generator.GenerateAnalyzerForQuerying(parent.PublicName, query.Query, currentAnalyzer);
            //    if (newAnalyzer != currentAnalyzer)
            //    {
            //        DisposeAnalyzerAndFriends(toDispose, currentAnalyzer);
            //    }
            //    return parent.CreateAnalyzer(newAnalyzer, toDispose, true);
            //});

            IDisposable releaseServerContext = null;
            IDisposable closeServerTransaction = null;
            TransactionOperationContext serverContext = null;

            try
            {
                if (metadata.HasCmpXchg)
                {
                    releaseServerContext = context.DocumentDatabase.ServerStore.ContextPool.AllocateOperationContext(out serverContext);
                    closeServerTransaction = serverContext.OpenReadTransaction();
                }

                using (closeServerTransaction)
                    documentQuery = QueryBuilder.BuildQuery(serverContext, context, metadata, whereExpression, _index.Definition, parameters, analyzer, factories);
            }
            finally
            {
                releaseServerContext?.Dispose();
            }
        }
        finally
        {
            //DisposeAnalyzerAndFriends(toDispose, searchAnalyzer);
        }
    }

    //var afterTriggers = ApplyIndexTriggers(documentQuery);
    return documentQuery;
}
private async Task<LastEtagsInfo> GetLastStateByOperationId(long operationId, TransactionOperationContext context)
{
    var retries = 0;

    while (true)
    {
        if (++retries > 15)
        {
            return null;
        }

        var operationStatus = await GetOperationStatus(Options.DatabaseName, operationId, context);
        if (operationStatus == null)
        {
            return null;
        }

        if (operationStatus.TryGet("Completed", out bool completed) == false)
        {
            return null;
        }

        if (completed == false)
        {
            await Task.Delay(1000, Parameters.CancelToken.Token);
            continue;
        }

        if (operationStatus.TryGet("OperationState", out BlittableJsonReaderObject operationStateBlittable) == false)
        {
            // OperationState was added in the latest release of v3.5
            return null;
        }

        operationStateBlittable.TryGet(nameof(LastEtagsInfo.LastDocsEtag), out string lastDocsEtag);
        operationStateBlittable.TryGet(nameof(LastEtagsInfo.LastDocDeleteEtag), out string lastDocsDeleteEtag);
        operationStateBlittable.TryGet(nameof(LastEtagsInfo.LastAttachmentsEtag), out string lastAttachmentsEtag);
        operationStateBlittable.TryGet(nameof(LastEtagsInfo.LastAttachmentsDeleteEtag), out string lastAttachmentsDeleteEtag);

        var lastEtagsInfo = new LastEtagsInfo
        {
            ServerUrl = Options.ServerUrl,
            DatabaseName = Options.DatabaseName,
            LastDocsEtag = lastDocsEtag,
            LastDocDeleteEtag = lastDocsDeleteEtag,
            LastAttachmentsEtag = lastAttachmentsEtag,
            LastAttachmentsDeleteEtag = lastAttachmentsDeleteEtag
        };

        return lastEtagsInfo;
    }
}
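// GetLastStateByOperationId above polls the remote operation status, waiting a second between attempts and
// giving up after 15 tries. The same control flow in isolation (illustrative only; TryGetStatusAsync is a
// hypothetical probe callback, not part of the migration code):
using System;
using System.Threading;
using System.Threading.Tasks;

static class PollingSketch
{
    public static async Task<bool> WaitForCompletionAsync(
        Func<Task<bool?>> tryGetStatusAsync, // null = status unavailable, false = still running, true = done
        CancellationToken token,
        int maxRetries = 15)
    {
        for (var retries = 0; retries < maxRetries; retries++)
        {
            var status = await tryGetStatusAsync();
            if (status == null)
                return false;              // give up: the status cannot be read
            if (status == true)
                return true;               // the operation completed
            await Task.Delay(1000, token); // still running, wait and poll again
        }

        return false;                      // exceeded the retry budget
    }
}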
public override void HandleDelete(DocumentTombstone tombstone, string collection, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats) { if (_referencedCollections.Count > 0) { _handleReferences.HandleDelete(tombstone, collection, writer, indexContext, stats); } base.HandleDelete(tombstone, collection, writer, indexContext, stats); }
public IEnumerable <SubscriptionGeneralDataAndStats> GetAllRunningSubscriptions(TransactionOperationContext context, bool history, int start, int take) { foreach (var kvp in _subscriptionConnectionStates) { var subscriptionState = kvp.Value; var subscriptionStateConnection = subscriptionState.Connection; if (subscriptionStateConnection == null) { continue; } if (start > 0) { start--; continue; } if (take-- <= 0) { yield break; } var subscriptionData = GetSubscriptionFromServerStore(context, subscriptionStateConnection.Options.SubscriptionName); GetRunningSubscriptionInternal(history, subscriptionData, subscriptionState); yield return(subscriptionData); } }
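// Note on GetAllRunningSubscriptions above: the start/take paging only counts subscriptions that currently
// have a live connection; states whose Connection is null are skipped before any paging bookkeeping happens.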
private async Task <BlittableJsonReaderArray> GetRavenFsHeadersArray(string lastEtag, TransactionOperationContext context) { var response = await RunWithAuthRetry(async() => { var url = $"{Options.ServerUrl}/fs/{Options.DatabaseName}/streams/files?pageSize={RavenFsHeadersPageSize}&etag={lastEtag}"; var request = new HttpRequestMessage(HttpMethod.Get, url); var responseMessage = await Parameters.HttpClient.SendAsync(request, Parameters.CancelToken.Token); return(responseMessage); }); if (response.IsSuccessStatusCode == false) { var responseString = await response.Content.ReadAsStringAsync(); throw new InvalidOperationException($"Failed to get RavenFS headers list from server: {Options.ServerUrl}, " + $"status code: {response.StatusCode}, " + $"error: {responseString}"); } using (var responseStream = await response.Content.ReadAsStreamAsync()) { var headersList = await context.ReadForMemoryAsync(responseStream, "ravenfs-headers-list"); if (headersList.TryGet("Results", out BlittableJsonReaderArray headers) == false) { throw new InvalidDataException("Response is invalid"); } return(headers); } }
public override IIndexedDocumentsEnumerator GetMapEnumerator(IEnumerable <Document> documents, string collection, TransactionOperationContext indexContext, IndexingStatsScope stats, IndexType type) { return(new StaticIndexDocsEnumerator(documents, _compiled.Maps[collection], collection, stats, type)); }
protected override unsafe long CalculateIndexEtag(DocumentsOperationContext documentsContext, TransactionOperationContext indexContext, QueryMetadata query, bool isStale) { if (_referencedCollections.Count == 0) { return(base.CalculateIndexEtag(documentsContext, indexContext, query, isStale)); } var minLength = MinimumSizeForCalculateIndexEtagLength(query); var length = minLength + sizeof(long) * 4 * (Collections.Count * _referencedCollections.Count); // last referenced collection etags (document + tombstone) and last processed reference collection etags (document + tombstone) var indexEtagBytes = stackalloc byte[length]; CalculateIndexEtagInternal(indexEtagBytes, isStale, State, documentsContext, indexContext); UseAllDocumentsCounterAndCmpXchgEtags(documentsContext, query, length, indexEtagBytes); var writePos = indexEtagBytes + minLength; return(StaticIndexHelper.CalculateIndexEtag(this, length, indexEtagBytes, writePos, documentsContext, indexContext)); }
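// Note on the buffer sizing above: beyond the minimum etag payload, four longs are reserved for every
// (indexed collection, referenced collection) pair, matching the inline comment: the last referenced
// document and tombstone etags plus the last processed reference document and tombstone etags.
// StaticIndexHelper.CalculateIndexEtag then folds the whole buffer into the final etag value.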
public override int HandleMap(LazyStringValue lowerId, LazyStringValue id, IEnumerable mapResults, Lazy <IndexWriteOperation> writer, TransactionOperationContext indexContext, IndexingStatsScope stats) { throw new NotSupportedException($"Index {Name} is an in-memory implementation of a faulty index", _e); }
public override IIndexedItemEnumerator GetMapEnumerator(IEnumerable <IndexItem> items, string collection, TransactionOperationContext indexContext, IndexingStatsScope stats, IndexType type) { return(new StaticIndexItemEnumerator <DynamicTimeSeriesSegment>(items, filter: null, _compiled.Maps[collection], collection, stats, type)); }
protected override unsafe long CalculateIndexEtag(bool isStale, DocumentsOperationContext documentsContext, TransactionOperationContext indexContext) { if (_referencedCollections.Count == 0) { return(base.CalculateIndexEtag(isStale, documentsContext, indexContext)); } var minLength = MinimumSizeForCalculateIndexEtagLength(); var length = minLength + sizeof(long) * 2 * (Collections.Count * _referencedCollections.Count); // last referenced collection etags and last processed reference collection etags var indexEtagBytes = stackalloc byte[length]; CalculateIndexEtagInternal(indexEtagBytes, isStale, documentsContext, indexContext); var writePos = indexEtagBytes + minLength; return(StaticIndexHelper.CalculateIndexEtag(this, length, indexEtagBytes, writePos, documentsContext, indexContext)); }
public override int HandleMap(IndexItem indexItem, IEnumerable mapResults, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats) { if (_enumerationWrappers.TryGetValue(CurrentIndexingScope.Current.SourceCollection, out AnonymousObjectToBlittableMapResultsEnumerableWrapper wrapper) == false) { _enumerationWrappers[CurrentIndexingScope.Current.SourceCollection] = wrapper = new AnonymousObjectToBlittableMapResultsEnumerableWrapper(this, indexContext); } wrapper.InitializeForEnumeration(mapResults, indexContext, stats); return(PutMapResults(indexItem.LowerId, indexItem.SourceDocumentId, wrapper, indexContext, stats)); }
public AnonymousObjectToBlittableMapResultsEnumerableWrapper(MapReduceIndex index, TransactionOperationContext indexContext) { _indexContext = indexContext; _groupByFields = index.Definition.GroupByFields; _isMultiMap = index.IsMultiMap; _reduceKeyProcessor = new ReduceKeyProcessor(index.Definition.GroupByFields.Count, index._unmanagedBuffersPool); _compiledIndex = index._compiled; }
public void Clear(bool earlyExit, ActionType actionType, string collection, TransactionOperationContext indexContext)
{
    if (earlyExit)
    {
        return;
    }

    var dictionary = GetDictionary(actionType);
    if (dictionary.Count == 0)
    {
        return;
    }

    indexContext.Transaction.InnerTransaction.LowLevelTransaction.AfterCommitWhenNewReadTransactionsPrevented += _ =>
    {
        // we update this only after the transaction was committed
        dictionary.Remove(collection);
    };
}
public S3RestorePoints(SortedList <DateTime, RestorePoint> sortedList, TransactionOperationContext context, S3Settings s3Settings) : base(sortedList, context) { _client = new RavenAwsS3Client(s3Settings); }
private Dictionary<string, DatabaseStatusReport> CollectDatabaseInformation(TransactionOperationContext ctx, Dictionary<string, DatabaseStatusReport> prevReport)
{
    var result = new Dictionary<string, DatabaseStatusReport>();
    foreach (var dbName in _server.Cluster.GetDatabaseNames(ctx))
    {
        if (_token.IsCancellationRequested)
        {
            return result;
        }

        var report = new DatabaseStatusReport
        {
            Name = dbName,
            NodeName = _server.NodeTag
        };

        if (_server.DatabasesLandlord.DatabasesCache.TryGetValue(dbName, out var dbTask) == false)
        {
            DatabaseTopology topology;
            using (var rawRecord = _server.Cluster.ReadRawDatabaseRecord(ctx, dbName))
            {
                if (rawRecord == null)
                {
                    continue; // Database does not exist on this server
                }

                topology = rawRecord.Topology;
            }

            if (topology == null)
            {
                continue;
            }

            if (topology.RelevantFor(_server.NodeTag) == false)
            {
                continue;
            }

            report.Status = DatabaseStatus.Unloaded;
            result[dbName] = report;
            continue;
        }

        if (dbTask.IsFaulted)
        {
            var extractSingleInnerException = dbTask.Exception.ExtractSingleInnerException();
            if (Equals(extractSingleInnerException.Data[DatabasesLandlord.DoNotRemove], true))
            {
                report.Status = DatabaseStatus.Unloaded;
                result[dbName] = report;
                continue;
            }
        }

        if (dbTask.IsCanceled || dbTask.IsFaulted)
        {
            report.Status = DatabaseStatus.Faulted;
            report.Error = dbTask.Exception.ToString();
            result[dbName] = report;
            continue;
        }

        if (dbTask.IsCompleted == false)
        {
            report.Status = DatabaseStatus.Loading;
            if (_server.IdleDatabases.ContainsKey(dbName))
            {
                report.UpTime = TimeSpan.MinValue;
            }

            result[dbName] = report;
            continue;
        }

        var dbInstance = dbTask.Result;
        var currentHash = dbInstance.GetEnvironmentsHash();
        report.EnvironmentsHash = currentHash;

        var documentsStorage = dbInstance.DocumentsStorage;
        var indexStorage = dbInstance.IndexStore;

        if (dbInstance.DatabaseShutdown.IsCancellationRequested)
        {
            report.Status = DatabaseStatus.Shutdown;
            result[dbName] = report;
            continue;
        }

        report.Status = DatabaseStatus.Loaded;
        try
        {
            var now = dbInstance.Time.GetUtcNow();
            report.UpTime = now - dbInstance.StartTime;

            FillReplicationInfo(dbInstance, report);

            prevReport.TryGetValue(dbName, out var prevDatabaseReport);
            if (SupportedFeatures.Heartbeats.SendChangesOnly && prevDatabaseReport != null && prevDatabaseReport.EnvironmentsHash == currentHash)
            {
                report.Status = DatabaseStatus.NoChange;
                result[dbName] = report;
                continue;
            }

            using (var context = QueryOperationContext.Allocate(dbInstance, needsServerContext: true))
            {
                FillDocumentsInfo(prevDatabaseReport, dbInstance, report, context.Documents, documentsStorage);
                FillClusterTransactionInfo(report, dbInstance);

                if (indexStorage != null)
                {
                    foreach (var index in indexStorage.GetIndexes())
                    {
                        DatabaseStatusReport.ObservedIndexStatus stat = null;
                        if (prevDatabaseReport?.LastIndexStats.TryGetValue(index.Name, out stat) == true && stat?.LastTransactionId == index.LastTransactionId)
                        {
                            report.LastIndexStats[index.Name] = stat;
                            continue;
                        }

                        using (context.OpenReadTransaction())
                        {
                            FillIndexInfo(index, context, now, report);
                        }
                    }
                }
            }
        }
        catch (Exception e)
        {
            report.EnvironmentsHash = 0; // on error we should do the complete report collection path
            report.Error = e.ToString();
        }

        result[dbName] = report;
    }

    return result;
}
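// Note on CollectDatabaseInformation above: when change-only heartbeats are supported and a database's
// environments hash matches the previous report, the loop short-circuits with DatabaseStatus.NoChange
// instead of recomputing the document, cluster-transaction and index statistics for that database.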
public override IIndexedDocumentsEnumerator GetMapEnumerator(IEnumerable <Document> documents, string collection, TransactionOperationContext indexContext, IndexingStatsScope stats) { return(new AutoIndexDocsEnumerator(documents, stats)); }
private IEnumerable <NotificationTableValue> ReadPostponedActionsByPostponedUntilIndex(TransactionOperationContext context, DateTime cutoff) { var table = context.Transaction.InnerTransaction.OpenTable(_actionsSchema, NotificationsSchema.NotificationsTree); foreach (var tvr in table.SeekForwardFrom(_actionsSchema.Indexes[ByPostponedUntil], Slices.BeforeAllKeys, 0)) { var action = Read(context, ref tvr.Result.Reader); if (action.PostponedUntil == null) { continue; } if (action.PostponedUntil > cutoff) { break; } if (action.PostponedUntil == DateTime.MaxValue) { break; } yield return(action); } }
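// Note on the iterator above: it seeks forward through the ByPostponedUntil index and stops at the first
// notification postponed past the cutoff; entries postponed until DateTime.MaxValue (which appears to act
// as a "postponed indefinitely" marker) are likewise never yielded.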
public bool Execute(DocumentsOperationContext databaseContext, TransactionOperationContext indexContext, Lazy<IndexWriteOperation> writeOperation, IndexingStatsScope stats, CancellationToken token)
{
    if (_mapReduceContext.StoreByReduceKeyHash.Count == 0)
    {
        WriteLastEtags(indexContext); // we need to write etags here, because if we filtered everything during map then we will lose last indexed etag information and this will cause an endless indexing loop
        return false;
    }

    ReduceResultsSchema.Create(indexContext.Transaction.InnerTransaction, PageNumberToReduceResultTableName, 32);
    var table = indexContext.Transaction.InnerTransaction.OpenTable(ReduceResultsSchema, PageNumberToReduceResultTableName);

    var lowLevelTransaction = indexContext.Transaction.InnerTransaction.LowLevelTransaction;

    var writer = writeOperation.Value;

    var treeScopeStats = stats.For(IndexingOperation.Reduce.TreeScope, start: false);
    var nestedValuesScopeStats = stats.For(IndexingOperation.Reduce.NestedValuesScope, start: false);

    foreach (var store in _mapReduceContext.StoreByReduceKeyHash)
    {
        token.ThrowIfCancellationRequested();

        using (var reduceKeyHash = indexContext.GetLazyString(store.Key.ToString(CultureInfo.InvariantCulture)))
        using (store.Value)
        using (_aggregationBatch)
        {
            var modifiedStore = store.Value;

            switch (modifiedStore.Type)
            {
                case MapResultsStorageType.Tree:
                    using (treeScopeStats.Start())
                    {
                        HandleTreeReduction(indexContext, treeScopeStats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
                    }
                    break;
                case MapResultsStorageType.Nested:
                    using (nestedValuesScopeStats.Start())
                    {
                        HandleNestedValuesReduction(indexContext, nestedValuesScopeStats, modifiedStore, writer, reduceKeyHash, token);
                    }
                    break;
                default:
                    throw new ArgumentOutOfRangeException(modifiedStore.Type.ToString());
            }
        }

        if (_mapReduceContext.FreedPages.Count > 0)
        {
            long tmp = 0;
            using (treeScopeStats.Start())
            using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
            {
                foreach (var freedPage in _mapReduceContext.FreedPages)
                {
                    tmp = Bits.SwapBytes(freedPage);
                    table.DeleteByKey(pageNumberSlice);
                }
            }
        }
    }

    if (stats.Duration >= MinReduceDurationToCalculateProcessMemoryUsage)
    {
        var workingSet = MemoryInformation.GetWorkingSetInBytes();
        var privateMemory = AbstractLowMemoryMonitor.GetManagedMemoryInBytes() + AbstractLowMemoryMonitor.GetUnmanagedAllocationsInBytes();
        stats.RecordReduceMemoryStats(workingSet, privateMemory);
    }

    WriteLastEtags(indexContext);
    _mapReduceContext.StoreNextMapResultId();

    return false;
}
private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, MapReduceResultsStore modifiedStore, LowLevelTransaction lowLevelTransaction, IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table, CancellationToken token)
{
    EnsureValidTreeReductionStats(stats);

    var tree = modifiedStore.Tree;

    var branchesToAggregate = new HashSet<long>();
    var parentPagesToAggregate = new HashSet<long>();

    var page = new TreePage(null, Constants.Storage.PageSize);

    HashSet<long> compressedEmptyLeafs = null;
    Dictionary<long, Exception> failedAggregatedLeafs = null;

    foreach (var modifiedPage in modifiedStore.ModifiedPages)
    {
        token.ThrowIfCancellationRequested();

        page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

        stats.RecordReduceTreePageModified(page.IsLeaf);

        if (page.IsLeaf == false)
        {
            Debug.Assert(page.IsBranch);
            branchesToAggregate.Add(modifiedPage);
            continue;
        }

        var leafPage = page;

        var compressed = leafPage.IsCompressed;

        if (compressed)
        {
            stats.RecordCompressedLeafPage();
        }

        using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
        {
            if (leafPage.NumberOfEntries == 0)
            {
                if (leafPage.PageNumber == tree.State.RootPageNumber)
                {
                    writer.DeleteReduceResult(reduceKeyHash, stats);

                    var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                    using (Slice.External(indexContext.Allocator, (byte*)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                        table.DeleteByKey(pageNumSlice);

                    continue;
                }

                if (compressed)
                {
                    // it doesn't have any entries after decompression because
                    // each compressed entry has the delete tombstone
                    if (compressedEmptyLeafs == null)
                    {
                        compressedEmptyLeafs = new HashSet<long>();
                    }

                    compressedEmptyLeafs.Add(leafPage.PageNumber);
                    continue;
                }

                throw new UnexpectedReduceTreePageException($"Encountered empty page which isn't a root. Page {leafPage} in '{tree.Name}' tree.");
            }

            var parentPage = tree.GetParentPageOf(leafPage);

            stats.RecordReduceAttempts(leafPage.NumberOfEntries);

            try
            {
                using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        StoreAggregationResult(leafPage, table, result);
                        parentPagesToAggregate.Add(parentPage);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(leafPage.NumberOfEntries);

                    stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                if (failedAggregatedLeafs == null)
                {
                    failedAggregatedLeafs = new Dictionary<long, Exception>();
                }

                failedAggregatedLeafs.Add(leafPage.PageNumber, e);

                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: parentPage == -1, page: leafPage);
            }
        }
    }

    long tmp = 0;
    using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
    {
        foreach (var freedPage in modifiedStore.FreedPages)
        {
            tmp = Bits.SwapBytes(freedPage);
            table.DeleteByKey(pageNumberSlice);
        }
    }

    while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
    {
        token.ThrowIfCancellationRequested();

        var branchPages = parentPagesToAggregate;
        parentPagesToAggregate = new HashSet<long>();

        foreach (var pageNumber in branchPages)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            try
            {
                if (page.IsBranch == false)
                {
                    throw new UnexpectedReduceTreePageException("Parent page was found that wasn't a branch, error at " + page);
                }

                stats.RecordReduceAttempts(page.NumberOfEntries);

                var parentPage = tree.GetParentPageOf(page);

                using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, compressedEmptyLeafs, failedAggregatedLeafs, tree, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        parentPagesToAggregate.Add(parentPage);
                        StoreAggregationResult(page, table, result);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(page.NumberOfEntries);

                    stats.RecordReduceSuccesses(page.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true, page: page);
            }
            finally
            {
                branchesToAggregate.Remove(pageNumber);
            }
        }

        if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
        {
            // we still have unaggregated branches which were modified but their children were not modified (branch page splitting) so we missed them
            parentPagesToAggregate.Add(branchesToAggregate.First());
        }

        stats.RecordReduceAllocations(_index._threadAllocations.Allocations);
    }

    if (compressedEmptyLeafs != null && compressedEmptyLeafs.Count > 0)
    {
        // we had some compressed pages that are empty after decompression
        // let's remove them and reduce the tree once again
        modifiedStore.ModifiedPages.Clear();
        modifiedStore.FreedPages.Clear();

        foreach (var pageNumber in compressedEmptyLeafs)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            using (var emptyPage = tree.DecompressPage(page, skipCache: true))
            {
                if (emptyPage.NumberOfEntries > 0) // could be changed meanwhile
                {
                    continue;
                }

                modifiedStore.Tree.RemoveEmptyDecompressedPage(emptyPage);
            }
        }

        HandleTreeReduction(indexContext, stats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
    }
}
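// Note on HandleTreeReduction above: modified leaf pages are aggregated first and their results stored per
// page; the parents of those leaves are then aggregated level by level until the root is reached, at which
// point the final reduce outputs are written to the index. Compressed leaves that turn out to be empty after
// decompression are removed from the tree and HandleTreeReduction calls itself to re-reduce the affected tree.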
public abstract CompareExchangeResult Execute(TransactionOperationContext context, Table items, long index);
private bool TryAggregateChildPageOrThrow(long pageNumber, Table table, TransactionOperationContext indexContext, HashSet<long> remainingBranchesToAggregate, HashSet<long> compressedEmptyLeafs, Dictionary<long, Exception> failedAggregatedLeafs, Tree tree, CancellationToken token)
{
    if (remainingBranchesToAggregate.Contains(pageNumber))
    {
        // RavenDB-5363: we have a modified branch page but its children were not modified (branch page splitting) so we didn't
        // aggregate it yet, let's do it now
        try
        {
            var page = indexContext.Transaction.InnerTransaction.LowLevelTransaction.GetPage(pageNumber);
            var unaggregatedBranch = new TreePage(page.Pointer, Constants.Storage.PageSize);

            using (var result = AggregateBranchPage(unaggregatedBranch, table, indexContext, remainingBranchesToAggregate, compressedEmptyLeafs, failedAggregatedLeafs, tree, token))
            {
                StoreAggregationResult(unaggregatedBranch, table, result);
            }
        }
        finally
        {
            remainingBranchesToAggregate.Remove(pageNumber);
        }

        return true;
    }

    if (compressedEmptyLeafs != null && compressedEmptyLeafs.Contains(pageNumber))
    {
        // it's empty after decompression, we can safely skip it here
        return false;
    }

    var relatedPage = indexContext.Transaction.InnerTransaction.LowLevelTransaction.GetPage(pageNumber);
    var relatedTreePage = new TreePage(relatedPage.Pointer, Constants.Storage.PageSize);

    string decompressedDebug = null;

    if (relatedTreePage.IsCompressed)
    {
        // let's try to decompress it and check if it's empty
        // we decompress it for validation purposes only although it's a very rare case
        using (var decompressed = tree.DecompressPage(relatedTreePage, skipCache: true))
        {
            if (decompressed.NumberOfEntries == 0)
            {
                // it's empty so there is no related aggregation result, we can safely skip it
                return false;
            }

            decompressedDebug = decompressed.ToString();
        }
    }

    var message = $"Couldn't find a pre-computed aggregation result for the existing page: {relatedTreePage}. ";

    if (decompressedDebug != null)
    {
        message += $"Decompressed: {decompressedDebug}. ";
    }

    message += $"Tree state: {tree.State}. ";

    if (failedAggregatedLeafs != null && failedAggregatedLeafs.TryGetValue(pageNumber, out var exception))
    {
        message += $"The aggregation of this leaf (#{pageNumber}) has failed so the relevant result doesn't exist. " +
                   "Check the inner exception for leaf aggregation error details";

        throw new AggregationResultNotFoundException(message, exception);
    }

    throw new AggregationResultNotFoundException(message);
}
public override void HandleDelete(Tombstone tombstone, string collection, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats) { StaticIndexHelper.HandleDeleteBySourceDocumentId(this, _handleReferences, _handleCompareExchangeReferences, tombstone, collection, writer, indexContext, stats); }
public override IIndexedDocumentsEnumerator GetMapEnumerator(IEnumerable <Document> documents, string collection, TransactionOperationContext indexContext, IndexingStatsScope stats) { throw new NotSupportedException($"Index {Name} is an in-memory implementation of a faulty index", _e); }
private void NegotiateMatchEntryWithLeaderAndApplyEntries(TransactionOperationContext context, RemoteConnection connection, LogLengthNegotiation negotiation)
{
    long minIndex;
    long maxIndex;
    long midpointTerm;
    long midpointIndex;

    using (context.OpenReadTransaction())
    {
        minIndex = _engine.GetFirstEntryIndex(context);

        if (minIndex == 0) // no entries at all
        {
            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "No entries at all here, give me everything from the start",
                CurrentTerm = _term,
                LastLogIndex = 0,
            });

            return; // leader will know where to start from here
        }

        maxIndex = Math.Min(
            _engine.GetLastEntryIndex(context), // max
            negotiation.PrevLogIndex
        );

        midpointIndex = (maxIndex + minIndex) / 2;

        midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    while ((midpointTerm == negotiation.PrevLogTerm && midpointIndex == negotiation.PrevLogIndex) == false)
    {
        _engine.Timeout.Defer(_connection.Source);

        _engine.ValidateTerm(_term);

        if (midpointIndex == negotiation.PrevLogIndex && midpointTerm != negotiation.PrevLogTerm)
        {
            // our appended entries have diverged, same index with different terms.
            var msg = $"Our appended entries have diverged, same index with different terms. " +
                      $"My index/term {midpointIndex}/{midpointTerm}, while yours is {negotiation.PrevLogIndex}/{negotiation.PrevLogTerm}.";

            if (_engine.Log.IsInfoEnabled)
            {
                _engine.Log.Info($"{ToString()}: {msg}");
            }

            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = msg,
                CurrentTerm = _term,
                LastLogIndex = 0
            });

            return;
        }

        connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Negotiation,
            Message = $"Term/Index mismatch from leader, need to figure out at what point the logs match, range: {maxIndex} - {minIndex} | {midpointIndex} in term {midpointTerm}",
            CurrentTerm = _term,
            MaxIndex = maxIndex,
            MinIndex = minIndex,
            MidpointIndex = midpointIndex,
            MidpointTerm = midpointTerm
        });

        negotiation = connection.Read<LogLengthNegotiation>(context);

        _engine.Timeout.Defer(_connection.Source);

        if (negotiation.Truncated)
        {
            if (_engine.Log.IsInfoEnabled)
            {
                _engine.Log.Info($"{ToString()}: Got a truncated response from the leader, will request all entries");
            }

            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "We have entries that are already truncated at the leader, will ask for full snapshot",
                CurrentTerm = _term,
                LastLogIndex = 0
            });

            return;
        }

        using (context.OpenReadTransaction())
        {
            if (_engine.GetTermFor(context, negotiation.PrevLogIndex) == negotiation.PrevLogTerm)
            {
                minIndex = Math.Min(midpointIndex + 1, maxIndex);
            }
            else
            {
                maxIndex = Math.Max(midpointIndex - 1, minIndex);
            }
        }

        midpointIndex = (maxIndex + minIndex) / 2;

        using (context.OpenReadTransaction())
            midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    if (_engine.Log.IsInfoEnabled)
    {
        _engine.Log.Info($"{ToString()}: agreed upon last matched index = {midpointIndex} on term = {midpointTerm}");
    }

    connection.Send(context, new LogLengthNegotiationResponse
    {
        Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
        Message = $"Found a log index / term match at {midpointIndex} with term {midpointTerm}",
        CurrentTerm = _term,
        LastLogIndex = midpointIndex,
    });
}
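// NegotiateMatchEntryWithLeaderAndApplyEntries above narrows down the point where the follower's log and the
// leader's log diverge by probing the midpoint of the remaining range and moving the bounds according to
// whether the terms match there. The same search in isolation against an in-memory log (illustrative only;
// the callbacks below are stand-ins for the follower/leader exchange, not RavenDB APIs):
using System;

static class LogNegotiationSketch
{
    // Returns the highest index at which the local term agrees with the leader's term,
    // or minIndex - 1 when the logs do not match anywhere in the range.
    public static long FindLastMatch(long minIndex, long maxIndex,
        Func<long, long> localTermAt, Func<long, long> leaderTermAt)
    {
        var lastMatch = minIndex - 1;

        while (minIndex <= maxIndex)
        {
            var midpoint = (minIndex + maxIndex) / 2;

            if (localTermAt(midpoint) == leaderTermAt(midpoint))
            {
                lastMatch = midpoint;    // the logs agree here, look for a later match
                minIndex = midpoint + 1;
            }
            else
            {
                maxIndex = midpoint - 1; // diverged, the match must be earlier
            }
        }

        return lastMatch;
    }
}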