private void HandleReductionError(Exception error, LazyStringValue reduceKeyHash, IndexWriteOperation writer, IndexingStatsScope stats, bool updateStats, TreePage page, int numberOfNestedValues = -1)
{
    var builder = new StringBuilder("Failed to execute reduce function on ");

    if (page != null)
    {
        builder.Append($"page {page} ");
    }
    else
    {
        builder.Append("nested values ");
    }

    builder.Append($"of '{_indexDefinition.Name}' index. The relevant reduce result is going to be removed from the index ");
    builder.Append($"as it would be incorrect due to encountered errors (reduce key hash: {reduceKeyHash}");

    var sampleItem = _aggregationBatch?.Items?.FirstOrDefault();

    if (sampleItem != null)
        builder.Append($", sample item to reduce: {sampleItem}");

    builder.Append(")");

    var message = builder.ToString();

    if (_logger.IsInfoEnabled)
        _logger.Info(message, error);

    try
    {
        writer.DeleteReduceResult(reduceKeyHash, stats);
    }
    catch (Exception e)
    {
        if (_logger.IsInfoEnabled)
            _logger.Info($"Failed to delete an index result from '{_indexDefinition.Name}' index on reduce error (reduce key hash: {reduceKeyHash})", e);
    }

    if (updateStats)
    {
        var numberOfEntries = page?.NumberOfEntries ?? numberOfNestedValues;

        Debug.Assert(numberOfEntries != -1);

        // we'll only want to record exceptions on some of these, to give the
        // user information about what is going on, otherwise we'll have to wait a lot if we
        // are processing a big batch, and this can be a perf killer. See: RavenDB-11038
        stats.RecordReduceErrors(numberOfEntries);

        if (stats.NumberOfKeptReduceErrors < IndexStorage.MaxNumberOfKeptErrors)
            stats.AddReduceError(message + $" Exception: {error}");

        var failureInfo = new IndexFailureInformation
        {
            Name = _index.Name,
            MapErrors = stats.MapErrors,
            MapAttempts = stats.MapAttempts,
            ReduceErrors = stats.ReduceErrors,
            ReduceAttempts = stats.ReduceAttempts
        };

        if (failureInfo.IsInvalidIndex(_isStaleBecauseOfRunningReduction))
        {
            throw new ExcessiveNumberOfReduceErrorsException("Excessive number of errors during the reduce phase for the current batch. Failure info: " +
                                                             failureInfo.GetErrorMessage());
        }
    }
}
private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token, MapReduceResultsStore modifiedStore,
    LowLevelTransaction lowLevelTransaction, IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table)
{
    EnsureValidTreeReductionStats(stats);

    var tree = modifiedStore.Tree;

    var branchesToAggregate = new HashSet<long>();
    var parentPagesToAggregate = new HashSet<long>();

    var page = new TreePage(null, Constants.Storage.PageSize);

    foreach (var modifiedPage in modifiedStore.ModifiedPages)
    {
        token.ThrowIfCancellationRequested();

        page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

        stats.RecordReduceTreePageModified(page.IsLeaf);

        if (page.IsLeaf == false)
        {
            Debug.Assert(page.IsBranch);
            branchesToAggregate.Add(modifiedPage);
            continue;
        }

        var leafPage = page;

        var compressed = leafPage.IsCompressed;

        if (compressed)
            stats.RecordCompressedLeafPage();

        using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
        {
            if (leafPage.NumberOfEntries == 0)
            {
                if (leafPage.PageNumber != tree.State.RootPageNumber)
                {
                    throw new InvalidOperationException(
                        $"Encountered empty page which isn't a root. Page #{leafPage.PageNumber} in '{tree.Name}' tree.");
                }

                writer.DeleteReduceResult(reduceKeyHash, stats);

                var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                using (Slice.External(indexContext.Allocator, (byte*)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                    table.DeleteByKey(pageNumSlice);

                continue;
            }

            var parentPage = tree.GetParentPageOf(leafPage);

            stats.RecordReduceAttempts(leafPage.NumberOfEntries);

            try
            {
                using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        StoreAggregationResult(leafPage.PageNumber, leafPage.NumberOfEntries, table, result);
                        parentPagesToAggregate.Add(parentPage);
                    }

                    _metrics.MapReduceReducedPerSecond.Mark(leafPage.NumberOfEntries);

                    stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                }
            }
            catch (Exception e)
            {
                _index.HandleError(e);

                var message = $"Failed to execute reduce function for reduce key '{tree.Name}' on a leaf page #{leafPage} of '{_indexDefinition.Name}' index.";

                if (_logger.IsInfoEnabled)
                    _logger.Info(message, e);

                if (parentPage == -1)
                {
                    stats.RecordReduceErrors(leafPage.NumberOfEntries);
                    stats.AddReduceError(message + $" Exception: {e}");
                }
            }
        }
    }

    long tmp = 0;

    using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
    {
        foreach (var freedPage in modifiedStore.FreedPages)
        {
            tmp = Bits.SwapBytes(freedPage);
            table.DeleteByKey(pageNumberSlice);
        }
    }

    while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
    {
        token.ThrowIfCancellationRequested();

        var branchPages = parentPagesToAggregate;
        parentPagesToAggregate = new HashSet<long>();

        foreach (var pageNumber in branchPages)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            try
            {
                if (page.IsBranch == false)
                {
                    throw new InvalidOperationException("Parent page was found that wasn't a branch, error at " + page.PageNumber);
                }

                stats.RecordReduceAttempts(page.NumberOfEntries);

                var parentPage = tree.GetParentPageOf(page);

                using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        parentPagesToAggregate.Add(parentPage);

                        StoreAggregationResult(page.PageNumber, page.NumberOfEntries, table, result);
                    }

                    _metrics.MapReduceReducedPerSecond.Mark(page.NumberOfEntries);

                    stats.RecordReduceSuccesses(page.NumberOfEntries);
                }
            }
            catch (Exception e)
            {
                _index.HandleError(e);

                var message = $"Failed to execute reduce function for reduce key '{tree.Name}' on a branch page #{page} of '{_indexDefinition.Name}' index.";

                if (_logger.IsInfoEnabled)
                    _logger.Info(message, e);

                stats.RecordReduceErrors(page.NumberOfEntries);
                stats.AddReduceError(message + $" Exception: {e}");
            }
            finally
            {
                branchesToAggregate.Remove(pageNumber);
            }
        }

        if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
        {
            // we still have unaggregated branches which were modified but their children were not modified (branch page splitting) so we missed them
            parentPagesToAggregate.Add(branchesToAggregate.First());
        }
    }
}
private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, MapReduceResultsStore modifiedStore,
    LowLevelTransaction lowLevelTransaction, IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table, CancellationToken token)
{
    EnsureValidTreeReductionStats(stats);

    var tree = modifiedStore.Tree;

    var branchesToAggregate = new HashSet<long>();
    var parentPagesToAggregate = new HashSet<long>();

    var page = new TreePage(null, Constants.Storage.PageSize);

    HashSet<long> compressedEmptyLeafs = null;

    Dictionary<long, Exception> failedAggregatedLeafs = null;

    foreach (var modifiedPage in modifiedStore.ModifiedPages)
    {
        token.ThrowIfCancellationRequested();

        page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

        stats.RecordReduceTreePageModified(page.IsLeaf);

        if (page.IsLeaf == false)
        {
            Debug.Assert(page.IsBranch);
            branchesToAggregate.Add(modifiedPage);

            continue;
        }

        var leafPage = page;

        var compressed = leafPage.IsCompressed;

        if (compressed)
            stats.RecordCompressedLeafPage();

        using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
        {
            if (leafPage.NumberOfEntries == 0)
            {
                if (leafPage.PageNumber == tree.State.RootPageNumber)
                {
                    writer.DeleteReduceResult(reduceKeyHash, stats);

                    var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                    using (Slice.External(indexContext.Allocator, (byte*)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                        table.DeleteByKey(pageNumSlice);

                    continue;
                }

                if (compressed)
                {
                    // it doesn't have any entries after decompression because
                    // each compressed entry has the delete tombstone

                    if (compressedEmptyLeafs == null)
                        compressedEmptyLeafs = new HashSet<long>();

                    compressedEmptyLeafs.Add(leafPage.PageNumber);
                    continue;
                }

                throw new UnexpectedReduceTreePageException(
                    $"Encountered empty page which isn't a root. Page {leafPage} in '{tree.Name}' tree.");
            }

            var parentPage = tree.GetParentPageOf(leafPage);

            stats.RecordReduceAttempts(leafPage.NumberOfEntries);

            try
            {
                using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        StoreAggregationResult(leafPage, table, result);
                        parentPagesToAggregate.Add(parentPage);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(leafPage.NumberOfEntries);

                    stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                if (failedAggregatedLeafs == null)
                    failedAggregatedLeafs = new Dictionary<long, Exception>();

                failedAggregatedLeafs.Add(leafPage.PageNumber, e);

                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: parentPage == -1, page: leafPage);
            }
        }
    }

    // remove aggregation results that were stored for pages freed in this batch
    long tmp = 0;

    using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
    {
        foreach (var freedPage in modifiedStore.FreedPages)
        {
            tmp = Bits.SwapBytes(freedPage);
            table.DeleteByKey(pageNumberSlice);
        }
    }

    // aggregate modified branch pages bottom-up until we reach the root of the tree
    while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
    {
        token.ThrowIfCancellationRequested();

        var branchPages = parentPagesToAggregate;
        parentPagesToAggregate = new HashSet<long>();

        foreach (var pageNumber in branchPages)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            try
            {
                if (page.IsBranch == false)
                {
                    throw new UnexpectedReduceTreePageException("Parent page was found that wasn't a branch, error at " + page);
                }

                stats.RecordReduceAttempts(page.NumberOfEntries);

                var parentPage = tree.GetParentPageOf(page);

                using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, compressedEmptyLeafs, failedAggregatedLeafs, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        parentPagesToAggregate.Add(parentPage);

                        StoreAggregationResult(page, table, result);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(page.NumberOfEntries);

                    stats.RecordReduceSuccesses(page.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true, page: page);
            }
            finally
            {
                branchesToAggregate.Remove(pageNumber);
            }
        }

        if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
        {
            // we still have unaggregated branches which were modified but their children were not modified (branch page splitting) so we missed them
            parentPagesToAggregate.Add(branchesToAggregate.First());
        }
    }

    if (compressedEmptyLeafs != null && compressedEmptyLeafs.Count > 0)
    {
        // we had some compressed pages that are empty after decompression
        // let's remove them and reduce the tree once again

        modifiedStore.ModifiedPages.Clear();
        modifiedStore.FreedPages.Clear();

        foreach (var pageNumber in compressedEmptyLeafs)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            using (var emptyPage = tree.DecompressPage(page, skipCache: true))
            {
                if (emptyPage.NumberOfEntries > 0) // could be changed meanwhile
                    continue;

                modifiedStore.Tree.RemoveEmptyDecompressedPage(emptyPage);
            }
        }

        HandleTreeReduction(indexContext, stats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
    }
}
private void HandleNestedValuesReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token, MapReduceResultsStore modifiedStore,
    IndexWriteOperation writer, LazyStringValue reduceKeyHash)
{
    EnsureValidNestedValuesReductionStats(stats);

    var numberOfEntriesToReduce = 0;

    try
    {
        var section = modifiedStore.GetNestedResultsSection();

        if (section.IsModified == false)
            return;

        using (_nestedValuesReductionStats.NestedValuesRead.Start())
        {
            numberOfEntriesToReduce += section.GetResults(indexContext, _aggregationBatch);
        }

        stats.RecordReduceAttempts(numberOfEntriesToReduce);

        AggregationResult result;
        using (_nestedValuesReductionStats.NestedValuesAggregation.Start())
        {
            result = AggregateOn(_aggregationBatch, indexContext, token);
        }

        if (section.IsNew == false)
            writer.DeleteReduceResult(reduceKeyHash, stats);

        foreach (var output in result.GetOutputs())
        {
            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
        }

        _index.ReducesPerSec.Mark(numberOfEntriesToReduce);
        _metrics.MapReduceReducedPerSecond.Mark(numberOfEntriesToReduce);

        stats.RecordReduceSuccesses(numberOfEntriesToReduce);
    }
    catch (Exception e)
    {
        _index.HandleError(e);

        foreach (var item in _aggregationBatch)
        {
            item.Dispose();
        }

        var message = $"Failed to execute reduce function for reduce key '{reduceKeyHash}' on nested values of '{_indexDefinition.Name}' index.";

        if (_logger.IsInfoEnabled)
            _logger.Info(message, e);

        stats.RecordReduceErrors(numberOfEntriesToReduce);
        stats.AddReduceError(message + $" Exception: {e}");
    }
    finally
    {
        _aggregationBatch.Clear();
    }
}