public override int HandleMap(LazyStringValue lowerId, LazyStringValue id, IEnumerable mapResults, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats)
{
    EnsureValidStats(stats);

    bool mustDelete;
    using (_stats.BloomStats.Start())
    {
        // If the bloom filter has already seen this id, an older version of the
        // document may have been indexed and must be deleted before re-indexing.
        mustDelete = _filters.Add(lowerId) == false;
    }

    if (mustDelete)
        writer.Delete(lowerId, stats);

    var numberOfOutputs = 0;
    foreach (var mapResult in mapResults)
    {
        writer.IndexDocument(lowerId, mapResult, stats, indexContext);
        numberOfOutputs++;
    }

    HandleIndexOutputsPerDocument(id ?? lowerId, numberOfOutputs, stats);

    DocumentDatabase.Metrics.MapIndexes.IndexedPerSec.Mark(numberOfOutputs);

    return numberOfOutputs;
}
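For reference, the delete-before-reindex decision above hinges on the bloom filter call `_filters.Add(lowerId)`, which returns false when the id was probably added before. Below is a minimal, self-contained sketch of that pattern; SimpleBloomFilter, InMemoryWriter, and their members are illustrative stand-ins, not RavenDB types.

// Minimal sketch of the "add to bloom filter, delete on probable re-index" pattern.
// SimpleBloomFilter and InMemoryWriter are hypothetical stand-ins, not RavenDB types.
using System;
using System.Collections.Generic;

class SimpleBloomFilter
{
    private readonly bool[] _bits = new bool[1 << 20];

    // Returns false when the key was (probably) added before.
    public bool Add(string key)
    {
        // Two crude hash probes; a real filter uses better hashing and sizing.
        var h1 = (uint)key.GetHashCode() % (uint)_bits.Length;
        var h2 = (uint)StringComparer.OrdinalIgnoreCase.GetHashCode(key) % (uint)_bits.Length;

        var seen = _bits[h1] && _bits[h2];
        _bits[h1] = true;
        _bits[h2] = true;
        return seen == false;
    }
}

class InMemoryWriter
{
    private readonly Dictionary<string, List<string>> _entries = new Dictionary<string, List<string>>();

    public void Delete(string key) => _entries.Remove(key);

    public void Index(string key, string mapResult)
    {
        if (_entries.TryGetValue(key, out var list) == false)
            _entries[key] = list = new List<string>();
        list.Add(mapResult);
    }
}

static class MapHandlerSketch
{
    public static int HandleMap(string lowerId, IEnumerable<string> mapResults, SimpleBloomFilter filter, InMemoryWriter writer)
    {
        // The filter has seen this id before, so older entries may exist and must go first.
        if (filter.Add(lowerId) == false)
            writer.Delete(lowerId);

        var outputs = 0;
        foreach (var result in mapResults)
        {
            writer.Index(lowerId, result);
            outputs++;
        }
        return outputs;
    }
}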
private void HandleNestedValuesReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token, MapReduceResultsStore modifiedStore, IndexWriteOperation writer, LazyStringValue reduceKeyHash)
{
    EnsureValidNestedValuesReductionStats(stats);

    var numberOfEntriesToReduce = 0;

    try
    {
        var section = modifiedStore.GetNestedResultsSection();

        if (section.IsModified == false)
            return;

        using (_nestedValuesReductionStats.NestedValuesRead.Start())
        {
            numberOfEntriesToReduce += section.GetResults(indexContext, _aggregationBatch.Items);
        }

        stats.RecordReduceAttempts(numberOfEntriesToReduce);

        AggregationResult result;
        using (_nestedValuesReductionStats.NestedValuesAggregation.Start())
        {
            result = AggregateOn(_aggregationBatch.Items, indexContext, token);
        }

        if (section.IsNew == false)
            writer.DeleteReduceResult(reduceKeyHash, stats);

        foreach (var output in result.GetOutputs())
        {
            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
        }

        _index.ReducesPerSec.Mark(numberOfEntriesToReduce);
        _metrics.MapReduceIndexes.ReducedPerSec.Mark(numberOfEntriesToReduce);

        stats.RecordReduceSuccesses(numberOfEntriesToReduce);
    }
    catch (Exception e)
    {
        _index.HandleError(e);

        var message = $"Failed to execute reduce function for reduce key '{reduceKeyHash}' on nested values of '{_indexDefinition.Name}' index.";

        if (_logger.IsInfoEnabled)
            _logger.Info(message, e);

        stats.RecordReduceErrors(numberOfEntriesToReduce);
        stats.AddReduceError(message + $" Exception: {e}");
    }
}
private void HandleNestedValuesReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, MapReduceResultsStore modifiedStore, IndexWriteOperation writer, LazyStringValue reduceKeyHash, CancellationToken token)
{
    EnsureValidNestedValuesReductionStats(stats);

    var numberOfEntriesToReduce = 0;

    try
    {
        var section = modifiedStore.GetNestedResultsSection();

        if (section.IsModified == false)
            return;

        using (_nestedValuesReductionStats.NestedValuesRead.Start())
        {
            numberOfEntriesToReduce += section.GetResults(indexContext, _aggregationBatch.Items);
        }

        stats.RecordReduceAttempts(numberOfEntriesToReduce);

        AggregationResult result;
        using (_nestedValuesReductionStats.NestedValuesAggregation.Start())
        {
            result = AggregateOn(_aggregationBatch.Items, indexContext, _nestedValuesReductionStats.NestedValuesAggregation, token);
        }

        if (section.IsNew == false)
            writer.DeleteReduceResult(reduceKeyHash, stats);

        foreach (var output in result.GetOutputs())
        {
            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
        }

        _index.ReducesPerSec.MarkSingleThreaded(numberOfEntriesToReduce);
        _metrics.MapReduceIndexes.ReducedPerSec.Mark(numberOfEntriesToReduce);

        stats.RecordReduceSuccesses(numberOfEntriesToReduce);

        _index.UpdateThreadAllocations(indexContext, writer, stats, updateReduceStats: true);
    }
    catch (Exception e) when (e.IsIndexError())
    {
        _index.ErrorIndexIfCriticalException(e);

        HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true, page: null, numberOfNestedValues: numberOfEntriesToReduce);
    }
}
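Both overloads above follow the same read-aggregate-replace cycle for a reduce key: collect the modified nested entries, aggregate them, drop the previous reduce result if one exists, and index the fresh outputs. Here is a minimal sketch of that cycle, assuming a plain dictionary store and a simple sum standing in for the index's reduce function:

// Illustrative read-aggregate-replace cycle for a single reduce key.
// reduceResults and the sum are hypothetical; RavenDB's AggregateOn runs the index's own reduce function.
using System;
using System.Collections.Generic;
using System.Linq;

static class NestedValuesReductionSketch
{
    public static void Reduce(string reduceKeyHash, IReadOnlyList<int> modifiedEntries, bool isNewSection, Dictionary<string, int> reduceResults)
    {
        if (modifiedEntries.Count == 0)
            return; // nothing was modified for this key

        // Aggregate the batch (a real reduce would execute the user-defined reduce function).
        var aggregated = modifiedEntries.Sum();

        // Replace the previous reduce result unless this section is brand new.
        if (isNewSection == false)
            reduceResults.Remove(reduceKeyHash);

        reduceResults[reduceKeyHash] = aggregated;
    }
}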
public override int HandleMap(LazyStringValue key, IEnumerable mapResults, IndexWriteOperation writer, TransactionOperationContext indexContext, IndexingStatsScope stats)
{
    EnsureValidStats(stats);

    bool mustDelete;
    using (_stats.BloomStats.Start())
    {
        mustDelete = _filter.Add(key) == false;
    }

    if (mustDelete)
        writer.Delete(key, stats);

    var numberOfOutputs = 0;
    foreach (var mapResult in mapResults)
    {
        writer.IndexDocument(key, mapResult, stats, indexContext);
        numberOfOutputs++;

        if (EnsureValidNumberOfOutputsForDocument(numberOfOutputs))
            continue;

        // Too many outputs for a single document - undo its entries and fail the document.
        writer.Delete(key, stats);

        throw new InvalidOperationException($"Index '{Name}' has already produced {numberOfOutputs} map results for a source document '{key}', while the allowed max number of outputs is {MaxNumberOfIndexOutputs} per one document. Please verify this index definition and consider a re-design of your entities or index.");
    }

    DocumentDatabase.Metrics.IndexedPerSecond.Mark();

    return numberOfOutputs;
}
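The loop above caps how many map results a single document may emit and rolls back that document's entries before failing. A compact sketch of that guard follows; the cap value and the in-memory entries dictionary are illustrative stand-ins for the index writer and its configured limit:

// Sketch of a per-document output cap with rollback on violation.
// MaxOutputsPerDocument is an illustrative value, not RavenDB's configured default.
using System;
using System.Collections.Generic;

static class OutputCapSketch
{
    private const int MaxOutputsPerDocument = 15;

    public static int IndexWithCap(string key, IEnumerable<string> mapResults, Dictionary<string, List<string>> indexEntries)
    {
        var outputs = 0;
        foreach (var result in mapResults)
        {
            if (indexEntries.TryGetValue(key, out var list) == false)
                indexEntries[key] = list = new List<string>();
            list.Add(result);
            outputs++;

            if (outputs <= MaxOutputsPerDocument)
                continue;

            // Undo everything written for this document, then surface the error.
            indexEntries.Remove(key);
            throw new InvalidOperationException(
                $"Document '{key}' produced more than {MaxOutputsPerDocument} map results; consider redesigning the index.");
        }
        return outputs;
    }
}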
private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, MapReduceResultsStore modifiedStore, LowLevelTransaction lowLevelTransaction, IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table, CancellationToken token)
{
    EnsureValidTreeReductionStats(stats);

    var tree = modifiedStore.Tree;

    var branchesToAggregate = new HashSet<long>();
    var parentPagesToAggregate = new HashSet<long>();

    var page = new TreePage(null, Constants.Storage.PageSize);

    HashSet<long> compressedEmptyLeafs = null;
    Dictionary<long, Exception> failedAggregatedLeafs = null;

    foreach (var modifiedPage in modifiedStore.ModifiedPages)
    {
        token.ThrowIfCancellationRequested();

        page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

        stats.RecordReduceTreePageModified(page.IsLeaf);

        if (page.IsLeaf == false)
        {
            Debug.Assert(page.IsBranch);
            branchesToAggregate.Add(modifiedPage);
            continue;
        }

        var leafPage = page;

        var compressed = leafPage.IsCompressed;

        if (compressed)
            stats.RecordCompressedLeafPage();

        using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
        {
            if (leafPage.NumberOfEntries == 0)
            {
                if (leafPage.PageNumber == tree.State.RootPageNumber)
                {
                    writer.DeleteReduceResult(reduceKeyHash, stats);

                    var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                    using (Slice.External(indexContext.Allocator, (byte*)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                        table.DeleteByKey(pageNumSlice);

                    continue;
                }

                if (compressed)
                {
                    // it doesn't have any entries after decompression because
                    // each compressed entry has the delete tombstone
                    if (compressedEmptyLeafs == null)
                        compressedEmptyLeafs = new HashSet<long>();

                    compressedEmptyLeafs.Add(leafPage.PageNumber);
                    continue;
                }

                throw new UnexpectedReduceTreePageException(
                    $"Encountered empty page which isn't a root. Page {leafPage} in '{tree.Name}' tree.");
            }

            var parentPage = tree.GetParentPageOf(leafPage);

            stats.RecordReduceAttempts(leafPage.NumberOfEntries);

            try
            {
                using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        StoreAggregationResult(leafPage, table, result);
                        parentPagesToAggregate.Add(parentPage);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(leafPage.NumberOfEntries);

                    stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                if (failedAggregatedLeafs == null)
                    failedAggregatedLeafs = new Dictionary<long, Exception>();

                failedAggregatedLeafs.Add(leafPage.PageNumber, e);

                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: parentPage == -1, page: leafPage);
            }
        }
    }

    long tmp = 0;
    using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
    {
        foreach (var freedPage in modifiedStore.FreedPages)
        {
            tmp = Bits.SwapBytes(freedPage);
            table.DeleteByKey(pageNumberSlice);
        }
    }

    while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
    {
        token.ThrowIfCancellationRequested();

        var branchPages = parentPagesToAggregate;
        parentPagesToAggregate = new HashSet<long>();

        foreach (var pageNumber in branchPages)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            try
            {
                if (page.IsBranch == false)
                {
                    throw new UnexpectedReduceTreePageException("Parent page was found that wasn't a branch, error at " + page);
                }

                stats.RecordReduceAttempts(page.NumberOfEntries);

                var parentPage = tree.GetParentPageOf(page);

                using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, compressedEmptyLeafs, failedAggregatedLeafs, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        parentPagesToAggregate.Add(parentPage);
                        StoreAggregationResult(page, table, result);
                    }

                    _metrics.MapReduceIndexes.ReducedPerSec.Mark(page.NumberOfEntries);

                    stats.RecordReduceSuccesses(page.NumberOfEntries);
                }
            }
            catch (Exception e) when (e is OperationCanceledException == false)
            {
                _index.ErrorIndexIfCriticalException(e);

                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true, page: page);
            }
            finally
            {
                branchesToAggregate.Remove(pageNumber);
            }
        }

        if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
        {
            // we still have unaggregated branches which were modified but their children were not modified (branch page splitting) so we missed them
            parentPagesToAggregate.Add(branchesToAggregate.First());
        }
    }

    if (compressedEmptyLeafs != null && compressedEmptyLeafs.Count > 0)
    {
        // we had some compressed pages that are empty after decompression
        // let's remove them and reduce the tree once again
        modifiedStore.ModifiedPages.Clear();
        modifiedStore.FreedPages.Clear();

        foreach (var pageNumber in compressedEmptyLeafs)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            using (var emptyPage = tree.DecompressPage(page, skipCache: true))
            {
                if (emptyPage.NumberOfEntries > 0) // could be changed meanwhile
                    continue;

                modifiedStore.Tree.RemoveEmptyDecompressedPage(emptyPage);
            }
        }

        HandleTreeReduction(indexContext, stats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
    }
}
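HandleTreeReduction aggregates bottom-up: modified leaf pages are reduced first, their results are stored per page, and then parent branch pages are aggregated level by level until the root, whose outputs become the final reduce results. The sketch below shows that climb over a plain in-memory tree; ReduceNode and the sum aggregation are illustrative and ignore Voron's paging, compression, and error handling:

// Bottom-up re-aggregation sketch: leaves first, then parents level by level up to the root.
// pageResults is assumed to already hold results for unmodified pages from earlier reductions.
using System;
using System.Collections.Generic;
using System.Linq;

class ReduceNode
{
    public long PageNumber;
    public long ParentPage = -1;              // -1 marks the root
    public List<int> LeafEntries;             // non-null for leaf pages
    public List<long> Children = new List<long>();
}

static class TreeReductionSketch
{
    public static int Reduce(Dictionary<long, ReduceNode> tree, IEnumerable<long> modifiedLeaves, Dictionary<long, int> pageResults)
    {
        var parentsToAggregate = new HashSet<long>();

        // First pass: aggregate every modified leaf and remember its parent.
        foreach (var pageNumber in modifiedLeaves)
        {
            var leaf = tree[pageNumber];
            pageResults[pageNumber] = leaf.LeafEntries.Sum();

            if (leaf.ParentPage == -1)
                return pageResults[pageNumber]; // the whole tree is a single leaf page

            parentsToAggregate.Add(leaf.ParentPage);
        }

        // Then climb: aggregate branches from their children's stored results until the root is reached.
        while (parentsToAggregate.Count > 0)
        {
            var current = parentsToAggregate;
            parentsToAggregate = new HashSet<long>();

            foreach (var pageNumber in current)
            {
                var branch = tree[pageNumber];
                pageResults[pageNumber] = branch.Children.Sum(c => pageResults[c]);

                if (branch.ParentPage == -1)
                    return pageResults[pageNumber]; // root result is the final reduce output

                parentsToAggregate.Add(branch.ParentPage);
            }
        }

        throw new InvalidOperationException("No modified pages were aggregated up to the root.");
    }
}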
private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats, CancellationToken token, MapReduceResultsStore modifiedStore, LowLevelTransaction lowLevelTransaction, IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table)
{
    EnsureValidTreeReductionStats(stats);

    var tree = modifiedStore.Tree;

    var branchesToAggregate = new HashSet<long>();
    var parentPagesToAggregate = new HashSet<long>();

    var page = new TreePage(null, Constants.Storage.PageSize);

    foreach (var modifiedPage in modifiedStore.ModifiedPages)
    {
        token.ThrowIfCancellationRequested();

        page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

        stats.RecordReduceTreePageModified(page.IsLeaf);

        if (page.IsLeaf == false)
        {
            Debug.Assert(page.IsBranch);
            branchesToAggregate.Add(modifiedPage);
            continue;
        }

        var leafPage = page;

        var compressed = leafPage.IsCompressed;

        if (compressed)
            stats.RecordCompressedLeafPage();

        using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
        {
            if (leafPage.NumberOfEntries == 0)
            {
                if (leafPage.PageNumber != tree.State.RootPageNumber)
                {
                    throw new InvalidOperationException(
                        $"Encountered empty page which isn't a root. Page #{leafPage.PageNumber} in '{tree.Name}' tree.");
                }

                writer.DeleteReduceResult(reduceKeyHash, stats);

                var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                using (Slice.External(indexContext.Allocator, (byte*)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                    table.DeleteByKey(pageNumSlice);

                continue;
            }

            var parentPage = tree.GetParentPageOf(leafPage);

            stats.RecordReduceAttempts(leafPage.NumberOfEntries);

            try
            {
                using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        StoreAggregationResult(leafPage.PageNumber, leafPage.NumberOfEntries, table, result);
                        parentPagesToAggregate.Add(parentPage);
                    }

                    _metrics.MapReduceReducedPerSecond.Mark(leafPage.NumberOfEntries);

                    stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                }
            }
            catch (Exception e)
            {
                _index.HandleError(e);

                var message = $"Failed to execute reduce function for reduce key '{tree.Name}' on a leaf page #{leafPage} of '{_indexDefinition.Name}' index.";

                if (_logger.IsInfoEnabled)
                    _logger.Info(message, e);

                if (parentPage == -1)
                {
                    stats.RecordReduceErrors(leafPage.NumberOfEntries);
                    stats.AddReduceError(message + $" Exception: {e}");
                }
            }
        }
    }

    long tmp = 0;
    using (Slice.External(indexContext.Allocator, (byte*)&tmp, sizeof(long), out Slice pageNumberSlice))
    {
        foreach (var freedPage in modifiedStore.FreedPages)
        {
            tmp = Bits.SwapBytes(freedPage);
            table.DeleteByKey(pageNumberSlice);
        }
    }

    while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
    {
        token.ThrowIfCancellationRequested();

        var branchPages = parentPagesToAggregate;
        parentPagesToAggregate = new HashSet<long>();

        foreach (var pageNumber in branchPages)
        {
            page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

            try
            {
                if (page.IsBranch == false)
                {
                    throw new InvalidOperationException("Parent page was found that wasn't a branch, error at " + page.PageNumber);
                }

                stats.RecordReduceAttempts(page.NumberOfEntries);

                var parentPage = tree.GetParentPageOf(page);

                using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, token))
                {
                    if (parentPage == -1)
                    {
                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        foreach (var output in result.GetOutputs())
                        {
                            writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                        }
                    }
                    else
                    {
                        parentPagesToAggregate.Add(parentPage);
                        StoreAggregationResult(page.PageNumber, page.NumberOfEntries, table, result);
                    }

                    _metrics.MapReduceReducedPerSecond.Mark(page.NumberOfEntries);

                    stats.RecordReduceSuccesses(page.NumberOfEntries);
                }
            }
            catch (Exception e)
            {
                _index.HandleError(e);

                var message = $"Failed to execute reduce function for reduce key '{tree.Name}' on a branch page #{page} of '{_indexDefinition.Name}' index.";

                if (_logger.IsInfoEnabled)
                    _logger.Info(message, e);

                stats.RecordReduceErrors(page.NumberOfEntries);
                stats.AddReduceError(message + $" Exception: {e}");
            }
            finally
            {
                branchesToAggregate.Remove(pageNumber);
            }
        }

        if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
        {
            // we still have unaggregated branches which were modified but their children were not modified (branch page splitting) so we missed them
            parentPagesToAggregate.Add(branchesToAggregate.First());
        }
    }
}