private bool TryAggregateChildPageOrThrow(long pageNumber, Table table, TransactionOperationContext indexContext,
                                                  HashSet<long> remainingBranchesToAggregate,
                                                  HashSet<long> compressedEmptyLeafs,
                                                  Dictionary<long, Exception> failedAggregatedLeafs,
                                                  Tree tree,
                                                  CancellationToken token)
        {
            if (remainingBranchesToAggregate.Contains(pageNumber))
            {
                // RavenDB-5363: we have a modified branch page but its children were not modified (branch page splitting),
                // so we haven't aggregated it yet - let's do it now

                try
                {
                    var page = indexContext.Transaction.InnerTransaction.LowLevelTransaction.GetPage(pageNumber);
                    var unaggregatedBranch = new TreePage(page.Pointer, Constants.Storage.PageSize);

                    using (var result = AggregateBranchPage(unaggregatedBranch, table, indexContext, remainingBranchesToAggregate, compressedEmptyLeafs,
                                                            failedAggregatedLeafs, tree, token))
                    {
                        StoreAggregationResult(unaggregatedBranch, table, result);
                    }
                }
                finally
                {
                    remainingBranchesToAggregate.Remove(pageNumber);
                }

                return true;
            }

            if (compressedEmptyLeafs != null && compressedEmptyLeafs.Contains(pageNumber))
            {
                // it's empty after decompression, we can safely skip it here

                return false;
            }

            var relatedPage     = indexContext.Transaction.InnerTransaction.LowLevelTransaction.GetPage(pageNumber);
            var relatedTreePage = new TreePage(relatedPage.Pointer, Constants.Storage.PageSize);

            string decompressedDebug = null;

            if (relatedTreePage.IsCompressed)
            {
                // let's try to decompress it and check if it's empty
                // we decompress it for validation purposes only, although it's a very rare case

                using (var decompressed = tree.DecompressPage(relatedTreePage, skipCache: true))
                {
                    if (decompressed.NumberOfEntries == 0)
                    {
                        // it's empty so there is no related aggregation result, we can safely skip it

                        return false;
                    }

                    decompressedDebug = decompressed.ToString();
                }
            }

            var message = $"Couldn't find a pre-computed aggregation result for the existing page: {relatedTreePage.PageNumber}. ";

            var debugDetails = $"Debug details - page: {relatedTreePage}, ";

            if (decompressedDebug != null)
            {
                debugDetails += $"decompressed: {decompressedDebug}), ";
            }

            debugDetails += $"tree state: {tree.State}. ";

            if (failedAggregatedLeafs != null && failedAggregatedLeafs.TryGetValue(pageNumber, out var exception))
            {
                message += $"The aggregation of this leaf (#{pageNumber}) has failed so the relevant result doesn't exist. " +
                           "Check the inner exception for leaf aggregation error details. ";

                throw new AggregationResultNotFoundException(message + debugDetails, exception);
            }

            message += "Please check if there are other aggregate failures at earlier phase of the reduce stage. They could lead to this error due to missing intermediate results. ";

            throw new AggregationResultNotFoundException(message + debugDetails);
        }
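        // Editor's sketch, not the original implementation: one plausible shape for the calling
        // side inside a branch aggregation. TryReadReduceResult and childPageNumbers are
        // hypothetical stand-ins for the actual table lookup and child-page iteration.
        private void AggregateChildrenSketch(IEnumerable<long> childPageNumbers, Table table,
                                             TransactionOperationContext indexContext,
                                             HashSet<long> remainingBranchesToAggregate,
                                             HashSet<long> compressedEmptyLeafs,
                                             Dictionary<long, Exception> failedAggregatedLeafs,
                                             Tree tree, CancellationToken token)
        {
            foreach (var childPageNumber in childPageNumbers)
            {
                if (TryReadReduceResult(table, childPageNumber, out var reduceResult) == false)
                {
                    // no pre-computed result yet: aggregate an unaggregated branch on demand,
                    // skip a compressed leaf that is empty after decompression, or throw
                    if (TryAggregateChildPageOrThrow(childPageNumber, table, indexContext,
                            remainingBranchesToAggregate, compressedEmptyLeafs,
                            failedAggregatedLeafs, tree, token) == false)
                        continue;

                    TryReadReduceResult(table, childPageNumber, out reduceResult); // expected to exist now
                }

                // ... feed reduceResult into the aggregation of this branch page ...
            }
        }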
        private void HandleReductionError(Exception error, LazyStringValue reduceKeyHash, IndexWriteOperation writer, IndexingStatsScope stats, bool updateStats, TreePage page,
                                          int numberOfNestedValues = -1)
        {
            var builder = new StringBuilder("Failed to execute reduce function on ");

            if (page != null)
            {
                builder.Append($"page {page} ");
            }
            else
            {
                builder.Append("nested values ");
            }

            builder.Append($"of '{_indexDefinition.Name}' index. The relevant reduce result is going to be removed from the index ");
            builder.Append($"as it would be incorrect due to encountered errors (reduce key hash: {reduceKeyHash}");

            var sampleItem = _aggregationBatch?.Items?.FirstOrDefault();

            if (sampleItem != null)
            {
                builder.Append($", sample item to reduce: {sampleItem}");
            }

            builder.Append(")");

            var message = builder.ToString();

            if (_logger.IsInfoEnabled)
            {
                _logger.Info(message, error);
            }

            try
            {
                writer.DeleteReduceResult(reduceKeyHash, stats);
            }
            catch (Exception e)
            {
                if (_logger.IsInfoEnabled)
                {
                    _logger.Info($"Failed to delete an index result from '${_indexDefinition.Name}' index on reduce error (reduce key hash: ${reduceKeyHash})", e);
                }
            }

            if (updateStats)
            {
                var numberOfEntries = page?.NumberOfEntries ?? numberOfNestedValues;

                Debug.Assert(numberOfEntries != -1);

                // we only want to record exceptions for some of these, to give the
                // user information about what is going on; recording every error while
                // processing a big batch can be a perf killer. See: RavenDB-11038

                stats.RecordReduceErrors(numberOfEntries);

                if (stats.NumberOfKeptReduceErrors < IndexStorage.MaxNumberOfKeptErrors)
                {
                    stats.AddReduceError(message + $" Exception: {error}");
                }

                var failureInfo = new IndexFailureInformation
                {
                    Name           = _index.Name,
                    MapErrors      = stats.MapErrors,
                    MapAttempts    = stats.MapAttempts,
                    ReduceErrors   = stats.ReduceErrors,
                    ReduceAttempts = stats.ReduceAttempts
                };

                if (failureInfo.IsInvalidIndex(true))
                {
                    throw new ExcessiveNumberOfReduceErrorsException("Excessive number of errors during the reduce phase for the current batch. Failure info: " +
                                                                     failureInfo.GetErrorMessage());
                }
            }
        }
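        // Editor's usage sketch (hypothetical): the nested-values reduction path reports a
        // failure with page: null, so the message says "nested values" and numberOfNestedValues
        // supplies the entry count that a TreePage would otherwise provide for the stats update.
        private void ReduceNestedValuesSketch(List<object> nestedValues, LazyStringValue reduceKeyHash,
                                              IndexWriteOperation writer, IndexingStatsScope stats)
        {
            try
            {
                // ... run the reduce function over nestedValues ...
            }
            catch (Exception e)
            {
                HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true,
                                     page: null, numberOfNestedValues: nestedValues.Count);
            }
        }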
        private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats,
                                         CancellationToken token, MapReduceResultsStore modifiedStore, LowLevelTransaction lowLevelTransaction,
                                         IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table)
        {
            EnsureValidTreeReductionStats(stats);

            var tree = modifiedStore.Tree;

            var branchesToAggregate = new HashSet<long>();

            var parentPagesToAggregate = new HashSet<long>();

            var page = new TreePage(null, lowLevelTransaction.PageSize);

            foreach (var modifiedPage in modifiedStore.ModifiedPages)
            {
                token.ThrowIfCancellationRequested();

                page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

                stats.RecordReduceTreePageModified(page.IsLeaf);

                if (page.IsLeaf == false)
                {
                    Debug.Assert(page.IsBranch);
                    branchesToAggregate.Add(modifiedPage);

                    continue;
                }

                var leafPage = page;

                var compressed = leafPage.IsCompressed;

                if (compressed)
                {
                    stats.RecordCompressedLeafPage();
                }

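                // the conditional using disposes the DecompressedLeafPage only when the leaf
                // was actually compressed; for a regular leaf it wraps null, which is a no-op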
                using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
                {
                    if (leafPage.NumberOfEntries == 0)
                    {
                        if (leafPage.PageNumber != tree.State.RootPageNumber)
                        {
                            throw new InvalidOperationException(
                                      $"Encountered empty page which isn't a root. Page #{leafPage.PageNumber} in '{tree.Name}' tree.");
                        }

                        writer.DeleteReduceResult(reduceKeyHash, stats);

                        var   emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                        Slice pageNumSlice;
                        using (Slice.External(indexContext.Allocator, (byte *)&emptyPageNumber, sizeof(long), out pageNumSlice))
                            table.DeleteByKey(pageNumSlice);

                        continue;
                    }

                    var parentPage = tree.GetParentPageOf(leafPage);

                    stats.RecordReduceAttempts(leafPage.NumberOfEntries);

                    try
                    {
                        using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, stats, token))
                        {
                            if (parentPage == -1)
                            {
                                writer.DeleteReduceResult(reduceKeyHash, stats);

                                foreach (var output in result.GetOutputs())
                                {
                                    writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                                }
                            }
                            else
                            {
                                StoreAggregationResult(leafPage.PageNumber, leafPage.NumberOfEntries, table, result, stats);
                                parentPagesToAggregate.Add(parentPage);
                            }

                            _metrics.MapReduceReducedPerSecond.Mark(leafPage.NumberOfEntries);

                            stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                        }
                    }
                    catch (Exception e)
                    {
                        _index.HandleError(e);

                        var message =
                            $"Failed to execute reduce function for reduce key '{tree.Name}' on a leaf page #{leafPage} of '{_indexDefinition.Name}' index.";

                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info(message, e);
                        }

                        if (parentPage == -1)
                        {
                            stats.RecordReduceErrors(leafPage.NumberOfEntries);
                            stats.AddReduceError(message + $"  Exception: {e}");
                        }
                    }
                }
            }

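            // page numbers are stored as table keys in big-endian form (Bits.SwapBytes) so their
            // byte-wise order matches numeric order; the slice wraps the stack variable tmp,
            // letting a single slice serve every freed page in the loop below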
            long  tmp = 0;
            Slice pageNumberSlice;

            using (Slice.External(indexContext.Allocator, (byte *)&tmp, sizeof(long), out pageNumberSlice))
            {
                foreach (var freedPage in modifiedStore.FreedPages)
                {
                    tmp = Bits.SwapBytes(freedPage);
                    table.DeleteByKey(pageNumberSlice);
                }
            }

            while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
            {
                token.ThrowIfCancellationRequested();

                var branchPages = parentPagesToAggregate;
                parentPagesToAggregate = new HashSet<long>();

                foreach (var pageNumber in branchPages)
                {
                    page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

                    try
                    {
                        if (page.IsBranch == false)
                        {
                            throw new InvalidOperationException("Parent page was found that wasn't a branch, error at " +
                                                                page.PageNumber);
                        }

                        stats.RecordReduceAttempts(page.NumberOfEntries);

                        var parentPage = tree.GetParentPageOf(page);

                        using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, stats, token))
                        {
                            if (parentPage == -1)
                            {
                                writer.DeleteReduceResult(reduceKeyHash, stats);

                                foreach (var output in result.GetOutputs())
                                {
                                    writer.IndexDocument(reduceKeyHash, output, stats, indexContext);
                                }
                            }
                            else
                            {
                                parentPagesToAggregate.Add(parentPage);

                                StoreAggregationResult(page.PageNumber, page.NumberOfEntries, table, result, stats);
                            }

                            _metrics.MapReduceReducedPerSecond.Mark(page.NumberOfEntries);

                            stats.RecordReduceSuccesses(page.NumberOfEntries);
                        }
                    }
                    catch (Exception e)
                    {
                        _index.HandleError(e);

                        var message =
                            $"Failed to execute reduce function for reduce key '{tree.Name}' on a branch page #{page} of '{_indexDefinition.Name}' index.";

                        if (_logger.IsInfoEnabled)
                        {
                            _logger.Info(message, e);
                        }

                        stats.RecordReduceErrors(page.NumberOfEntries);
                        stats.AddReduceError(message + $" Exception: {e}");
                    }
                    finally
                    {
                        branchesToAggregate.Remove(pageNumber);
                    }
                }

                if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
                {
                    // we still have unaggregated branches which were modified but their children were not modified (branch page splitting), so we missed them
                    parentPagesToAggregate.Add(branchesToAggregate.First());
                }
            }
        }
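        // Editor's model (illustration only, not Voron code): the while loop above walks the
        // tree bottom-up - each round aggregates one level of pages and schedules their parents
        // for the next round, stopping at the root, where GetParentPageOf returns -1.
        private static void AggregateBottomUp(HashSet<long> toAggregate,
                                              Func<long, long> getParentPage, // -1 => root
                                              Action<long> aggregate)
        {
            while (toAggregate.Count > 0)
            {
                var currentLevel = toAggregate;
                toAggregate = new HashSet<long>();

                foreach (var pageNumber in currentLevel)
                {
                    aggregate(pageNumber);

                    var parentPage = getParentPage(pageNumber);
                    if (parentPage != -1)
                        toAggregate.Add(parentPage); // the level above is handled next round
                }
            }
        }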
        private void HandleTreeReduction(TransactionOperationContext indexContext, IndexingStatsScope stats,
                                         MapReduceResultsStore modifiedStore, LowLevelTransaction lowLevelTransaction,
                                         IndexWriteOperation writer, LazyStringValue reduceKeyHash, Table table, CancellationToken token)
        {
            if (modifiedStore.ModifiedPages.Count == 0)
            {
                return;
            }

            EnsureValidTreeReductionStats(stats);

            var tree = modifiedStore.Tree;

            var branchesToAggregate = new HashSet<long>();

            var parentPagesToAggregate = new HashSet<long>();

            var page = new TreePage(null, Constants.Storage.PageSize);

            HashSet<long> compressedEmptyLeafs = null;

            Dictionary<long, Exception> failedAggregatedLeafs = null;

            foreach (var modifiedPage in modifiedStore.ModifiedPages)
            {
                token.ThrowIfCancellationRequested();

                page.Base = lowLevelTransaction.GetPage(modifiedPage).Pointer;

                stats.RecordReduceTreePageModified(page.IsLeaf);

                if (page.IsLeaf == false)
                {
                    Debug.Assert(page.IsBranch);
                    branchesToAggregate.Add(modifiedPage);

                    continue;
                }

                var leafPage = page;

                var compressed = leafPage.IsCompressed;

                if (compressed)
                {
                    stats.RecordCompressedLeafPage();
                }

                using (compressed ? (DecompressedLeafPage)(leafPage = tree.DecompressPage(leafPage, skipCache: true)) : null)
                {
                    if (leafPage.NumberOfEntries == 0)
                    {
                        if (leafPage.PageNumber == tree.State.RootPageNumber)
                        {
                            writer.DeleteReduceResult(reduceKeyHash, stats);

                            var emptyPageNumber = Bits.SwapBytes(leafPage.PageNumber);
                            using (Slice.External(indexContext.Allocator, (byte *)&emptyPageNumber, sizeof(long), out Slice pageNumSlice))
                                table.DeleteByKey(pageNumSlice);

                            continue;
                        }

                        if (compressed)
                        {
                            // it doesn't have any entries after decompression because
                            // each compressed entry carries a delete tombstone

                            if (compressedEmptyLeafs == null)
                            {
                                compressedEmptyLeafs = new HashSet<long>();
                            }

                            compressedEmptyLeafs.Add(leafPage.PageNumber);
                            continue;
                        }

                        throw new UnexpectedReduceTreePageException(
                                  $"Encountered empty page which isn't a root. Page {leafPage} in '{tree.Name}' tree (tree state: {tree.State})");
                    }

                    var parentPage = tree.GetParentPageOf(leafPage);

                    stats.RecordReduceAttempts(leafPage.NumberOfEntries);

                    try
                    {
                        using (var result = AggregateLeafPage(leafPage, lowLevelTransaction, indexContext, token))
                        {
                            if (parentPage == -1)
                            {
                                writer.DeleteReduceResult(reduceKeyHash, stats);

                                foreach (var output in result.GetOutputs())
                                {
                                    writer.IndexDocument(reduceKeyHash, null, output, stats, indexContext);
                                }
                            }
                            else
                            {
                                StoreAggregationResult(leafPage, table, result);
                                parentPagesToAggregate.Add(parentPage);
                            }

                            _index.ReducesPerSec.MarkSingleThreaded(leafPage.NumberOfEntries);
                            _metrics.MapReduceIndexes.ReducedPerSec.Mark(leafPage.NumberOfEntries);

                            stats.RecordReduceSuccesses(leafPage.NumberOfEntries);
                        }
                    }
                    catch (Exception e) when(e.IsIndexError())
                    {
                        if (failedAggregatedLeafs == null)
                        {
                            failedAggregatedLeafs = new Dictionary<long, Exception>();
                        }

                        failedAggregatedLeafs.Add(leafPage.PageNumber, e);

                        _index.ErrorIndexIfCriticalException(e);

                        HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: parentPage == -1, page: leafPage);
                    }
                }
            }

            while (parentPagesToAggregate.Count > 0 || branchesToAggregate.Count > 0)
            {
                token.ThrowIfCancellationRequested();

                var branchPages = parentPagesToAggregate;
                parentPagesToAggregate = new HashSet <long>();

                foreach (var pageNumber in branchPages)
                {
                    page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

                    try
                    {
                        if (page.IsBranch == false)
                        {
                            throw new UnexpectedReduceTreePageException("Parent page was found that wasn't a branch, error at " + page);
                        }

                        stats.RecordReduceAttempts(page.NumberOfEntries);

                        var parentPage = tree.GetParentPageOf(page);

                        using (var result = AggregateBranchPage(page, table, indexContext, branchesToAggregate, compressedEmptyLeafs, failedAggregatedLeafs, tree, token))
                        {
                            if (parentPage == -1)
                            {
                                writer.DeleteReduceResult(reduceKeyHash, stats);

                                foreach (var output in result.GetOutputs())
                                {
                                    writer.IndexDocument(reduceKeyHash, null, output, stats, indexContext);
                                }
                            }
                            else
                            {
                                parentPagesToAggregate.Add(parentPage);

                                StoreAggregationResult(page, table, result);
                            }

                            _index.ReducesPerSec.MarkSingleThreaded(page.NumberOfEntries);
                            _metrics.MapReduceIndexes.ReducedPerSec.Mark(page.NumberOfEntries);

                            stats.RecordReduceSuccesses(page.NumberOfEntries);
                        }
                    }
                    catch (Exception e) when(e.IsIndexError())
                    {
                        _index.ErrorIndexIfCriticalException(e);

                        HandleReductionError(e, reduceKeyHash, writer, stats, updateStats: true, page: page);
                    }
                    finally
                    {
                        branchesToAggregate.Remove(pageNumber);
                    }
                }

                if (parentPagesToAggregate.Count == 0 && branchesToAggregate.Count > 0)
                {
                    // we still have unaggregated branches which were modified but their children were not modified (branch page splitting), so we missed them
                    parentPagesToAggregate.Add(branchesToAggregate.First());
                }

                _index.UpdateThreadAllocations(indexContext, writer, stats, updateReduceStats: true);
            }

            if (compressedEmptyLeafs != null && compressedEmptyLeafs.Count > 0)
            {
                // we had some compressed pages that are empty after decompression
                // let's remove them and reduce the tree once again

                modifiedStore.ModifiedPages.Clear();

                foreach (var pageNumber in compressedEmptyLeafs)
                {
                    page.Base = lowLevelTransaction.GetPage(pageNumber).Pointer;

                    using (var emptyPage = tree.DecompressPage(page, skipCache: true))
                    {
                        if (emptyPage.NumberOfEntries > 0) // could have changed in the meantime
                        {
                            continue;
                        }

                        modifiedStore.Tree.RemoveEmptyDecompressedPage(emptyPage);
                    }
                }

                HandleTreeReduction(indexContext, stats, modifiedStore, lowLevelTransaction, writer, reduceKeyHash, table, token);
            }
        }
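        // Editor's model (illustration only): the recursive re-run above converges because
        // removing the empty decompressed pages repopulates ModifiedPages while the empty
        // leafs themselves disappear. The delegates are hypothetical stand-ins for the pass.
        private static void ReduceToFixedPoint(Func<HashSet<long>> runReductionPass, Action<long> removeEmptyPage)
        {
            while (true)
            {
                var emptyCompressedLeafs = runReductionPass();

                if (emptyCompressedLeafs == null || emptyCompressedLeafs.Count == 0)
                    return; // stable: the pass produced no empty compressed leafs

                foreach (var pageNumber in emptyCompressedLeafs)
                    removeEmptyPage(pageNumber); // shrinks the tree; the next pass sees the effects
            }
        }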
 private static void ThrowNullCompressionOutputButNonEmptyPage(TreePage page)
 {
     throw new InvalidOperationException($"{nameof(CompressionResult.CompressionOutputPtr)} was null but the page was not empty: {page}. Should never happen");
 }
        public static IDisposable TryGetCompressedTempPage(LowLevelTransaction tx, TreePage page, out CompressionResult result, bool defrag = true)
        {
            var returnTempPage = tx.Environment.GetTemporaryPage(tx, out TemporaryPage temp);

            var tempPage = temp.GetTempPage();

            if (page.NumberOfEntries == 0)
            {
                Memory.Copy(tempPage.Base, page.Base, Constants.Tree.PageHeaderSize);
                tempPage.Lower = Constants.Tree.PageHeaderSize;
                tempPage.Upper = (ushort)tempPage.PageSize;

                Debug.Assert(tempPage.Lower <= tempPage.Upper);

                result = new CompressionResult
                {
                    CompressedPage       = tempPage,
                    CompressionOutputPtr = null,
                    Header = new CompressedNodesHeader
                    {
                        SectionSize               = 0,
                        CompressedSize            = 0,
                        UncompressedSize          = 0,
                        NumberOfCompressedEntries = 0,
                    },
                    InvalidateFromCache = true
                };

                return returnTempPage;
            }

            if (defrag)
            {
                if (page.CalcSizeUsed() != page.SizeUsed - Constants.Tree.PageHeaderSize) // check if the page really requires defrag
                {
                    page.Defrag(tx);
                }
            }

            var valuesSize = page.PageSize - page.Upper;

            var compressionInput  = page.Base + page.Upper;
            var compressionResult = tempPage.Base + Constants.Tree.PageHeaderSize + Constants.Compression.HeaderSize; // temp compression result has compressed values at the beginning of the page
            var offsetsSize       = page.NumberOfEntries * Constants.Tree.NodeOffsetSize;

            var compressionOutput = compressionResult + offsetsSize;

            var compressedSize = LZ4.Encode64(
                compressionInput,
                compressionOutput,
                valuesSize,
                tempPage.PageSize - (Constants.Tree.PageHeaderSize + Constants.Compression.HeaderSize) - offsetsSize);

            if (compressedSize == 0 || compressedSize > valuesSize)
            {
                // the output buffer wasn't big enough, or the compressed output is larger than the uncompressed input

                result = null;
                return returnTempPage;
            }

            var compressedOffsets = (ushort*)compressionResult;
            var offsets           = page.KeysOffsets;

            int    numberOfEntries = page.NumberOfEntries;
            ushort upper           = page.Upper;

            for (var i = 0; i < numberOfEntries; i++)
            {
                compressedOffsets[i] = (ushort)(offsets[i] - upper);
            }

            var compressionSectionSize = compressedSize + offsetsSize;

            var sizeLeftInDecompressedPage     = Constants.Compression.MaxPageSize - page.SizeUsed;
            var sizeLeftForUncompressedEntries = Constants.Storage.PageSize - (Constants.Tree.PageHeaderSize + Constants.Compression.HeaderSize + compressionSectionSize);

            if (sizeLeftForUncompressedEntries > sizeLeftInDecompressedPage)
            {
                // expand the compression section to prevent adding further uncompressed
                // entries that would exceed MaxPageSize after decompression

                compressionSectionSize += sizeLeftForUncompressedEntries - sizeLeftInDecompressedPage;
            }

            compressionSectionSize += compressionSectionSize & 1; // ensure 2-byte alignment

            // check that after decompression we won't exceed MaxPageSize
            Debug.Assert(page.SizeUsed +               // page header, node offsets, existing entries
                         (Constants.Storage.PageSize - // space that can be still used to insert next uncompressed entries
                          (Constants.Tree.PageHeaderSize + Constants.Compression.HeaderSize + compressionSectionSize))
                         <= Constants.Compression.MaxPageSize);

            Memory.Copy(tempPage.Base, page.Base, Constants.Tree.PageHeaderSize);
            tempPage.Lower = (ushort)(Constants.Tree.PageHeaderSize + Constants.Compression.HeaderSize + compressionSectionSize);
            tempPage.Upper = (ushort)tempPage.PageSize;

            Debug.Assert(tempPage.Lower <= tempPage.Upper);

            result = new CompressionResult
            {
                CompressedPage       = tempPage,
                CompressionOutputPtr = compressionResult,
                Header = new CompressedNodesHeader
                {
                    SectionSize               = (ushort)compressionSectionSize,
                    CompressedSize            = (ushort)compressedSize,
                    UncompressedSize          = (ushort)valuesSize,
                    NumberOfCompressedEntries = page.NumberOfEntries,
                }
            };

            return returnTempPage;
        }
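        // Editor's illustration (not Voron code): a self-contained demo of the two tricks used
        // above - rebasing node offsets against 'upper' so they become relative to the start of
        // the copied values region, and rounding an odd section size up to the next even number
        // ('size += size & 1') for the 2-byte alignment of the ushort offsets array.
        private static void CompressionMathDemo()
        {
            ushort upper = 7 * 1024;
            ushort[] absoluteOffsets = { 7200, 7450, 8100 };

            foreach (var offset in absoluteOffsets)
                Console.WriteLine((ushort)(offset - upper)); // 32, 282, 932

            var size = 1233;
            size += size & 1; // odd sizes round up (1233 -> 1234); even sizes stay unchanged

            Console.WriteLine(size); // 1234
        }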
        public DecompressedLeafPage GetPage(LowLevelTransaction tx, int pageSize, DecompressionUsage usage, TreePage original)
        {
            GetTemporaryPage(tx, pageSize, out var tempPage);

            var treePage = tempPage.GetTempPage();

            return new DecompressedLeafPage(treePage.Base, treePage.PageSize, usage, original, tempPage);
        }
        public DecompressedLeafPage(byte* basePtr, int pageSize, DecompressionUsage usage, TreePage original, TemporaryPage tempPage) : base(basePtr, pageSize)
        {
            Original  = original;
            Usage     = usage;
            _tempPage = tempPage;

            PageNumber = Original.PageNumber;
            TreeFlags  = Original.TreeFlags;
            Flags      = Original.Flags & ~PageFlags.Compressed;
        }
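        // Editor's usage sketch mirroring the call sites above: a DecompressedLeafPage owns a
        // temporary page, so it is always wrapped in a using; it keeps the original page number
        // and flags, minus PageFlags.Compressed.
        private static void InspectCompressedLeafSketch(Tree tree, TreePage compressedLeaf)
        {
            using (var decompressed = tree.DecompressPage(compressedLeaf, skipCache: true))
            {
                if (decompressed.NumberOfEntries == 0)
                {
                    // empty after decompression - e.g. every compressed entry carried a delete tombstone
                }
            }
        }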
        private void HandleReductionError(Exception error, LazyStringValue reduceKeyHash, Lazy<IndexWriteOperation> writer, IndexingStatsScope stats, bool updateStats, TreePage page,
                                          int numberOfNestedValues = -1)
        {
            var builder = new StringBuilder("Failed to execute reduce function on ");

            if (page != null)
            {
                builder.Append($"page {page} ");
            }
            else
            {
                builder.Append("nested values ");
            }

            builder.Append($"of '{_indexDefinition.Name}' index. The relevant reduce result is going to be removed from the index ");
            builder.Append($"as it would be incorrect due to encountered errors (reduce key hash: {reduceKeyHash}");

            var erroringResult = OnErrorResult;

            if (erroringResult != null)
            {
                builder.Append($", current item to reduce: {erroringResult}");
            }
            else
            {
                erroringResult = _aggregationBatch?.Items?.FirstOrDefault();

                if (erroringResult != null)
                {
                    builder.Append($", sample item to reduce: {erroringResult}");
                }
            }

            builder.Append(")");

            var message = builder.ToString();

            if (_logger.IsInfoEnabled)
            {
                _logger.Info(message, error);
            }

            try
            {
                writer.Value.DeleteReduceResult(reduceKeyHash, stats);
            }
            catch (Exception e)
            {
                if (_logger.IsInfoEnabled)
                {
                    _logger.Info($"Failed to delete an index result from '${_indexDefinition.Name}' index on reduce error (reduce key hash: ${reduceKeyHash})", e);
                }
            }

            if (updateStats)
            {
                var numberOfEntries = page?.NumberOfEntries ?? numberOfNestedValues;

                Debug.Assert(numberOfEntries != -1);

                // we only want to record exceptions for some of these, to give the
                // user information about what is going on; recording every error while
                // processing a big batch can be a perf killer. See: RavenDB-11038

                stats.RecordReduceErrors(numberOfEntries);

                if (stats.NumberOfKeptReduceErrors < IndexStorage.MaxNumberOfKeptErrors)
                {
                    var reduceKey = GetReduceKey();

                    stats.AddReduceError(message + $" Exception: {error}", reduceKey);
                }

                var failureInfo = new IndexFailureInformation
                {
                    Name           = _index.Name,
                    MapErrors      = stats.MapErrors,
                    MapAttempts    = stats.MapAttempts,
                    ReduceErrors   = stats.ReduceErrors,
                    ReduceAttempts = stats.ReduceAttempts
                };

                if (failureInfo.IsInvalidIndex(true))
                {
                    throw new ExcessiveNumberOfReduceErrorsException("Excessive number of errors during the reduce phase for the current batch. Failure info: " +
                                                                     failureInfo.GetErrorMessage());
                }

                string GetReduceKey()
                {
                    if (erroringResult == null)
                    {
                        return null;
                    }

                    try
                    {
                        var mapReduceDef     = _index.Definition as MapReduceIndexDefinition;
                        var autoMapReduceDef = _index.Definition as AutoMapReduceIndexDefinition;

                        var groupByKeys = (mapReduceDef?.GroupByFields.Select(x => x.Key) ??
                                           autoMapReduceDef?.GroupByFields.Select(x => x.Key))?.ToList();

                        StringBuilder reduceKeyValue = null;

                        if (groupByKeys != null)
                        {
                            foreach (var key in groupByKeys)
                            {
                                if (erroringResult.TryGetMember(key, out var result))
                                {
                                    if (reduceKeyValue == null)
                                    {
                                        reduceKeyValue = new StringBuilder("Reduce key: { ");
                                    }
                                    else
                                    {
                                        reduceKeyValue.Append(", "); // separate consecutive group-by pairs
                                    }

                                    reduceKeyValue.Append($"'{key}' : {result?.ToString() ?? "null"}");
                                }
                            }

                            reduceKeyValue?.Append(" }");
                        }

                        return reduceKeyValue?.ToString();
                    }
                    catch
                    {
                        // ignore - make sure we don't error on error reporting

                        return null;
                    }
                }
            }
        }
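        // Editor's illustration (hypothetical field names): for an index grouping by Region and
        // Product, GetReduceKey above formats the erroring item's key like this demo, which
        // prints: Reduce key: { 'Region' : EU, 'Product' : P-42 }
        private static void ReduceKeyFormatDemo()
        {
            var groupByValues = new Dictionary<string, object> { ["Region"] = "EU", ["Product"] = "P-42" };

            StringBuilder reduceKeyValue = null;

            foreach (var kvp in groupByValues)
            {
                if (reduceKeyValue == null)
                    reduceKeyValue = new StringBuilder("Reduce key: { ");
                else
                    reduceKeyValue.Append(", ");

                reduceKeyValue.Append($"'{kvp.Key}' : {kvp.Value?.ToString() ?? "null"}");
            }

            reduceKeyValue?.Append(" }");

            Console.WriteLine(reduceKeyValue?.ToString());
        }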