public IndexFailureInformation GetFailureRate(int id)
{
    var key = new Slice(CreateKey(id));

    ushort version;
    var indexStats = LoadStruct(tableStorage.IndexingStats, key, out version);
    var reduceStats = LoadStruct(tableStorage.ReduceStats, key, out version);

    var reduceAttempts = reduceStats.ReadInt(ReducingWorkStatsFields.ReduceAttempts);
    var reduceErrors = reduceStats.ReadInt(ReducingWorkStatsFields.ReduceErrors);
    var reduceSuccesses = reduceStats.ReadInt(ReducingWorkStatsFields.ReduceSuccesses);

    var indexFailureInformation = new IndexFailureInformation
    {
        Attempts = indexStats.ReadInt(IndexingWorkStatsFields.IndexingAttempts),
        Errors = indexStats.ReadInt(IndexingWorkStatsFields.IndexingErrors),
        Successes = indexStats.ReadInt(IndexingWorkStatsFields.IndexingSuccesses),
        ReduceAttempts = reduceAttempts == -1 ? (int?)null : reduceAttempts,
        ReduceErrors = reduceErrors == -1 ? (int?)null : reduceErrors,
        ReduceSuccesses = reduceSuccesses == -1 ? (int?)null : reduceSuccesses,
        Id = indexStats.ReadInt(IndexingWorkStatsFields.IndexId)
    };

    return indexFailureInformation;
}
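In the struct-based storage above, -1 in the reduce counters is a sentinel meaning the index has no reduce stage, which the method surfaces as null. As a rough illustration of how a caller might fold the nullable counters into one failure rate, here is a minimal sketch; the FailureSnapshot type and the equal weighting of map and reduce work are assumptions, not RavenDB's API:

// Hypothetical consumer-side type; not the RavenDB IndexFailureInformation class.
public sealed class FailureSnapshot
{
    public int Attempts, Errors;
    public int? ReduceAttempts, ReduceErrors; // null => not a map-reduce index

    public double FailureRate()
    {
        // Assumption: map and reduce work are weighted equally.
        var attempts = Attempts + (ReduceAttempts ?? 0);
        var errors = Errors + (ReduceErrors ?? 0);
        return attempts == 0 ? 0.0 : (double)errors / attempts;
    }
}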
public bool IsIndexInvalid(RavenTransaction tx)
{
    var statsTree = tx.InnerTransaction.ReadTree(IndexSchema.StatsTree);

    var mapAttempts = statsTree.Read(IndexSchema.MapAttemptsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;
    var mapErrors = statsTree.Read(IndexSchema.MapErrorsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;

    int? reduceAttempts = null, reduceErrors = null;
    if (_index.Type.IsMapReduce())
    {
        reduceAttempts = statsTree.Read(IndexSchema.ReduceAttemptsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;
        reduceErrors = statsTree.Read(IndexSchema.ReduceErrorsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;
    }

    int mapReferenceAttempts = 0, mapReferenceErrors = 0;
    if (_index.GetReferencedCollections()?.Count > 0)
    {
        mapReferenceAttempts = statsTree.Read(IndexSchema.MapReferencedAttemptsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;
        mapReferenceErrors = statsTree.Read(IndexSchema.MapReferenceErrorsSlice)?.Reader.ReadLittleEndianInt32() ?? 0;
    }

    return IndexFailureInformation.CheckIndexInvalid(mapAttempts, mapErrors, mapReferenceAttempts, mapReferenceErrors, reduceAttempts, reduceErrors, false);
}
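CheckIndexInvalid receives the raw counters and decides whether the error ratio disqualifies the index (the trailing false flag presumably indicates the index is not stale because of a still-running reduction). A hedged sketch of what such a threshold check can look like; the 15% threshold and the minimum-attempts guard are illustrative assumptions, not RavenDB's actual constants:

// Illustrative stand-in for IndexFailureInformation.CheckIndexInvalid; the
// constants below are assumed for the sketch, not taken from RavenDB.
public static class InvalidityCheck
{
    private const double FailureThreshold = 0.15; // assumed: >15% errors marks the index invalid
    private const long MinimumAttempts = 100;     // assumed: avoid judging on a tiny sample

    public static bool IsInvalid(long mapAttempts, long mapErrors, long? reduceAttempts, long? reduceErrors)
    {
        var attempts = mapAttempts + (reduceAttempts ?? 0);
        var errors = mapErrors + (reduceErrors ?? 0);

        if (attempts < MinimumAttempts)
            return false; // too few attempts to judge the index fairly

        return (double)errors / attempts > FailureThreshold;
    }
}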
public IndexFailureInformation GetFailureRate(string index)
{
    var readResult = storage.IndexingStats.Read(index);
    if (readResult == null)
        throw new IndexDoesNotExistsException("There is no index named: " + index);

    var indexFailureInformation = new IndexFailureInformation
    {
        Attempts = readResult.Key.Value<int>("attempts"),
        Errors = readResult.Key.Value<int>("failures"),
        Successes = readResult.Key.Value<int>("successes"),
        ReduceAttempts = readResult.Key.Value<int?>("reduce_attempts"),
        ReduceErrors = readResult.Key.Value<int?>("reduce_failures"),
        ReduceSuccesses = readResult.Key.Value<int?>("reduce_successes"),
        Name = readResult.Key.Value<string>("index"),
    };

    return indexFailureInformation;
}
public IndexFailureInformation GetFailureRate(int id)
{
    var key = CreateKey(id);

    ushort version;
    var indexStats = Load(tableStorage.IndexingStats, key, out version);
    var reduceStats = Load(tableStorage.ReduceStats, key, out version);

    var indexFailureInformation = new IndexFailureInformation
    {
        Attempts = indexStats.Value<int>("attempts"),
        Errors = indexStats.Value<int>("failures"),
        Successes = indexStats.Value<int>("successes"),
        ReduceAttempts = reduceStats.Value<int?>("reduce_attempts"),
        ReduceErrors = reduceStats.Value<int?>("reduce_failures"),
        ReduceSuccesses = reduceStats.Value<int?>("reduce_successes"),
        Id = indexStats.Value<int>("index")
    };

    return indexFailureInformation;
}
private void HandleReductionError(Exception error, LazyStringValue reduceKeyHash, IndexWriteOperation writer, IndexingStatsScope stats,
    bool updateStats, TreePage page, int numberOfNestedValues = -1)
{
    var builder = new StringBuilder("Failed to execute reduce function on ");
    if (page != null)
        builder.Append($"page {page} ");
    else
        builder.Append("nested values ");

    builder.Append($"of '{_indexDefinition.Name}' index. The relevant reduce result is going to be removed from the index ");
    builder.Append($"as it would be incorrect due to encountered errors (reduce key hash: {reduceKeyHash}");

    var sampleItem = _aggregationBatch?.Items?.FirstOrDefault();
    if (sampleItem != null)
        builder.Append($", sample item to reduce: {sampleItem}");

    builder.Append(")");

    var message = builder.ToString();

    if (_logger.IsInfoEnabled)
        _logger.Info(message, error);

    try
    {
        writer.DeleteReduceResult(reduceKeyHash, stats);
    }
    catch (Exception e)
    {
        if (_logger.IsInfoEnabled)
            _logger.Info($"Failed to delete an index result from '{_indexDefinition.Name}' index on reduce error (reduce key hash: {reduceKeyHash})", e);
    }

    if (updateStats)
    {
        var numberOfEntries = page?.NumberOfEntries ?? numberOfNestedValues;

        Debug.Assert(numberOfEntries != -1);

        // we'll only want to record exceptions on some of these, to give the
        // user information about what is going on, otherwise we'll have to wait a lot if we
        // are processing a big batch, and this can be a perf killer. See: RavenDB-11038
        stats.RecordReduceErrors(numberOfEntries);

        if (stats.NumberOfKeptReduceErrors < IndexStorage.MaxNumberOfKeptErrors)
            stats.AddReduceError(message + $" Exception: {error}");

        var failureInfo = new IndexFailureInformation
        {
            Name = _index.Name,
            MapErrors = stats.MapErrors,
            MapAttempts = stats.MapAttempts,
            ReduceErrors = stats.ReduceErrors,
            ReduceAttempts = stats.ReduceAttempts
        };

        if (failureInfo.IsInvalidIndex(_isStaleBecauseOfRunningReduction))
            throw new ExcessiveNumberOfReduceErrorsException("Excessive number of errors during the reduce phase for the current batch. Failure info: " + failureInfo.GetErrorMessage());
    }
}
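Note the two-level error accounting above: every failed entry counts toward the failure rate via RecordReduceErrors, but only the first MaxNumberOfKeptErrors messages are stored, so a huge failing batch cannot flood memory or slow processing down (see RavenDB-11038 in the comment). A minimal standalone sketch of that pattern; ErrorLog and MaxKept are hypothetical names:

using System.Collections.Generic;

// Sketch of "count every error, keep only a bounded sample of messages".
public sealed class ErrorLog
{
    private const int MaxKept = 500; // assumed cap, in the spirit of IndexStorage.MaxNumberOfKeptErrors
    private readonly List<string> _kept = new List<string>();

    public long TotalErrors { get; private set; }

    public void Record(int count, string message)
    {
        TotalErrors += count;      // every error still counts toward the failure rate...
        if (_kept.Count < MaxKept) // ...but only a bounded number of messages is retained
            _kept.Add(message);
    }

    public IReadOnlyList<string> KeptMessages => _kept;
}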
/// <summary>
/// Initializes a new instance of the <see cref="IndexDisabledException"/> class.
/// </summary>
/// <param name="information">The failure information that caused the index to be disabled.</param>
public IndexDisabledException(IndexFailureInformation information)
{
    Information = information;
}
public unsafe IndexFailureInformation UpdateStats(DateTime indexingTime, IndexingRunStats stats)
{
    if (_logger.IsInfoEnabled)
        _logger.Info($"Updating statistics for '{_index.Name}'. Stats: {stats}.");

    using (_contextPool.AllocateOperationContext(out TransactionOperationContext context))
    using (var tx = context.OpenWriteTransaction())
    {
        var result = new IndexFailureInformation
        {
            Name = _index.Name
        };

        var table = tx.InnerTransaction.OpenTable(_errorsSchema, "Errors");
        var statsTree = tx.InnerTransaction.ReadTree(IndexSchema.StatsTree);

        result.MapAttempts = statsTree.Increment(IndexSchema.MapAttemptsSlice, stats.MapAttempts);
        result.MapSuccesses = statsTree.Increment(IndexSchema.MapSuccessesSlice, stats.MapSuccesses);
        result.MapErrors = statsTree.Increment(IndexSchema.MapErrorsSlice, stats.MapErrors);

        // keep the running maximum of outputs produced by a single document
        var currentMaxNumberOfOutputs = statsTree.Read(IndexSchema.MaxNumberOfOutputsPerDocument)?.Reader.ReadLittleEndianInt32();
        using (statsTree.DirectAdd(IndexSchema.MaxNumberOfOutputsPerDocument, sizeof(int), out byte* ptr))
        {
            *(int*)ptr = currentMaxNumberOfOutputs > stats.MaxNumberOfOutputsPerDocument
                ? currentMaxNumberOfOutputs.Value
                : stats.MaxNumberOfOutputsPerDocument;
        }

        if (_index.Type.IsMapReduce())
        {
            result.ReduceAttempts = statsTree.Increment(IndexSchema.ReduceAttemptsSlice, stats.ReduceAttempts);
            result.ReduceSuccesses = statsTree.Increment(IndexSchema.ReduceSuccessesSlice, stats.ReduceSuccesses);
            result.ReduceErrors = statsTree.Increment(IndexSchema.ReduceErrorsSlice, stats.ReduceErrors);
        }

        var binaryDate = indexingTime.ToBinary();
        using (Slice.External(context.Allocator, (byte*)&binaryDate, sizeof(long), out Slice binaryDateSlice))
            statsTree.Add(IndexSchema.LastIndexingTimeSlice, binaryDateSlice);

        if (stats.Errors != null)
        {
            for (var i = Math.Max(stats.Errors.Count - MaxNumberOfKeptErrors, 0); i < stats.Errors.Count; i++)
            {
                var error = stats.Errors[i];

                // store the timestamp big-endian so the raw key bytes sort chronologically
                var ticksBigEndian = Bits.SwapBytes(error.Timestamp.Ticks);

                using (var document = context.GetLazyString(error.Document))
                using (var action = context.GetLazyString(error.Action))
                using (var e = context.GetLazyString(error.Error))
                {
                    var tvb = new TableValueBuilder
                    {
                        { (byte*)&ticksBigEndian, sizeof(long) },
                        { document.Buffer, document.Size },
                        { action.Buffer, action.Size },
                        { e.Buffer, e.Size }
                    };

                    table.Insert(tvb);
                }
            }

            CleanupErrors(table);
        }

        tx.Commit();

        return result;
    }
}
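The Bits.SwapBytes call writes the error timestamp big-endian, presumably so the raw byte order of the table key matches chronological order and a byte-wise scan returns errors oldest-first. A standalone demonstration of why big-endian integer keys sort this way:

using System;
using System.Buffers.Binary;

class BigEndianKeyDemo
{
    static void Main()
    {
        long older = new DateTime(2020, 1, 1).Ticks;
        long newer = new DateTime(2021, 1, 1).Ticks;

        Span<byte> a = stackalloc byte[sizeof(long)];
        Span<byte> b = stackalloc byte[sizeof(long)];
        BinaryPrimitives.WriteInt64BigEndian(a, older);
        BinaryPrimitives.WriteInt64BigEndian(b, newer);

        // Big-endian keys compare byte-by-byte in the same order as the values themselves.
        Console.WriteLine(a.SequenceCompareTo(b) < 0); // True
    }
}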
private void HandleReductionError(Exception error, LazyStringValue reduceKeyHash, Lazy<IndexWriteOperation> writer, IndexingStatsScope stats,
    bool updateStats, TreePage page, int numberOfNestedValues = -1)
{
    var builder = new StringBuilder("Failed to execute reduce function on ");
    if (page != null)
        builder.Append($"page {page} ");
    else
        builder.Append("nested values ");

    builder.Append($"of '{_indexDefinition.Name}' index. The relevant reduce result is going to be removed from the index ");
    builder.Append($"as it would be incorrect due to encountered errors (reduce key hash: {reduceKeyHash}");

    var erroringResult = OnErrorResult;
    if (erroringResult != null)
    {
        builder.Append($", current item to reduce: {erroringResult}");
    }
    else
    {
        erroringResult = _aggregationBatch?.Items?.FirstOrDefault();
        if (erroringResult != null)
            builder.Append($", sample item to reduce: {erroringResult}");
    }

    builder.Append(")");

    var message = builder.ToString();

    if (_logger.IsInfoEnabled)
        _logger.Info(message, error);

    try
    {
        writer.Value.DeleteReduceResult(reduceKeyHash, stats);
    }
    catch (Exception e)
    {
        if (_logger.IsInfoEnabled)
            _logger.Info($"Failed to delete an index result from '{_indexDefinition.Name}' index on reduce error (reduce key hash: {reduceKeyHash})", e);
    }

    if (updateStats)
    {
        var numberOfEntries = page?.NumberOfEntries ?? numberOfNestedValues;

        Debug.Assert(numberOfEntries != -1);

        // we'll only want to record exceptions on some of these, to give the
        // user information about what is going on, otherwise we'll have to wait a lot if we
        // are processing a big batch, and this can be a perf killer. See: RavenDB-11038
        stats.RecordReduceErrors(numberOfEntries);

        if (stats.NumberOfKeptReduceErrors < IndexStorage.MaxNumberOfKeptErrors)
        {
            var reduceKey = GetReduceKey();
            stats.AddReduceError(message + $" Exception: {error}", reduceKey);
        }

        var failureInfo = new IndexFailureInformation
        {
            Name = _index.Name,
            MapErrors = stats.MapErrors,
            MapAttempts = stats.MapAttempts,
            ReduceErrors = stats.ReduceErrors,
            ReduceAttempts = stats.ReduceAttempts
        };

        if (failureInfo.IsInvalidIndex(true))
            throw new ExcessiveNumberOfReduceErrorsException("Excessive number of errors during the reduce phase for the current batch. Failure info: " + failureInfo.GetErrorMessage());

        string GetReduceKey()
        {
            if (erroringResult == null)
                return null;

            try
            {
                var mapReduceDef = _index.Definition as MapReduceIndexDefinition;
                var autoMapReduceDef = _index.Definition as AutoMapReduceIndexDefinition;
                var groupByKeys = (mapReduceDef?.GroupByFields.Select(x => x.Key) ?? autoMapReduceDef?.GroupByFields.Select(x => x.Key))?.ToList();

                StringBuilder reduceKeyValue = null;
                if (groupByKeys != null)
                {
                    foreach (var key in groupByKeys)
                    {
                        if (erroringResult.TryGetMember(key, out var result))
                        {
                            if (reduceKeyValue == null)
                                reduceKeyValue = new StringBuilder("Reduce key: { ");

                            reduceKeyValue.Append($"'{key}' : {result?.ToString() ?? "null"}");
                        }
                    }

                    reduceKeyValue?.Append(" }");
                }

                return reduceKeyValue?.ToString();
            }
            catch
            {
                // ignore - make sure we don't error on error reporting
                return null;
            }
        }
    }
}
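GetReduceKey assembles a human-readable description of the group-by key from the item that failed to reduce, swallowing any exception so that error reporting can never itself fail. A simplified, self-contained sketch of the same assembly over a plain dictionary; all names here are illustrative, and (like the original) no separator is inserted between fields:

using System.Collections.Generic;
using System.Text;

// Illustrative stand-in: a dictionary replaces the blittable erroringResult.
static class ReduceKeyFormatter
{
    public static string Format(IReadOnlyDictionary<string, object> item, IEnumerable<string> groupByKeys)
    {
        StringBuilder sb = null;
        foreach (var key in groupByKeys)
        {
            if (!item.TryGetValue(key, out var value))
                continue;

            if (sb == null)
                sb = new StringBuilder("Reduce key: { ");

            sb.Append($"'{key}' : {value ?? "null"}");
        }

        // Returns null when none of the group-by fields were present on the item.
        return sb?.Append(" }").ToString();
    }
}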