/// <summary>
/// Reads the stats row of the time-series identified by <paramref name="slicer"/> from <paramref name="table"/>.
/// Outputs the value count, the [start, end] range and the stored series name.
/// Returns the scope that keeps the <paramref name="name"/> slice memory alive, or null when there is
/// no usable stats row (missing, or a deleted row that is being re-created).
/// </summary>
private static IDisposable ReadStats(DocumentsOperationContext context, Table table, TimeSeriesSliceHolder slicer, out long count, out DateTime start, out DateTime end, out Slice name)
{
    // Defaults for a series without stats: an empty (inverted) range and the name taken from the slicer.
    count = 0;
    start = DateTime.MaxValue;
    end = DateTime.MinValue;
    name = slicer.NameSlice;

    if (table.ReadByKey(slicer.StatsKey, out var statsRow) == false)
        return null;

    count = DocumentsStorage.TableValueToLong((int)StatsColumns.Count, ref statsRow);
    // Start is stored big-endian (for ordered keys), so swap bytes before building the DateTime.
    start = new DateTime(Bits.SwapBytes(DocumentsStorage.TableValueToLong((int)StatsColumns.Start, ref statsRow)));
    end = DocumentsStorage.TableValueToDateTime((int)StatsColumns.End, ref statsRow);

    var isDeletedStatsRow = count == 0 && start == default && end == default;
    if (isDeletedStatsRow)
    {
        // The stats row was deleted and is now being re-created, so treat the series as brand new.
        start = DateTime.MaxValue;
        end = DateTime.MinValue;
        return null;
    }

    // The stored name is authoritative; the returned scope owns the slice's memory.
    return DocumentsStorage.TableValueToSlice(context, (int)StatsColumns.Name, ref statsRow, out name);
}
/// <summary>
/// Collects up to <paramref name="take"/> rollup entries whose next-rollup time has already passed
/// (relative to <paramref name="currentTime"/>) into <paramref name="states"/>, scanning the rollup
/// table forward from etag <paramref name="start"/>. <paramref name="duration"/> measures the scan.
/// </summary>
internal void PrepareRollups(DocumentsOperationContext context, DateTime currentTime, long take, long start, List<RollupState> states, out Stopwatch duration)
{
    duration = Stopwatch.StartNew();

    var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);
    if (table == null)
        return; // no rollups have ever been scheduled

    var currentTicks = currentTime.Ticks;

    using (DocumentsStorage.GetEtagAsSlice(context, start, out var startSlice))
    {
        foreach (var entry in table.SeekForwardFrom(RollupSchema.Indexes[NextRollupIndex], startSlice, 0))
        {
            if (take <= 0)
                return;

            var rollUpTime = DocumentsStorage.TableValueToEtag((int)RollupColumns.NextRollup, ref entry.Result.Reader);
            if (rollUpTime > currentTicks)
                return; // entries past this point are due in the future — stop the scan here

            DocumentsStorage.TableValueToSlice(context, (int)RollupColumns.Key, ref entry.Result.Reader, out var key);
            SplitKey(key, out var docId, out var name);
            // Restore the original (user-facing) casing of the series name.
            name = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage.GetOriginalName(context, docId, name);

            var state = new RollupState
            {
                Key = key,
                DocId = docId,
                Name = name,
                Collection = DocumentsStorage.TableValueToId(context, (int)RollupColumns.Collection, ref entry.Result.Reader),
                NextRollup = new DateTime(rollUpTime),
                RollupPolicy = DocumentsStorage.TableValueToString(context, (int)RollupColumns.PolicyToApply, ref entry.Result.Reader),
                Etag = DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref entry.Result.Reader),
                ChangeVector = DocumentsStorage.TableValueToChangeVector(context, (int)RollupColumns.ChangeVector, ref entry.Result.Reader)
            };

            if (_logger.IsInfoEnabled)
                _logger.Info($"{state} is prepared.");

            states.Add(state);
            take--;
        }
    }
}
/// <summary>
/// Applies each prepared rollup in <c>_states</c> by delegating to <c>RollupOne</c>, skipping
/// entries that are stale (config removed/disabled, table row gone, policy deleted, or etag
/// mismatch). Returns the number of rollups performed so far (<c>RolledUp</c>).
/// </summary>
protected override long ExecuteCmd(DocumentsOperationContext context)
{
    var storage = context.DocumentDatabase.DocumentsStorage;
    RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
    var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);
    foreach (var item in _states)
    {
        // Configuration was removed entirely — nothing more to do for any state.
        if (_configuration == null)
        {
            return(RolledUp);
        }
        if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
        {
            continue;
        }
        if (config.Disabled)
        {
            continue;
        }
        // The rollup row may have been deleted since the states were prepared.
        if (table.ReadByKey(item.Key, out var current) == false)
        {
            continue;
        }
        var policy = config.GetPolicyByName(item.RollupPolicy, out _);
        if (policy == null)
        {
            // The policy this rollup was scheduled for no longer exists — drop the schedule entry.
            table.DeleteByKey(item.Key);
            continue;
        }
        if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
        {
            continue; // concurrency check
        }
        try
        {
            RollupOne(context, table, item, policy, config);
        }
        catch (NanValueException e)
        {
            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"{item} failed", e);
            }
            if (table.VerifyKeyExists(item.Key) == false)
            {
                // we should re-add it, in case we already removed this rollup
                using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                using (Slice.From(context.Allocator, item.ChangeVector, ByteStringType.Immutable, out var cv))
                using (Slice.From(context.Allocator, item.RollupPolicy, ByteStringType.Immutable, out var policySlice))
                using (table.Allocate(out var tvb))
                {
                    // Re-insert the rollup row with the same schedule data so it will be retried.
                    tvb.Add(slicer.StatsKey);
                    tvb.Add(slicer.CollectionSlice);
                    tvb.Add(Bits.SwapBytes(item.NextRollup.Ticks));
                    tvb.Add(policySlice);
                    tvb.Add(item.Etag);
                    tvb.Add(cv);
                    table.Set(tvb);
                }
            }
        }
        catch (RollupExceedNumberOfValuesException e)
        {
            var name = item.Name;
            var docId = item.DocId;
            try
            {
                // Best effort: resolve the original document id / series name casing for the alert text.
                var document = storage.Get(context, item.DocId, throwOnConflict: false);
                docId = document?.Id ?? docId;
                name = storage.TimeSeriesStorage.GetOriginalName(context, docId, name);
            }
            catch
            {
                // ignore
            }
            var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(msg, e);
            }
            // Surface the failure to the operator via the notification center instead of failing the command.
            var alert = AlertRaised.Create(context.DocumentDatabase.Name,
                "Failed to perform rollup because the time-series has more than 5 values",
                msg,
                AlertType.RollupExceedNumberOfValues,
                NotificationSeverity.Warning,
                $"{item.Collection}/{item.Name}",
                new ExceptionDetails(e));
            context.DocumentDatabase.NotificationCenter.Add(alert);
        }
    }
    return(RolledUp);
}
/// <summary>
/// Performs the actual aggregation for each prepared rollup in <c>_states</c>: reads the source
/// series over the due time frame, aggregates it per the policy's range grouping, appends the
/// result into the target (rolled-up) series, and re-schedules a follow-up rollup when more data
/// exists past the current frame. Only runs the aggregation on the first node in the topology.
/// Returns the number of rollups that changed data (<c>RolledUp</c>).
/// </summary>
protected override long ExecuteCmd(DocumentsOperationContext context)
{
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;
    RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
    var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);
    foreach (var item in _states)
    {
        // Configuration was removed entirely — nothing more to do for any state.
        if (_configuration == null)
        {
            return(RolledUp);
        }
        if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
        {
            continue;
        }
        if (config.Disabled)
        {
            continue;
        }
        // The rollup row may have been deleted since the states were prepared.
        if (table.ReadByKey(item.Key, out var current) == false)
        {
            continue;
        }
        var policy = config.GetPolicyByName(item.RollupPolicy, out _);
        if (policy == null)
        {
            table.DeleteByKey(item.Key);
            continue;
        }
        if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
        {
            continue; // concurrency check
        }
        // The raw series name is the part before the rollup separator; the target series name
        // is derived from it plus the policy.
        var rawTimeSeries = item.Name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];
        var intoTimeSeries = policy.GetTimeSeriesName(rawTimeSeries);
        var rollupStart = item.NextRollup.Add(-policy.AggregationTime);
        if (config.MaxRetention < TimeValue.MaxValue)
        {
            // Don't aggregate data that retention is about to (or has already) removed:
            // clamp the start to the frame implied by the retention window.
            var next = new DateTime(NextRollup(_now.Add(-config.MaxRetention), policy)).Add(-policy.AggregationTime);
            var rollupStartTicks = Math.Max(rollupStart.Ticks, next.Ticks);
            rollupStart = new DateTime(rollupStartTicks);
        }
        var intoReader = tss.GetReader(context, item.DocId, intoTimeSeries, rollupStart, DateTime.MaxValue);
        var previouslyAggregated = intoReader.AllValues().Any();
        if (previouslyAggregated)
        {
            var changeVector = intoReader.GetCurrentSegmentChangeVector();
            if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, changeVector) == ConflictStatus.AlreadyMerged)
            {
                // this rollup is already done
                table.DeleteByKey(item.Key);
                continue;
            }
        }
        if (_isFirstInTopology == false)
        {
            continue; // we execute the actual rollup only on the primary node to avoid conflicts
        }
        // End of the frame: one millisecond before the next rollup boundary.
        var rollupEnd = new DateTime(NextRollup(_now, policy)).Add(-policy.AggregationTime).AddMilliseconds(-1);
        var reader = tss.GetReader(context, item.DocId, item.Name, rollupStart, rollupEnd);
        if (previouslyAggregated)
        {
            var hasPriorValues = tss.GetReader(context, item.DocId, item.Name, DateTime.MinValue, rollupStart).AllValues().Any();
            if (hasPriorValues == false)
            {
                table.DeleteByKey(item.Key);
                var first = tss.GetReader(context, item.DocId, item.Name, rollupStart, DateTime.MaxValue).First();
                if (first == default)
                {
                    continue; // nothing we can do here
                }
                if (first.Timestamp > item.NextRollup)
                {
                    // if the 'source' time-series doesn't have any values it is retained.
                    // so we need to aggregate only from the next time frame
                    using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                    {
                        tss.Rollups.MarkForPolicy(context, slicer, policy, first.Timestamp);
                    }
                    continue;
                }
            }
        }
        // rollup from the raw data will generate 6-value roll up of (first, last, min, max, sum, count)
        // other rollups will aggregate each of those values by the type
        var mode = item.Name.Contains(TimeSeriesConfiguration.TimeSeriesRollupSeparator) ? AggregationMode.FromAggregated : AggregationMode.FromRaw;
        var rangeSpec = new RangeGroup();
        switch (policy.AggregationTime.Unit)
        {
            case TimeValueUnit.Second:
                // Second-based units are an exact tick count, aligned to second boundaries.
                rangeSpec.Ticks = TimeSpan.FromSeconds(policy.AggregationTime.Value).Ticks;
                rangeSpec.TicksAlignment = RangeGroup.Alignment.Second;
                break;
            case TimeValueUnit.Month:
                // Month-based units have variable length and are tracked as a month count.
                rangeSpec.Months = policy.AggregationTime.Value;
                break;
            default:
                throw new ArgumentOutOfRangeException(nameof(policy.AggregationTime.Unit), $"Not supported time value unit '{policy.AggregationTime.Unit}'");
        }
        rangeSpec.InitializeRange(rollupStart);
        List<SingleResult> values = null;
        try
        {
            values = GetAggregatedValues(reader, rangeSpec, mode);
        }
        catch (RollupExceedNumberOfValuesException e)
        {
            var name = item.Name;
            var docId = item.DocId;
            try
            {
                // Best effort: resolve the original document id / series name casing for the alert text.
                var document = context.DocumentDatabase.DocumentsStorage.Get(context, item.DocId, throwOnConflict: false);
                docId = document?.Id ?? docId;
                name = tss.GetOriginalName(context, docId, name);
            }
            catch
            {
                // ignore
            }
            var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(msg, e);
            }
            var alert = AlertRaised.Create(context.DocumentDatabase.Name,
                "Failed to perform rollup because the time-series has more than 5 values",
                msg,
                AlertType.RollupExceedNumberOfValues,
                NotificationSeverity.Warning,
                $"{item.DocId}/{item.Name}",
                new ExceptionDetails(e));
            context.DocumentDatabase.NotificationCenter.Add(alert);
            continue;
        }
        if (previouslyAggregated)
        {
            // if we need to re-aggregate we need to delete everything we have from that point on.
            var removeRequest = new TimeSeriesStorage.DeletionRangeRequest
            {
                Collection = item.Collection,
                DocumentId = item.DocId,
                Name = intoTimeSeries,
                From = rollupStart,
                To = DateTime.MaxValue,
            };
            tss.DeleteTimestampRange(context, removeRequest);
        }
        // Count this rollup only if the append actually changed the database change vector.
        var before = context.LastDatabaseChangeVector;
        var after = tss.AppendTimestamp(context, item.DocId, item.Collection, intoTimeSeries, values, verifyName: false);
        if (before != after)
        {
            RolledUp++;
        }
        table.DeleteByKey(item.Key);
        var stats = tss.Stats.GetStats(context, item.DocId, item.Name);
        if (stats.End > rollupEnd)
        {
            // we know that we have values after the current rollup and we need to mark them
            var nextRollup = rollupEnd.AddMilliseconds(1);
            intoReader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
            if (intoReader.Init() == false)
            {
                Debug.Assert(false, "We have values but no segment?");
                continue;
            }
            using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
            {
                tss.Rollups.MarkForPolicy(context, slicer, policy, intoReader.First().Timestamp);
            }
        }
    }
    return(RolledUp);
}
/// <summary>
/// Schema upgrade step: migrates compare-exchange entries from the legacy "CmpXchg" table
/// (key + value only) into the new per-key table that also carries a prefix index, then deletes
/// the legacy table and its leftover global index tree. Returns true when the step completes
/// (including the no-op case where the legacy table does not exist).
/// </summary>
public bool Update(UpdateStep step)
{
    // Legacy layout: primary key only.
    var oldCompareExchangeSchema = new TableSchema().
        DefineKey(new TableSchema.SchemaIndexDef
        {
            StartIndex = (int)ClusterStateMachine.CompareExchangeTable.Key,
            Count = 1
        });
    // New layout: primary key plus a named prefix index.
    var newCompareExchangeSchema = new TableSchema()
        .DefineKey(new TableSchema.SchemaIndexDef
        {
            StartIndex = (int)ClusterStateMachine.CompareExchangeTable.Key,
            Count = 1
        }).DefineIndex(new TableSchema.SchemaIndexDef
        {
            StartIndex = (int)ClusterStateMachine.CompareExchangeTable.PrefixIndex,
            Count = 1,
            Name = ClusterStateMachine.CompareExchangeIndex
        });
    const string oldTableName = "CmpXchg";
    using (Slice.From(step.WriteTx.Allocator, oldTableName, out var oldCompareExchangeTable))
    {
        var oldTable = step.ReadTx.OpenTable(oldCompareExchangeSchema, oldCompareExchangeTable);
        if (oldTable == null)
        {
            // Nothing to migrate — treat the step as done.
            return(true);
        }
        var newTableName = ClusterStateMachine.CompareExchange.ToString();
        foreach (var db in SchemaUpgradeExtensions.GetDatabases(step))
        {
            // update CompareExchange
            newCompareExchangeSchema.Create(step.WriteTx, newTableName, null);
            var newTable = step.WriteTx.OpenTable(newCompareExchangeSchema, newTableName);
            // Legacy keys are prefixed with the lower-cased database name.
            var compareExchangeOldKey = $"{db.ToLowerInvariant()}/";
            using (Slice.From(step.ReadTx.Allocator, compareExchangeOldKey, out var keyPrefix))
            {
                foreach (var item in oldTable.SeekByPrimaryKeyPrefix(keyPrefix, Slices.Empty, 0))
                {
                    var index = DocumentsStorage.TableValueToLong((int)ClusterStateMachine.CompareExchangeTable.Index, ref item.Value.Reader);
                    using (CompareExchangeCommandBase.GetPrefixIndexSlices(step.ReadTx.Allocator, db, index, out var buffer))
                    using (Slice.External(step.WriteTx.Allocator, buffer.Ptr, buffer.Length, out var prefixIndexSlice))
                    using (newTable.Allocate(out TableValueBuilder write))
                    using (var ctx = JsonOperationContext.ShortTermSingleUse())
                    {
                        // Clone the blittable value into the short-term context before writing,
                        // so the written bytes are independent of the old table's memory.
                        using (var bjro = new BlittableJsonReaderObject(
                            item.Value.Reader.Read((int)ClusterStateMachine.CompareExchangeTable.Value, out var size1),
                            size1, ctx).Clone(ctx)
                        )
                        {
                            // New row layout: key, raft index, value, prefix-index slice.
                            write.Add(item.Key);
                            write.Add(index);
                            write.Add(bjro.BasePointer, bjro.Size);
                            write.Add(prefixIndexSlice);
                            newTable.Set(write);
                        }
                    }
                }
            }
        }
    }
    // delete the old table
    step.WriteTx.DeleteTable(oldTableName);
    // remove the remaining CompareExchange global index
    if (step.WriteTx.LowLevelTransaction.RootObjects.Read(ClusterStateMachine.CompareExchangeIndex) != null)
    {
        step.WriteTx.DeleteTree(ClusterStateMachine.CompareExchangeIndex);
    }
    return(true);
}