protected override long ExecuteCmd(DocumentsOperationContext context)
{
    // Enqueue every time-series that matches the '_from' policy for this collection
    // into the rollup table, so the rollup background work will pick them up and
    // apply the '_to' policy. Returns how many series were marked.
    var timeSeriesStorage = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;
    var innerTx = context.Transaction.InnerTransaction;

    RollupSchema.Create(innerTx, TimeSeriesRollupTable, 16);
    var rollupTable = innerTx.OpenTable(RollupSchema, TimeSeriesRollupTable);

    var candidates = timeSeriesStorage.Stats.GetTimeSeriesNameByPolicy(context, _collection, _from.Name, _skip, BatchSize);
    foreach (var statsKey in candidates)
    {
        using (rollupTable.Allocate(out var row))
        using (DocumentIdWorker.GetStringPreserveCase(context, _collection.Name, out var collectionSlice))
        using (Slice.From(context.Allocator, _to.Name, ByteStringType.Immutable, out var targetPolicySlice))
        using (Slice.From(context.Allocator, string.Empty, ByteStringType.Immutable, out var emptyChangeVector))
        {
            // Row layout must follow RollupSchema's column order:
            // key, collection, next-rollup (big-endian for ordered iteration), policy, etag, change-vector.
            row.Add(statsKey);
            row.Add(collectionSlice);
            row.Add(Bits.SwapBytes(NextRollup(DateTime.MinValue, _to)));
            row.Add(targetPolicySlice);
            row.Add(0L); // no etag yet — this entry was never rolled up
            row.Add(emptyChangeVector);
            rollupTable.Set(row);
        }

        Marked++;
    }

    return Marked;
}
protected override long ExecuteCmd(DocumentsOperationContext context)
{
    // Processes the batch of pending rollup states in '_states': for each one that is
    // still valid (configuration present, policy enabled, rollup-table entry unchanged),
    // delegates the actual aggregation to RollupOne. Returns the number of rollups done
    // so far (RolledUp is presumably incremented inside RollupOne — TODO confirm).
    var storage = context.DocumentDatabase.DocumentsStorage;
    RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
    var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);
    foreach (var item in _states)
    {
        // Configuration was removed entirely — nothing more to do for any state.
        if (_configuration == null)
        {
            return (RolledUp);
        }
        // Skip states whose collection no longer has a time-series configuration,
        // or whose configuration is disabled.
        if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
        {
            continue;
        }
        if (config.Disabled)
        {
            continue;
        }
        // The pending entry may have been removed by a concurrent operation.
        if (table.ReadByKey(item.Key, out var current) == false)
        {
            continue;
        }
        var policy = config.GetPolicyByName(item.RollupPolicy, out _);
        if (policy == null)
        {
            // The policy was deleted — drop the stale pending entry.
            table.DeleteByKey(item.Key);
            continue;
        }
        if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
        {
            continue; // concurrency check — the entry changed since this state was captured
        }
        try
        {
            RollupOne(context, table, item, policy, config);
        }
        catch (NanValueException e)
        {
            // Aggregation hit a NaN value; log and make sure the pending entry survives
            // so the rollup can be retried later.
            if (_logger.IsInfoEnabled)
            {
                _logger.Info($"{item} failed", e);
            }
            if (table.VerifyKeyExists(item.Key) == false)
            {
                // we should re-add it, in case we already removed this rollup
                using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                using (Slice.From(context.Allocator, item.ChangeVector, ByteStringType.Immutable, out var cv))
                using (Slice.From(context.Allocator, item.RollupPolicy, ByteStringType.Immutable, out var policySlice))
                using (table.Allocate(out var tvb))
                {
                    // Re-insert with the same etag/change-vector so the concurrency
                    // check above still matches on retry.
                    tvb.Add(slicer.StatsKey);
                    tvb.Add(slicer.CollectionSlice);
                    tvb.Add(Bits.SwapBytes(item.NextRollup.Ticks));
                    tvb.Add(policySlice);
                    tvb.Add(item.Etag);
                    tvb.Add(cv);
                    table.Set(tvb);
                }
            }
        }
        catch (RollupExceedNumberOfValuesException e)
        {
            // Raise a warning alert; best-effort resolution of the original (pre-rollup)
            // document id and series name purely for a friendlier message.
            var name = item.Name;
            var docId = item.DocId;
            try
            {
                var document = storage.Get(context, item.DocId, throwOnConflict: false);
                docId = document?.Id ?? docId;
                name = storage.TimeSeriesStorage.GetOriginalName(context, docId, name);
            }
            catch
            {
                // ignore — the lookup is cosmetic; fall back to the raw id/name
            }
            var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(msg, e);
            }
            // NOTE(review): the alert key here is "{Collection}/{Name}" while the sibling
            // ExecuteCmd below uses "{DocId}/{Name}" for the same alert type — confirm
            // which is intended; differing keys mean the alerts won't deduplicate together.
            var alert = AlertRaised.Create(context.DocumentDatabase.Name,
                "Failed to perform rollup because the time-series has more than 5 values",
                msg, AlertType.RollupExceedNumberOfValues, NotificationSeverity.Warning,
                $"{item.Collection}/{item.Name}", new ExceptionDetails(e));
            context.DocumentDatabase.NotificationCenter.Add(alert);
        }
    }
    return (RolledUp);
}
protected override long ExecuteCmd(DocumentsOperationContext context)
{
    // Performs the actual rollup for every pending state in '_states': reads raw (or
    // previously-aggregated) values in the rollup window, aggregates them per the
    // policy's aggregation-time range, appends them to the target rolled-up series,
    // and re-marks the series if more values remain past the window. Returns the
    // number of rollups that actually wrote data.
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;
    RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
    var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);
    foreach (var item in _states)
    {
        // Same validity gates as the marking command: configuration present,
        // collection configured, policy enabled, entry still present and unchanged.
        if (_configuration == null)
        {
            return (RolledUp);
        }
        if (_configuration.Collections.TryGetValue(item.Collection, out var config) == false)
        {
            continue;
        }
        if (config.Disabled)
        {
            continue;
        }
        if (table.ReadByKey(item.Key, out var current) == false)
        {
            continue;
        }
        var policy = config.GetPolicyByName(item.RollupPolicy, out _);
        if (policy == null)
        {
            table.DeleteByKey(item.Key);
            continue;
        }
        if (item.Etag != DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref current))
        {
            continue; // concurrency check — entry changed since this state was captured
        }
        // "Heartrate@ByHour" -> raw name "Heartrate"; the target series name is the
        // raw name combined with this policy's suffix.
        var rawTimeSeries = item.Name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];
        var intoTimeSeries = policy.GetTimeSeriesName(rawTimeSeries);
        // The window to aggregate starts one aggregation-frame before the scheduled
        // next-rollup time...
        var rollupStart = item.NextRollup.Add(-policy.AggregationTime);
        if (config.MaxRetention < TimeValue.MaxValue)
        {
            // ...but never earlier than what the retention policy will keep anyway.
            var next = new DateTime(NextRollup(_now.Add(-config.MaxRetention), policy)).Add(-policy.AggregationTime);
            var rollupStartTicks = Math.Max(rollupStart.Ticks, next.Ticks);
            rollupStart = new DateTime(rollupStartTicks);
        }
        var intoReader = tss.GetReader(context, item.DocId, intoTimeSeries, rollupStart, DateTime.MaxValue);
        var previouslyAggregated = intoReader.AllValues().Any();
        if (previouslyAggregated)
        {
            var changeVector = intoReader.GetCurrentSegmentChangeVector();
            if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, changeVector) == ConflictStatus.AlreadyMerged)
            {
                // this rollup is already done
                table.DeleteByKey(item.Key);
                continue;
            }
        }
        if (_isFirstInTopology == false)
        {
            continue; // we execute the actual rollup only on the primary node to avoid conflicts
        }
        // Aggregate only fully-closed frames: stop 1ms before the start of the frame
        // that is still open relative to '_now'.
        var rollupEnd = new DateTime(NextRollup(_now, policy)).Add(-policy.AggregationTime).AddMilliseconds(-1);
        var reader = tss.GetReader(context, item.DocId, item.Name, rollupStart, rollupEnd);
        if (previouslyAggregated)
        {
            var hasPriorValues = tss.GetReader(context, item.DocId, item.Name, DateTime.MinValue, rollupStart).AllValues().Any();
            if (hasPriorValues == false)
            {
                // The source series has nothing before the window; this pending entry
                // is stale, so drop it and reschedule from the first real value.
                table.DeleteByKey(item.Key);
                var first = tss.GetReader(context, item.DocId, item.Name, rollupStart, DateTime.MaxValue).First();
                if (first == default)
                {
                    continue; // nothing we can do here
                }
                if (first.Timestamp > item.NextRollup)
                {
                    // if the 'source' time-series doesn't have any values it is retained.
                    // so we need to aggregate only from the next time frame
                    using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                    {
                        tss.Rollups.MarkForPolicy(context, slicer, policy, first.Timestamp);
                    }
                    continue;
                }
            }
        }
        // rollup from the raw data will generate 6-value roll up of (first, last, min, max, sum, count)
        // other rollups will aggregate each of those values by the type
        var mode = item.Name.Contains(TimeSeriesConfiguration.TimeSeriesRollupSeparator) ? AggregationMode.FromAggregated : AggregationMode.FromRaw;
        // Translate the policy's aggregation frame into a RangeGroup: second-based
        // units become fixed tick spans; month-based units use calendar months.
        var rangeSpec = new RangeGroup();
        switch (policy.AggregationTime.Unit)
        {
            case TimeValueUnit.Second:
                rangeSpec.Ticks = TimeSpan.FromSeconds(policy.AggregationTime.Value).Ticks;
                rangeSpec.TicksAlignment = RangeGroup.Alignment.Second;
                break;
            case TimeValueUnit.Month:
                rangeSpec.Months = policy.AggregationTime.Value;
                break;
            default:
                throw new ArgumentOutOfRangeException(nameof(policy.AggregationTime.Unit), $"Not supported time value unit '{policy.AggregationTime.Unit}'");
        }
        rangeSpec.InitializeRange(rollupStart);
        List<SingleResult> values = null;
        try
        {
            values = GetAggregatedValues(reader, rangeSpec, mode);
        }
        catch (RollupExceedNumberOfValuesException e)
        {
            // Too many values per entry to aggregate — warn and skip this state.
            // Original-name lookup is best-effort, purely for a friendlier message.
            var name = item.Name;
            var docId = item.DocId;
            try
            {
                var document = context.DocumentDatabase.DocumentsStorage.Get(context, item.DocId, throwOnConflict: false);
                docId = document?.Id ?? docId;
                name = tss.GetOriginalName(context, docId, name);
            }
            catch
            {
                // ignore — fall back to the raw id/name
            }
            var msg = $"Rollup '{item.RollupPolicy}' for time-series '{name}' in document '{docId}' failed.";
            if (_logger.IsInfoEnabled)
            {
                _logger.Info(msg, e);
            }
            var alert = AlertRaised.Create(context.DocumentDatabase.Name,
                "Failed to perform rollup because the time-series has more than 5 values",
                msg, AlertType.RollupExceedNumberOfValues, NotificationSeverity.Warning,
                $"{item.DocId}/{item.Name}", new ExceptionDetails(e));
            context.DocumentDatabase.NotificationCenter.Add(alert);
            continue;
        }
        if (previouslyAggregated)
        {
            // if we need to re-aggregate we need to delete everything we have from that point on.
            var removeRequest = new TimeSeriesStorage.DeletionRangeRequest
            {
                Collection = item.Collection,
                DocumentId = item.DocId,
                Name = intoTimeSeries,
                From = rollupStart,
                To = DateTime.MaxValue,
            };
            tss.DeleteTimestampRange(context, removeRequest);
        }
        // Count this rollup only if the append actually changed the database
        // change-vector (i.e. wrote something new).
        var before = context.LastDatabaseChangeVector;
        var after = tss.AppendTimestamp(context, item.DocId, item.Collection, intoTimeSeries, values, verifyName: false);
        if (before != after)
        {
            RolledUp++;
        }
        table.DeleteByKey(item.Key);
        var stats = tss.Stats.GetStats(context, item.DocId, item.Name);
        if (stats.End > rollupEnd)
        {
            // we know that we have values after the current rollup and we need to mark them
            var nextRollup = rollupEnd.AddMilliseconds(1);
            intoReader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
            if (intoReader.Init() == false)
            {
                Debug.Assert(false, "We have values but no segment?");
                continue;
            }
            using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
            {
                tss.Rollups.MarkForPolicy(context, slicer, policy, intoReader.First().Timestamp);
            }
        }
    }
    return (RolledUp);
}