private static bool RequiredForNextPolicy(DocumentsOperationContext context, TimeSeriesCollectionConfiguration config, TimeSeriesPolicy policy, Slice item, DateTime to)
{
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;

    var next = config.GetNextPolicy(policy);
    if (ReferenceEquals(next, TimeSeriesPolicy.AfterAllPolices) == false)
    {
        TimeSeriesRollups.SplitKey(item, out var docId, out var name);
        var raw = name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];

        var currentStats = tss.Stats.GetStats(context, docId, policy.GetTimeSeriesName(raw));
        var nextStats = tss.Stats.GetStats(context, docId, next.GetTimeSeriesName(raw));

        // the last timestamp the next policy has already aggregated up to
        var nextEnd = nextStats.End.Add(next.AggregationTime).AddMilliseconds(-1);
        if (nextEnd > currentStats.End)
            return false;

        // the next policy still needs this series as a rollup source before 'to'
        if (nextEnd < to)
            return true;
    }

    return false;
}
private async Task ApplyRetention(DocumentsOperationContext context, TimeSeriesCollectionConfiguration config, CollectionName collectionName, TimeSeriesPolicy policy, DateTime now)
{
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;
    if (policy.RetentionTime == TimeValue.MaxValue)
        return;

    var to = now.Add(-policy.RetentionTime);

    var list = new List<Slice>();
    while (true)
    {
        Cts.Token.ThrowIfCancellationRequested();

        context.Reset();
        context.Renew();

        list.Clear();

        using (context.OpenReadTransaction())
        {
            foreach (var item in tss.Stats.GetTimeSeriesByPolicyFromStartDate(context, collectionName, policy.Name, to, TimeSeriesRollups.TimeSeriesRetentionCommand.BatchSize))
            {
                // skip series that the next policy still needs as a rollup source
                if (RequiredForNextPolicy(context, config, policy, item, to))
                    continue;

                if (tss.Rollups.HasPendingRollupFrom(context, item, to) == false)
                    list.Add(item);
            }

            if (list.Count == 0)
                return;

            if (Logger.IsInfoEnabled)
            {
                Logger.Info($"Found {list.Count} time-series for retention in policy {policy.Name} with collection '{collectionName.Name}' up-to {to}"
#if DEBUG
                            + $"{Environment.NewLine}{string.Join(Environment.NewLine, list)}"
#endif
                );
            }

            var cmd = new TimeSeriesRollups.TimeSeriesRetentionCommand(list, collectionName.Name, to);
            await _database.TxMerger.Enqueue(cmd);
        }
    }
}
internal async Task VerifyPolicyExecutionAsync(DocumentStore store, TimeSeriesCollectionConfiguration configuration, int retentionNumberOfDays, string rawName = "Heartrate", List<TimeSeriesPolicy> policies = null)
{
    var raw = configuration.RawPolicy;
    configuration.ValidateAndInitialize();

    await WaitForValueAsync(() =>
    {
        using (var session = store.OpenSession())
        {
            var ts = session.TimeSeriesFor("users/karmel", rawName)
                .Get()?
                .ToList();

            Assert.NotNull(ts);
            if (raw != null)
                Assert.Equal(((TimeSpan)raw.RetentionTime).TotalMinutes, ts.Count);

            var policiesList = policies ?? configuration.Policies;
            foreach (var policy in policiesList)
            {
                ts = session.TimeSeriesFor("users/karmel", policy.GetTimeSeriesName(rawName))
                    .Get()?
                    .ToList();

                TimeValue retentionTime = policy.RetentionTime;
                if (retentionTime == TimeValue.MaxValue)
                {
                    // no retention configured: expect all data from the test window,
                    // rounded up to whole aggregation frames
                    var seconds = TimeSpan.FromDays(retentionNumberOfDays).TotalSeconds;
                    var x = Math.Ceiling(seconds / policy.AggregationTime.Value);
                    var max = Math.Max(x * policy.AggregationTime.Value, seconds);
                    retentionTime = TimeSpan.FromSeconds(max);
                }

                Assert.NotNull(ts);
                var expected = ((TimeSpan)retentionTime).TotalMinutes / ((TimeSpan)policy.AggregationTime).TotalMinutes;
                if ((int)expected != ts.Count && Math.Ceiling(expected) != ts.Count)
                    Assert.False(true, $"Expected {expected}, but got {ts.Count}");
            }
        }

        return true;
    }, true);
}
private void RollupOne(DocumentsOperationContext context, Table table, RollupState item, TimeSeriesPolicy policy, TimeSeriesCollectionConfiguration config)
{
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;

    var rawTimeSeries = item.Name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];
    var intoTimeSeries = policy.GetTimeSeriesName(rawTimeSeries);
    var rollupStart = item.NextRollup.Add(-policy.AggregationTime);

    if (config.MaxRetention < TimeValue.MaxValue)
    {
        // never roll up data that is older than the collection's maximum retention
        var next = new DateTime(NextRollup(_now.Add(-config.MaxRetention), policy)).Add(-policy.AggregationTime);
        var rollupStartTicks = Math.Max(rollupStart.Ticks, next.Ticks);
        rollupStart = new DateTime(rollupStartTicks);
    }

    var intoReader = tss.GetReader(context, item.DocId, intoTimeSeries, rollupStart, DateTime.MaxValue);
    var previouslyAggregated = intoReader.AllValues().Any();
    if (previouslyAggregated)
    {
        var changeVector = intoReader.GetCurrentSegmentChangeVector();
        if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, changeVector) == ConflictStatus.AlreadyMerged)
        {
            // this rollup is already done
            table.DeleteByKey(item.Key);
            return;
        }
    }

    if (_isFirstInTopology == false)
        return;

    var rollupEnd = new DateTime(NextRollup(_now, policy)).Add(-policy.AggregationTime).AddMilliseconds(-1);
    var reader = tss.GetReader(context, item.DocId, item.Name, rollupStart, rollupEnd);

    if (previouslyAggregated)
    {
        var hasPriorValues = tss.GetReader(context, item.DocId, item.Name, DateTime.MinValue, rollupStart).AllValues().Any();
        if (hasPriorValues == false)
        {
            table.DeleteByKey(item.Key);
            var first = tss.GetReader(context, item.DocId, item.Name, rollupStart, DateTime.MaxValue).First();
            if (first == default)
                return;

            if (first.Timestamp > item.NextRollup)
            {
                // the 'source' time-series has no values in this time frame (they were removed by retention),
                // so we need to aggregate only from the next time frame
                using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                {
                    tss.Rollups.MarkForPolicy(context, slicer, policy, first.Timestamp);
                }
                return;
            }
        }
    }

    // a rollup from the raw data will generate a 6-value rollup of (first, last, min, max, sum, count)
    // other rollups will aggregate each of those values by its type
    var mode = item.Name.Contains(TimeSeriesConfiguration.TimeSeriesRollupSeparator) ? AggregationMode.FromAggregated : AggregationMode.FromRaw;

    var rangeSpec = new RangeGroup();
    switch (policy.AggregationTime.Unit)
    {
        case TimeValueUnit.Second:
            rangeSpec.Ticks = TimeSpan.FromSeconds(policy.AggregationTime.Value).Ticks;
            rangeSpec.TicksAlignment = RangeGroup.Alignment.Second;
            break;
        case TimeValueUnit.Month:
            rangeSpec.Months = policy.AggregationTime.Value;
            break;
        default:
            throw new ArgumentOutOfRangeException(nameof(policy.AggregationTime.Unit), $"Not supported time value unit '{policy.AggregationTime.Unit}'");
    }
    rangeSpec.InitializeRange(rollupStart);

    var values = GetAggregatedValues(reader, rangeSpec, mode);

    if (previouslyAggregated)
    {
        // if we need to re-aggregate, we need to delete everything we have from that point on
        var removeRequest = new TimeSeriesStorage.DeletionRangeRequest
        {
            Collection = item.Collection,
            DocumentId = item.DocId,
            Name = intoTimeSeries,
            From = rollupStart,
            To = DateTime.MaxValue,
        };
        tss.DeleteTimestampRange(context, removeRequest);
    }

    var before = context.LastDatabaseChangeVector;
    var after = tss.AppendTimestamp(context, item.DocId, item.Collection, intoTimeSeries, values, verifyName: false);
    if (before != after)
        RolledUp++;

    table.DeleteByKey(item.Key);

    var stats = tss.Stats.GetStats(context, item.DocId, item.Name);
    if (stats.End > rollupEnd)
    {
        // we know that we have values after the current rollup and we need to mark them
        var nextRollup = rollupEnd.AddMilliseconds(1);
        intoReader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
        if (intoReader.Init() == false)
        {
            Debug.Assert(false, "We have values but no segment?");
            return;
        }

        using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
        {
            tss.Rollups.MarkForPolicy(context, slicer, policy, intoReader.First().Timestamp);
        }
    }
}
public void Examples()
{
    var store = new DocumentStore
    {
        // placeholder URL; the address was masked in the source
        Urls = new[] { "http://localhost:8080" }
    };

    using (var session = store.OpenSession())
    {
        #region
        //Create local instance of the raw time-series
        var rawTS = session.TimeSeriesFor("sales/1", "rawSales");

        //Create local instance of the rollup time-series - first method:
        //using the rollup time-series' full name
        var dailyRollupTS = session.TimeSeriesFor("sales/1", "rawSales@DailyRollupForOneYear");

        //Create local instance of the rollup time-series - second method:
        //using the rollup policy itself and the raw time-series' name
        //('dailyRollup' is the TimeSeriesPolicy instance used to configure the collection)
        var rollupTimeSeries2 = session.TimeSeriesFor("sales/1", dailyRollup.GetTimeSeriesName("rawSales"));

        //Retrieve all the data from both time-series
        var rawData = rawTS.Get(DateTime.MinValue, DateTime.MaxValue).ToList();
        var rollupData = dailyRollupTS.Get(DateTime.MinValue, DateTime.MaxValue).ToList();
        #endregion
    }
}
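The Examples() method above only reads a rollup series that already exists on the server. As a minimal sketch of the configuration side, the following shows how such a policy could be registered with ConfigureTimeSeriesOperation; the "Sales" collection name, the one-week raw retention, and the daily/one-year policy values are assumptions chosen to match the names used above, not values taken from the original sample.

// Sketch only. Assumed namespaces: Raven.Client.Documents,
// Raven.Client.Documents.Operations.TimeSeries, Sparrow (TimeValue).
public async Task ConfigureRollupAndRetentionAsync(IDocumentStore store)
{
    var config = new TimeSeriesConfiguration
    {
        Collections = new Dictionary<string, TimeSeriesCollectionConfiguration>
        {
            // assumed collection name
            ["Sales"] = new TimeSeriesCollectionConfiguration
            {
                // keep the raw data for one week (assumed value)
                RawPolicy = new RawTimeSeriesPolicy(TimeValue.FromDays(7)),
                Policies = new List<TimeSeriesPolicy>
                {
                    // aggregate into daily entries and keep the rollup for one year;
                    // this is the policy that produces "rawSales@DailyRollupForOneYear"
                    new TimeSeriesPolicy("DailyRollupForOneYear", TimeValue.FromDays(1), TimeValue.FromYears(1))
                }
            }
        }
    };

    await store.Maintenance.SendAsync(new ConfigureTimeSeriesOperation(config));
}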