        internal void PrepareRollups(DocumentsOperationContext context, DateTime currentTime, long take, long start, List<RollupState> states, out Stopwatch duration)
        {
            duration = Stopwatch.StartNew();

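            // each pending rollup is stored as a row in the rollups table; if the table was never created
            // there is nothing to aggregate yet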
            var table = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);

            if (table == null)
            {
                return;
            }

            var currentTicks = currentTime.Ticks;

            using (DocumentsStorage.GetEtagAsSlice(context, start, out var startSlice))
            {
                foreach (var item in table.SeekForwardFrom(RollupSchema.Indexes[NextRollupIndex], startSlice, 0))
                {
                    if (take <= 0)
                    {
                        return;
                    }

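                    // the index is sorted by next-rollup time, so once we reach an entry scheduled
                    // after 'currentTime' there is nothing more to do in this batch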
                    var rollUpTime = DocumentsStorage.TableValueToEtag((int)RollupColumns.NextRollup, ref item.Result.Reader);
                    if (rollUpTime > currentTicks)
                    {
                        return;
                    }

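                    // the rollup key holds both the document id and the time-series name;
                    // GetOriginalName restores the user-facing casing of the series name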
                    DocumentsStorage.TableValueToSlice(context, (int)RollupColumns.Key, ref item.Result.Reader, out var key);
                    SplitKey(key, out var docId, out var name);
                    name = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage.GetOriginalName(context, docId, name);

                    var state = new RollupState
                    {
                        Key          = key,
                        DocId        = docId,
                        Name         = name,
                        Collection   = DocumentsStorage.TableValueToId(context, (int)RollupColumns.Collection, ref item.Result.Reader),
                        NextRollup   = new DateTime(rollUpTime),
                        RollupPolicy = DocumentsStorage.TableValueToString(context, (int)RollupColumns.PolicyToApply, ref item.Result.Reader),
                        Etag         = DocumentsStorage.TableValueToLong((int)RollupColumns.Etag, ref item.Result.Reader),
                        ChangeVector = DocumentsStorage.TableValueToChangeVector(context, (int)RollupColumns.ChangeVector, ref item.Result.Reader)
                    };

                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info($"{state} is prepared.");
                    }

                    states.Add(state);
                    take--;
                }
            }
        }

            private void RollupOne(DocumentsOperationContext context, Table table, RollupState item, TimeSeriesPolicy policy, TimeSeriesCollectionConfiguration config)
            {
                var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;

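                // derive the raw series name, the target (rolled-up) series name and the start of the
                // aggregation frame covered by this rollup entry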
                var rawTimeSeries  = item.Name.Split(TimeSeriesConfiguration.TimeSeriesRollupSeparator)[0];
                var intoTimeSeries = policy.GetTimeSeriesName(rawTimeSeries);
                var rollupStart    = item.NextRollup.Add(-policy.AggregationTime);

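                // never aggregate data that has already fallen outside the collection's retention window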
                if (config.MaxRetention < TimeValue.MaxValue)
                {
                    var next             = new DateTime(NextRollup(_now.Add(-config.MaxRetention), policy)).Add(-policy.AggregationTime);
                    var rollupStartTicks = Math.Max(rollupStart.Ticks, next.Ticks);
                    rollupStart = new DateTime(rollupStartTicks);
                }

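                // check whether this frame was already rolled up, e.g. by a previous run or on another node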
                var intoReader           = tss.GetReader(context, item.DocId, intoTimeSeries, rollupStart, DateTime.MaxValue);
                var previouslyAggregated = intoReader.AllValues().Any();

                if (previouslyAggregated)
                {
                    var changeVector = intoReader.GetCurrentSegmentChangeVector();
                    if (ChangeVectorUtils.GetConflictStatus(item.ChangeVector, changeVector) == ConflictStatus.AlreadyMerged)
                    {
                        // this rollup is already done
                        table.DeleteByKey(item.Key);
                        return;
                    }
                }

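                // only the first node in the database topology performs the actual rollup;
                // the other nodes will get the rolled-up values through replication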
                if (_isFirstInTopology == false)
                {
                    return;
                }

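                // aggregate only frames that are already closed; the frame containing 'now' is still open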
                var rollupEnd = new DateTime(NextRollup(_now, policy)).Add(-policy.AggregationTime).AddMilliseconds(-1);
                var reader    = tss.GetReader(context, item.DocId, item.Name, rollupStart, rollupEnd);

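                // if we aggregated before but the raw values prior to this frame are gone (e.g. removed
                // by retention), the pending entry may be stale and the rollup has to be re-scheduled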
                if (previouslyAggregated)
                {
                    var hasPriorValues = tss.GetReader(context, item.DocId, item.Name, DateTime.MinValue, rollupStart).AllValues().Any();
                    if (hasPriorValues == false)
                    {
                        table.DeleteByKey(item.Key);
                        var first = tss.GetReader(context, item.DocId, item.Name, rollupStart, DateTime.MaxValue).First();
                        if (first == default)
                        {
                            return;
                        }

                        if (first.Timestamp > item.NextRollup)
                        {
                            // the 'source' time-series has no values inside the current rollup frame
                            // (retention may have removed them), so re-schedule this policy to aggregate
                            // from the frame of the first value that does exist
                            using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                            {
                                tss.Rollups.MarkForPolicy(context, slicer, policy, first.Timestamp);
                            }

                            return;
                        }
                    }
                }

                // a rollup from the raw data generates a 6-value rollup of (first, last, min, max, sum, count);
                // rollups of already-rolled-up data aggregate each of those values by its own type
                var mode      = item.Name.Contains(TimeSeriesConfiguration.TimeSeriesRollupSeparator) ? AggregationMode.FromAggregated : AggregationMode.FromRaw;
                var rangeSpec = new RangeGroup();

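                // a policy's aggregation time is expressed either in whole seconds or in whole months,
                // so these are the only two units we can get here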
                switch (policy.AggregationTime.Unit)
                {
                    case TimeValueUnit.Second:
                        rangeSpec.Ticks          = TimeSpan.FromSeconds(policy.AggregationTime.Value).Ticks;
                        rangeSpec.TicksAlignment = RangeGroup.Alignment.Second;
                        break;

                    case TimeValueUnit.Month:
                        rangeSpec.Months = policy.AggregationTime.Value;
                        break;

                    default:
                        throw new ArgumentOutOfRangeException(nameof(policy.AggregationTime.Unit), $"Unsupported time value unit '{policy.AggregationTime.Unit}'");
                }

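                // align the range to the frame that contains 'rollupStart' and compute the aggregated values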
                rangeSpec.InitializeRange(rollupStart);

                var values = GetAggregatedValues(reader, rangeSpec, mode);

                if (previouslyAggregated)
                {
                    // if we need to re-aggregate we need to delete everything we have from that point on.
                    var removeRequest = new TimeSeriesStorage.DeletionRangeRequest
                    {
                        Collection = item.Collection,
                        DocumentId = item.DocId,
                        Name       = intoTimeSeries,
                        From       = rollupStart,
                        To         = DateTime.MaxValue,
                    };

                    tss.DeleteTimestampRange(context, removeRequest);
                }

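                // the database change vector advances only if the append actually wrote something,
                // so comparing it before and after tells us whether this rollup produced new data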
                var before = context.LastDatabaseChangeVector;
                var after  = tss.AppendTimestamp(context, item.DocId, item.Collection, intoTimeSeries, values, verifyName: false);

                if (before != after)
                {
                    RolledUp++;
                }

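                // remove the processed entry and, if newer values already exist past this frame,
                // schedule the next rollup for this policy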
                MarkForNextPolicyAfterRollup(context, table, item, policy, tss, rollupEnd);
            }
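
            // called after a rollup entry has been processed: removes it from the rollups table and, when the
            // source series already has values beyond 'rollupEnd', marks the policy so the next frame gets rolled up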
            private static void MarkForNextPolicyAfterRollup(DocumentsOperationContext context, Table table, RollupState item, TimeSeriesPolicy policy, TimeSeriesStorage tss,
                                                             DateTime rollupEnd)
            {
                table.DeleteByKey(item.Key);
                var stats = tss.Stats.GetStats(context, item.DocId, item.Name);

                if (stats.End > rollupEnd)
                {
                    // we know that we have values after the current rollup and we need to mark them
                    var nextRollup = rollupEnd.AddMilliseconds(1);
                    var reader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
                    if (reader.Init() == false)
                    {
                        Debug.Assert(false, "We have values but no segment?");
                        return;
                    }

                    using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
                    {
                        tss.Rollups.MarkForPolicy(context, slicer, policy, reader.First().Timestamp);
                    }
                }
            }