public async Task Stream()
{
    var documentId = GetStringQueryString("docId");
    var name = GetStringQueryString("name");
    var fromStr = GetStringQueryString("from", required: false);
    var toStr = GetStringQueryString("to", required: false);
    var offset = GetTimeSpanQueryString("offset", required: false);

    var from = string.IsNullOrEmpty(fromStr)
        ? DateTime.MinValue
        : TimeSeriesHandler.ParseDate(fromStr, name);

    var to = string.IsNullOrEmpty(toStr)
        ? DateTime.MaxValue
        : TimeSeriesHandler.ParseDate(toStr, name);

    using (ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    using (context.OpenReadTransaction())
    using (var token = CreateOperationToken())
    await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
    {
        var reader = new TimeSeriesReader(context, documentId, name, from, to, offset, token.Token);

        writer.WriteStartObject();
        writer.WritePropertyName("Results");
        writer.WriteStartArray();

        var first = true;
        foreach (var entry in reader.AllValues())
        {
            // write the separator before every entry except the first,
            // so the array ends without a trailing comma
            if (first == false)
                writer.WriteComma();
            first = false;

            context.Write(writer, entry.ToTimeSeriesEntryJson());
            await writer.MaybeFlushAsync(token.Token);
        }

        writer.WriteEndArray();
        writer.WriteEndObject();

        await writer.MaybeFlushAsync(token.Token);
    }
}
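// Illustrative sketch only, not RavenDB code: the same streaming pattern expressed
// with System.Text.Json's Utf8JsonWriter, which (unlike the blittable writer above)
// inserts the commas between array elements itself. The type, method, and parameter
// names below are hypothetical.
using System;
using System.Collections.Generic;
using System.IO;
using System.Text.Json;

public static class StreamingSketch
{
    public static void WriteResults(Stream output, IEnumerable<(DateTime Timestamp, double Value)> entries)
    {
        using var writer = new Utf8JsonWriter(output);
        writer.WriteStartObject();
        writer.WriteStartArray("Results");

        foreach (var (timestamp, value) in entries)
        {
            writer.WriteStartObject();
            writer.WriteString("Timestamp", timestamp); // written as ISO-8601
            writer.WriteNumber("Value", value);
            writer.WriteEndObject();
        }

        writer.WriteEndArray();
        writer.WriteEndObject();
        writer.Flush();
    }
}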
private static void MarkForNextPolicyAfterRollup(DocumentsOperationContext context, Table table, RollupState item, TimeSeriesPolicy policy, TimeSeriesStorage tss, DateTime rollupEnd)
{
    table.DeleteByKey(item.Key);
    (long Count, DateTime Start, DateTime End) stats = tss.Stats.GetStats(context, item.DocId, item.Name);

    if (stats.End > rollupEnd)
    {
        // we know that we have values after the current rollup and we need to mark them
        var nextRollup = rollupEnd.AddMilliseconds(1);
        var intoReader = tss.GetReader(context, item.DocId, item.Name, nextRollup, DateTime.MaxValue);
        if (intoReader.Init() == false)
        {
            Debug.Assert(false, "We have values but no segment?");
            return;
        }

        using (var slicer = new TimeSeriesSliceHolder(context, item.DocId, item.Name, item.Collection))
        {
            tss.Rollups.MarkForPolicy(context, slicer, policy, intoReader.First().Timestamp);
        }
    }
}
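// The 1 ms shift above is the boundary step: the current rollup covers values up to
// and including rollupEnd, so the scan for remaining raw values starts just past it.
// A minimal illustration:
//
//   var rollupEnd  = new DateTime(2020, 1, 1, 0, 59, 59, 999, DateTimeKind.Utc);
//   var nextRollup = rollupEnd.AddMilliseconds(1); // 2020-01-01 01:00:00.000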
public SegmentResult(TimeSeriesReader reader)
{
    _reader = reader;
}
internal static unsafe TimeSeriesRangeResult GetTimeSeriesRange(DocumentsOperationContext context, string docId, string name, DateTime from, DateTime to, ref int start, ref int pageSize)
{
    if (pageSize == 0)
        return new TimeSeriesRangeResult();

    var values = new List<TimeSeriesEntry>();
    var reader = new TimeSeriesReader(context, docId, name, from, to, offset: null);

    // init hash
    var size = Sodium.crypto_generichash_bytes();
    Debug.Assert((int)size == 32);
    var cryptoGenerichashStatebytes = (int)Sodium.crypto_generichash_statebytes();
    var state = stackalloc byte[cryptoGenerichashStatebytes];
    if (Sodium.crypto_generichash_init(state, null, UIntPtr.Zero, size) != 0)
        ComputeHttpEtags.ThrowFailToInitHash();

    var oldStart = start;
    var lastResult = true;

    foreach (var (individualValues, segmentResult) in reader.SegmentsOrValues())
    {
        // if the whole segment falls inside the skipped prefix, consume it
        // without enumerating its entries
        if (individualValues == null && start > segmentResult.Summary.Span[0].Count)
        {
            start -= segmentResult.Summary.Span[0].Count;
            continue;
        }

        var enumerable = individualValues ?? segmentResult.Values;
        foreach (var singleResult in enumerable)
        {
            if (start-- > 0)
                continue;

            if (pageSize-- <= 0)
            {
                lastResult = false;
                break;
            }

            values.Add(new TimeSeriesEntry
            {
                Timestamp = singleResult.Timestamp,
                Tag = singleResult.Tag,
                Values = singleResult.Values.ToArray(),
                IsRollup = singleResult.Type == SingleResultType.RolledUp
            });
        }

        ComputeHttpEtags.HashChangeVector(state, segmentResult?.ChangeVector);

        if (pageSize <= 0)
            break;
    }

    if (oldStart > 0 && values.Count == 0)
        return new TimeSeriesRangeResult();

    return new TimeSeriesRangeResult
    {
        From = oldStart > 0 ? values[0].Timestamp : from,
        To = lastResult ? to : values.Last().Timestamp,
        Entries = values.ToArray(),
        Hash = ComputeHttpEtags.FinalizeHash(size, state)
    };
}
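// A minimal, self-contained sketch (hypothetical types, not RavenDB code) of the
// paging scheme above: whole segments are skipped while 'start' still exceeds the
// segment's entry count, and only then are individual entries enumerated.
using System.Collections.Generic;

public static class PagingSketch
{
    public static List<int> Page(IEnumerable<int[]> segments, int start, int pageSize)
    {
        var results = new List<int>();
        foreach (var segment in segments)
        {
            if (start >= segment.Length)
            {
                // the whole segment falls inside the offset; skip it cheaply
                start -= segment.Length;
                continue;
            }

            foreach (var value in segment)
            {
                if (start-- > 0)
                    continue; // still consuming the offset

                if (pageSize-- <= 0)
                    return results; // page is full

                results.Add(value);
            }
        }
        return results;
    }
}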
public static List<SingleResult> GetAggregatedValues(TimeSeriesReader reader, RangeGroup rangeSpec, AggregationMode mode)
{
    var aggStates = new TimeSeriesAggregation(mode); // we always will aggregate here by Min, Max, First, Last, Sum, Count, Mean
    var results = new List<SingleResult>();

    foreach (var it in reader.SegmentsOrValues())
    {
        if (it.IndividualValues != null)
        {
            AggregateIndividualItems(it.IndividualValues);
        }
        else
        {
            // we might need to close the old aggregation range and start a new one
            MaybeMoveToNextRange(it.Segment.Start);

            // now we need to see if we can consume the whole segment, or
            // if the range it covers needs to be broken up into multiple ranges.
            // For example, if the segment covers 3 days, but we group by 1 hour,
            // we still have to deal with the individual values
            if (it.Segment.End > rangeSpec.End)
            {
                AggregateIndividualItems(it.Segment.Values);
            }
            else
            {
                var span = it.Segment.Summary.SegmentValues.Span;
                aggStates.Segment(span);
            }
        }
    }

    if (aggStates.Any)
    {
        var result = new SingleResult
        {
            Timestamp = rangeSpec.Start,
            Values = new Memory<double>(aggStates.Values.ToArray()),
            Status = TimeSeriesValuesSegment.Live,
            Type = SingleResultType.RolledUp
            // TODO: Tag = ""
        };
        TimeSeriesStorage.AssertNoNanValue(result);
        results.Add(result);
    }

    return results;

    void MaybeMoveToNextRange(DateTime ts)
    {
        if (rangeSpec.WithinRange(ts))
            return;

        if (aggStates.Any)
        {
            // flush the aggregation accumulated for the range we are leaving
            var result = new SingleResult
            {
                Timestamp = rangeSpec.Start,
                Values = new Memory<double>(aggStates.Values.ToArray()),
                Status = TimeSeriesValuesSegment.Live,
                Type = SingleResultType.RolledUp
                // TODO: Tag = ""
            };
            TimeSeriesStorage.AssertNoNanValue(result);
            results.Add(result);
        }

        rangeSpec.MoveToNextRange(ts);
        aggStates.Init();
    }

    void AggregateIndividualItems(IEnumerable<SingleResult> items)
    {
        foreach (var cur in items)
        {
            if (cur.Status == TimeSeriesValuesSegment.Dead)
                continue;

            MaybeMoveToNextRange(cur.Timestamp);
            aggStates.Step(cur.Values.Span);
        }
    }
}
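// A compact sketch (hypothetical, not RavenDB code) of the flow inside
// MaybeMoveToNextRange: values accumulate until a timestamp falls outside the
// current bucket, at which point the bucket is emitted and a fresh one started.
// Here the range is a fixed hour and the aggregation is just Sum and Count.
using System;
using System.Collections.Generic;

public static class BucketingSketch
{
    // assumes 'points' is ordered by Timestamp, as the reader's output is
    public static List<(DateTime BucketStart, double Sum, long Count)> AggregateByHour(
        IEnumerable<(DateTime Timestamp, double Value)> points)
    {
        var results = new List<(DateTime BucketStart, double Sum, long Count)>();
        DateTime bucketStart = default;
        double sum = 0;
        long count = 0;

        foreach (var (ts, value) in points)
        {
            var start = new DateTime(ts.Year, ts.Month, ts.Day, ts.Hour, 0, 0, ts.Kind);
            if (count > 0 && start != bucketStart)
            {
                results.Add((bucketStart, sum, count)); // close the previous bucket
                sum = 0;
                count = 0;
            }

            bucketStart = start;
            sum += value;
            count++;
        }

        if (count > 0)
            results.Add((bucketStart, sum, count)); // flush the trailing bucket

        return results;
    }
}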
internal static unsafe TimeSeriesRangeResult GetTimeSeriesRange(DocumentsOperationContext context, string docId, string name, DateTime from, DateTime to, ref int start, ref int pageSize)
{
    if (pageSize == 0)
        return null;

    var values = new List<TimeSeriesEntry>();
    var reader = new TimeSeriesReader(context, docId, name, from, to, offset: null);

    // init hash
    var size = Sodium.crypto_generichash_bytes();
    Debug.Assert((int)size == 32);
    var cryptoGenerichashStatebytes = (int)Sodium.crypto_generichash_statebytes();
    var state = stackalloc byte[cryptoGenerichashStatebytes];
    if (Sodium.crypto_generichash_init(state, null, UIntPtr.Zero, size) != 0)
        ComputeHttpEtags.ThrowFailToInitHash();

    var initialStart = start;
    var hasMore = false;
    DateTime lastSeenEntry = from;

    foreach (var (individualValues, segmentResult) in reader.SegmentsOrValues())
    {
        // skip whole segments while the offset still exceeds the segment's live entries
        if (individualValues == null && start > segmentResult.Summary.NumberOfLiveEntries)
        {
            lastSeenEntry = segmentResult.End;
            start -= segmentResult.Summary.NumberOfLiveEntries;
            continue;
        }

        var enumerable = individualValues ?? segmentResult.Values;
        foreach (var singleResult in enumerable)
        {
            lastSeenEntry = segmentResult.End;

            if (start-- > 0)
                continue;

            if (pageSize-- <= 0)
            {
                hasMore = true;
                break;
            }

            values.Add(new TimeSeriesEntry
            {
                Timestamp = singleResult.Timestamp,
                Tag = singleResult.Tag,
                Values = singleResult.Values.ToArray(),
                IsRollup = singleResult.Type == SingleResultType.RolledUp
            });
        }

        ComputeHttpEtags.HashChangeVector(state, segmentResult.ChangeVector);

        if (pageSize <= 0)
            break;
    }

    var hash = ComputeHttpEtags.FinalizeHash(size, state);

    if (initialStart > 0 && values.Count == 0)
    {
        // this is a special case, because before the 'start' we might have values
        return new TimeSeriesRangeResult
        {
            From = lastSeenEntry,
            To = to,
            Entries = values.ToArray(),
            Hash = hash
        };
    }

    return new TimeSeriesRangeResult
    {
        From = initialStart > 0 ? values[0].Timestamp : from,
        To = hasMore ? values.Last().Timestamp : to,
        Entries = values.ToArray(),
        Hash = hash
    };
}
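// Hypothetical caller sketch: paging through a full range by advancing 'start'
// across calls. 'context', 'docId', and 'name' stand in for real values; this is
// only meant to show how the ref start/pageSize contract composes across pages.
var start = 0;
const int pageSize = 128;
while (true)
{
    int s = start, p = pageSize;
    var page = GetTimeSeriesRange(context, docId, name, DateTime.MinValue, DateTime.MaxValue, ref s, ref p);
    if (page == null || page.Entries.Length == 0)
        break; // nothing left before or after 'start'

    // ... consume page.Entries ...
    start += page.Entries.Length; // the next call skips what we have already seen
}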