// Could flush on a timer.
// Moves completed-bucket aggregates (or all of them, when 'always' is set)
// out of the in-memory map and writes them to storage in one batch.
// NOTE(review): entries are removed before WriteBatchAsync runs, so a storage
// error here drops the data — known behavior, tracked upstream.
private async Task FlushTimelineAggregateAsync(bool always = false)
{
    long nowBucket = TimeBucket.ConvertToBucket(DateTime.UtcNow);
    var toFlush = new List<TimelineAggregateEntity>();

    lock (_lock)
    {
        // Select everything whose bucket has closed (or everything on 'always').
        foreach (var entry in _timespan)
        {
            long entryBucket = TimeBucket.ConvertToBucket(entry.Timestamp.DateTime);
            if (always || entryBucket < nowBucket)
            {
                toFlush.Add(entry);
            }
        }

        // Drop the selected entries from the map while still under the lock.
        foreach (var entry in toFlush)
        {
            _timespan.Remove(entry.RowKey);
        }
    }

    if (toFlush.Count > 0)
    {
        await WriteBatchAsync(toFlush);
    }
}
// Builds the row key for a (function, time-bucket, host) aggregate row.
// The function name is normalized via TableScheme before formatting.
internal static string RowKeyTimeInterval(string functionId, DateTime dateTime, string hostId)
{
    long bucket = TimeBucket.ConvertToBucket(dateTime);
    string normalizedName = TableScheme.NormalizeFunctionName(functionId);
    return string.Format(CultureInfo.InvariantCulture, RowKeyFormat, normalizedName, bucket, hostId);
}
// Builds the row-key prefix that matches every row in the given time bucket.
internal static string RowKeyTimeIntervalPrefix(DateTime dateTime)
{
    return string.Format(
        CultureInfo.InvariantCulture,
        RowKeyPrefixTimeFormat,
        TimeBucket.ConvertToBucket(dateTime));
}
// Could flush on a timer.
// Flushes aggregate entities whose time bucket has closed (or all entities,
// when 'always' is set) from the in-memory map to storage as one batch.
private async Task FlushTimelineAggregateAsync(bool always = false)
{
    long currentBucket = TimeBucket.ConvertToBucket(DateTime.UtcNow);
    List<TimelineAggregateEntity> flush = new List<TimelineAggregateEntity>();
    lock (_lock)
    {
        // Collect entities whose bucket is strictly before the current one.
        foreach (var entity in _timespan)
        {
            long thisBucket = TimeBucket.ConvertToBucket(entity.Timestamp.DateTime);
            if ((thisBucket < currentBucket) || always)
            {
                flush.Add(entity);
            }
        }
        // https://github.com/Azure/azure-webjobs-sdk/issues/1761
        // Just making a note while I'm in this code fixing something else.
        // It looks like this will drop data if there is any type of error when communicating with storage.
        // Other code paths drop the data after successfully writing to storage
        foreach (var val in flush)
        {
            _timespan.Remove(val.RowKey);
        }
    }
    // Write outside the lock; by this point the entities are already removed
    // from _timespan (see the issue note above).
    if (flush.Count > 0)
    {
        await WriteBatchAsync(flush);
    }
}
// Builds the row key for a (FunctionId, time-bucket, host) aggregate row.
// Unlike the string overload, no extra normalization is applied here; the
// FunctionId value is formatted as-is.
internal static string RowKeyTimeInterval(FunctionId functionId, DateTime dateTime, string hostId)
{
    long bucket = TimeBucket.ConvertToBucket(dateTime);
    return string.Format(CultureInfo.InvariantCulture, RowKeyFormat, functionId, bucket, hostId);
}
// This should be the only thread writing to this container.
// Write an "active" entry every interval.
// If a previous entry exists, then extend its duration (rather than writing many entries). That simplifies the reader.
async Task PollerAsync()
{
    do
    {
        try
        {
            await Task.Delay(_interval, _cancel.Token);
        }
        catch (OperationCanceledException)
        {
            // Don't return yet. One last chance to flush
        }

        // Snapshot outstanding work under the lock.
        bool hasOutstanding;
        lock (_lock)
        {
            hasOutstanding = _outstandingCount.Count > 0;
        }

        // Consider the container active if anything happened recently or work
        // is still in flight; reset the flag so the next interval only sees
        // new activity.
        bool active = _recent || hasOutstanding;
        _recent = false;

        if (active)
        {
            var now = DateTime.UtcNow;

            // If previous exists, update it
            var currentBucket = TimeBucket.ConvertToBucket(now);
            var prevBucket = currentBucket - 1;
            ContainerActiveEntity prevEntry = await TryGetAsync(prevBucket);
            if (prevEntry == null)
            {
                prevEntry = await TryGetAsync(currentBucket);
            }
            if (prevEntry != null)
            {
                // Don't extend a single entry forever; once it exceeds the
                // threshold, fall through and start a fresh one.
                if (prevEntry.GetLength() > LengthThreshold)
                {
                    prevEntry = null;
                }
            }
            if (prevEntry == null)
            {
                prevEntry = ContainerActiveEntity.New(now, _containerName);
            }

            // Update the length on the previous entry
            prevEntry.EndTime = now;
            await SaveAsync(prevEntry);
        }
    } while (!_cancel.IsCancellationRequested);
}
// The time bucket containing this entry's start time.
public long GetStartBucket() => TimeBucket.ConvertToBucket(this.StartTime);
// Builds the row key for the bucket containing 'dateTime' and the given
// container, delegating to the bucket-based overload.
internal static string RowKeyTimeInterval(DateTime dateTime, string containerName)
{
    return RowKeyTimeInterval(TimeBucket.ConvertToBucket(dateTime), containerName);
}