// Folds one completed function instance into the per-time-bucket aggregate:
// bumps the total run count, then tallies the run as a pass or a failure
// depending on the instance outcome reported by IsSucceeded().
private static void Increment(FunctionInstanceLogItem item, TimelineAggregateEntity x)
{
    x.TotalRun++;

    bool succeeded = item.IsSucceeded();
    if (succeeded)
    {
        x.TotalPass++;
    }
    else
    {
        x.TotalFail++;
    }
}
/// <summary>
/// Queries the per-time-bucket aggregate statistics for the named function
/// over the window [start, end].
/// </summary>
/// <param name="functionName">Name of the function to query. Must not be null.</param>
/// <param name="start">Start of the time window. Must not be after <paramref name="end"/>.</param>
/// <param name="end">End of the time window.</param>
/// <param name="continuationToken">Not consulted by this implementation; all matching
/// results are returned in a single segment.</param>
/// <returns>A single segment wrapping every matching aggregate entry.</returns>
/// <exception cref="ArgumentNullException">If <paramref name="functionName"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">If <paramref name="start"/> is after <paramref name="end"/>.</exception>
public async Task<Segment<IAggregateEntry>> GetAggregateStatsAsync(string functionName, DateTime start, DateTime end, string continuationToken)
{
    if (functionName == null)
    {
        // nameof keeps the parameter name refactor-safe.
        throw new ArgumentNullException(nameof(functionName));
    }
    if (start > end)
    {
        throw new ArgumentOutOfRangeException(nameof(start), "start time must not be after end time");
    }

    var rangeQuery = TimelineAggregateEntity.GetQuery(functionName, start, end);
    var results = await _instanceTable.SafeExecuteQueryAsync(rangeQuery);
    return new Segment<IAggregateEntry>(results);
}
/// <summary>
/// Queries the per-time-bucket aggregate statistics for the given function id
/// over the window [start, end], iterating epoch tables via <c>EpochTableIterator</c>.
/// </summary>
/// <param name="functionId">Identifier of the function to query; validated via its own Validate().</param>
/// <param name="start">Start of the time window. Must not be after <paramref name="end"/>.</param>
/// <param name="end">End of the time window.</param>
/// <param name="continuationToken">Not consulted by this implementation.</param>
/// <returns>The matching aggregate entries as a segment of <see cref="IAggregateEntry"/>.</returns>
/// <exception cref="ArgumentOutOfRangeException">If <paramref name="start"/> is after <paramref name="end"/>.</exception>
public async Task<Segment<IAggregateEntry>> GetAggregateStatsAsync(FunctionId functionId, DateTime start, DateTime end, string continuationToken)
{
    functionId.Validate();
    if (start > end)
    {
        // nameof keeps the parameter name refactor-safe.
        throw new ArgumentOutOfRangeException(nameof(start), "start time must not be after end time");
    }

    var iter = await EpochTableIterator.NewAsync(_tableLookup);
    var rangeQuery = TimelineAggregateEntity.GetQuery(functionId, start, end);
    var results = await iter.SafeExecuteQuerySegmentedAsync<TimelineAggregateEntity>(rangeQuery, start, end);
    return results.As<IAggregateEntry>();
}
/// <summary>
/// Records one function-instance log entry (a Start or a Completed event) into the
/// in-memory buffers, then kicks off a flush of instance and timeline-aggregate data.
/// </summary>
/// <param name="item">The log item to record; validated on entry.</param>
/// <param name="cancellationToken">Not consulted by this implementation.</param>
public async Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    item.Validate();
    {
        lock (_lock)
        {
            // Lazily start the flusher and create the per-container loggers on first use.
            StartBackgroundFlusher();
            if (_container == null)
            {
                _container = new ContainerActiveLogger(_containerName, _instanceTable);
            }
            if (_instanceLogger == null)
            {
                int size = GetContainerSize();
                _instanceLogger = new CloudTableInstanceCountLogger(_containerName, _instanceTable, size);
            }
        }
        // Track the number of in-flight instances: completion decrements, start increments.
        // NOTE(review): these calls happen outside the lock; presumably the loggers are
        // themselves thread-safe — confirm against their implementations.
        if (item.IsCompleted())
        {
            _container.Decrement(item.FunctionInstanceId);
            _instanceLogger.Decrement(item.FunctionInstanceId);
        }
        else
        {
            _container.Increment(item.FunctionInstanceId);
            _instanceLogger.Increment(item.FunctionInstanceId);
        }
    }
    lock (_lock)
    {
        // First time we see a function name, buffer a definition entity for it.
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionName));
        }
    }
    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        _instances.Add(InstanceTableEntity.New(item));
        _recents.Add(RecentPerFuncEntity.New(_containerName, item));
    }
    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        {
            var newEntity = TimelineAggregateEntity.New(_containerName, item.FunctionName, item.StartTime, _uniqueId);
            lock (_lock)
            {
                // If we already have an entity at this time slot (specified by rowkey), then use that so that
                // we update the existing counters.
                var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
                if (existingEntity == null)
                {
                    _timespan.Add(newEntity);
                    existingEntity = newEntity;
                }
                Increment(item, existingEntity);
            }
        }
    }
    // Flush every 100 items, maximize with tables.
    Task t1 = FlushIntancesAsync(false);
    Task t2 = FlushTimelineAggregateAsync();
    await Task.WhenAll(t1, t2);
}
/// <summary>
/// Records one function-instance log entry. In this variant only Completed items are
/// written to the instance/recent buffers and the timeline aggregates; for Start items
/// the method returns early after updating the in-flight counters.
/// </summary>
/// <param name="item">The log item to record; validated on entry.</param>
/// <param name="cancellationToken">Not consulted by this implementation.</param>
public async Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    item.Validate();
    {
        lock (_lock)
        {
            // Lazily start the flusher and create the per-container loggers on first use.
            StartBackgroundFlusher();
            if (_container == null)
            {
                _container = new ContainerActiveLogger(_containerName, _instanceTable);
            }
            if (_instanceLogger == null)
            {
                int size = GetContainerSize();
                _instanceLogger = new CloudTableInstanceCountLogger(_containerName, _instanceTable, size);
            }
        }
        // Track the number of in-flight instances: completion decrements, start increments.
        if (item.IsCompleted())
        {
            _container.Decrement(item.FunctionInstanceId);
            _instanceLogger.Decrement(item.FunctionInstanceId);
        }
        else
        {
            _container.Increment(item.FunctionInstanceId);
            _instanceLogger.Increment(item.FunctionInstanceId);
        }
    }
    lock (_lock)
    {
        // First time we see a function name, buffer a definition entity for it.
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionName));
        }
    }
    // Start (not-yet-completed) items stop here: nothing is buffered or flushed for them.
    if (!item.IsCompleted())
    {
        return;
    }
    lock (_lock)
    {
        _instances.Add(InstanceTableEntity.New(item));
        _recents.Add(RecentPerFuncEntity.New(_containerName, item));
    }
    // Time aggregate is flushed later.
    // Don't flush until we've moved onto the next interval.
    {
        // _timespan is keyed by a row key derived from function name + time interval,
        // so repeated completions in the same interval update one shared bucket.
        var rowKey = TimelineAggregateEntity.RowKeyTimeInterval(item.FunctionName, item.StartTime, _uniqueId);
        lock (_lock)
        {
            TimelineAggregateEntity x;
            if (!_timespan.TryGetValue(rowKey, out x))
            {
                // Can we flush the old counters?
                x = TimelineAggregateEntity.New(_containerName, item.FunctionName, item.StartTime, _uniqueId);
                _timespan[rowKey] = x;
            }
            Increment(item, x);
        }
    }
    // Flush every 100 items, maximize with tables.
    Task t1 = FlushIntancesAsync(false);
    Task t2 = FlushTimelineAggregateAsync();
    await Task.WhenAll(t1, t2);
}
/// <summary>
/// Buffers one function-instance log entry in memory and returns synchronously;
/// the buffered data is written to storage later by the background flusher.
/// </summary>
/// <param name="item">The log item to record; validated on entry. Its FunctionId is
/// (re)assigned here from the host name and function name.</param>
/// <param name="cancellationToken">Not consulted by this implementation.</param>
/// <returns>An already-completed task.</returns>
/// <exception cref="ArgumentNullException">If <paramref name="item"/> is null.</exception>
public Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    if (item == null)
    {
        throw new ArgumentNullException("item");
    }
    item.Validate();
    item.FunctionId = FunctionId.Build(this._hostName, item.FunctionName);
    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        _activeFuncs[item.FunctionInstanceId] = item;
    }
    lock (_lock)
    {
        // Lazily start the flusher and create the per-machine loggers on first use.
        StartBackgroundFlusher();
        if (_container == null)
        {
            _container = new ContainerActiveLogger(_machineName, _logTableProvider);
        }
        if (_instanceLogger == null)
        {
            int size = GetContainerSize();
            _instanceLogger = new CloudTableInstanceCountLogger(_machineName, _logTableProvider, size);
        }
    }
    // Track the number of in-flight instances: completion decrements, start increments.
    if (item.IsCompleted())
    {
        _container.Decrement(item.FunctionInstanceId);
        _instanceLogger.Decrement(item.FunctionInstanceId);
        lock (_lock)
        {
            _completedFunctions.Add(item.FunctionInstanceId);
        }
    }
    else
    {
        _container.Increment(item.FunctionInstanceId);
        _instanceLogger.Increment(item.FunctionInstanceId);
    }
    lock (_lock)
    {
        // First time we see a function name, buffer a definition entity for it.
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionId, item.FunctionName));
        }
    }
    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        {
            var newEntity = TimelineAggregateEntity.New(_machineName, item.FunctionId, item.StartTime, _uniqueId);
            lock (_lock)
            {
                // If we already have an entity at this time slot (specified by rowkey), then use that so that
                // we update the existing counters.
                var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
                if (existingEntity == null)
                {
                    _timespan.Add(newEntity);
                    existingEntity = newEntity;
                }
                Increment(item, existingEntity);
            }
        }
    }
    // Results will get written on a background thread
    return Task.FromResult(0);
}
/// <summary>
/// Buffers one function-instance log entry in memory and returns synchronously;
/// the buffered data is written to storage later by the background flusher. This
/// variant caps the in-memory buffers: if flushing has been failing long enough for
/// either buffer to reach MaxBufferedEntryCount, all buffered entries are dropped
/// (reported via _onException) rather than growing without bound.
/// </summary>
/// <param name="item">The log item to record; validated on entry. Its FunctionId is
/// (re)assigned here from the host name and function name.</param>
/// <param name="cancellationToken">Not consulted by this implementation.</param>
/// <returns>An already-completed task.</returns>
/// <exception cref="ArgumentNullException">If <paramref name="item"/> is null.</exception>
public Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    if (item == null)
    {
        throw new ArgumentNullException("item");
    }
    item.Validate();
    item.FunctionId = FunctionId.Build(this._hostName, item.FunctionName);
    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        // Permanent failures when flushing to storage can result in many log entries being buffered.
        // This basically becomes a memory leak. Mitigate by enforcing a max.
        if (_activeFuncs.Count >= MaxBufferedEntryCount || _completedFunctions.Count >= MaxBufferedEntryCount)
        {
            _onException?.Invoke(new Exception($"The limit on the number of buffered log entries was reached. A total of '{MaxBufferedEntryCount}' log entries were dropped."));
            _activeFuncs.Clear();
            _completedFunctions.Clear();
        }
        _activeFuncs[item.FunctionInstanceId] = item;
    }
    lock (_lock)
    {
        // Lazily start the flusher and create the per-machine loggers on first use.
        StartBackgroundFlusher();
        if (_container == null)
        {
            _container = new ContainerActiveLogger(_machineName, _logTableProvider);
        }
        if (_instanceLogger == null)
        {
            int size = GetContainerSize();
            _instanceLogger = new CloudTableInstanceCountLogger(_machineName, _logTableProvider, size);
        }
    }
    // Track the number of in-flight instances: completion decrements, start increments.
    if (item.IsCompleted())
    {
        _container.Decrement(item.FunctionInstanceId);
        _instanceLogger.Decrement(item.FunctionInstanceId);
        lock (_lock)
        {
            _completedFunctions.Add(item.FunctionInstanceId);
        }
    }
    else
    {
        _container.Increment(item.FunctionInstanceId);
        _instanceLogger.Increment(item.FunctionInstanceId);
    }
    lock (_lock)
    {
        // First time we see a function name, buffer a definition entity for it.
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionId, item.FunctionName));
        }
    }
    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        {
            var newEntity = TimelineAggregateEntity.New(_machineName, item.FunctionId, item.StartTime, _uniqueId);
            lock (_lock)
            {
                // If we already have an entity at this time slot (specified by rowkey), then use that so that
                // we update the existing counters.
                var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
                if (existingEntity == null)
                {
                    _timespan.Add(newEntity);
                    existingEntity = newEntity;
                }
                Increment(item, existingEntity);
            }
        }
    }
    // Results will get written on a background thread
    return Task.FromResult(0);
}