/// <summary>
/// Logs a function-instance event and flushes synchronously with the call.
/// Both Start and Completed events route through here; a Completed entry
/// overwrites a prior Start entry for the same instance.
/// </summary>
/// <param name="item">The log entry to record. Must not be null.</param>
/// <param name="cancellationToken">Currently unobserved by this implementation.</param>
/// <returns>A task that completes when the instance and timeline buffers have been flushed.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="item"/> is null.</exception>
public async Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    // Guard added for parity with the later revisions of this method: without it,
    // item.Validate() below surfaces an opaque NullReferenceException instead of
    // pointing the caller at the offending argument.
    if (item == null)
    {
        throw new ArgumentNullException("item");
    }

    item.Validate();
    item.FunctionId = FunctionId.Build(this._hostName, item.FunctionName);

    // Lazily create the per-machine loggers exactly once; the lock guards
    // the check-then-create against concurrent AddAsync callers.
    lock (_lock)
    {
        StartBackgroundFlusher();
        if (_container == null)
        {
            _container = new ContainerActiveLogger(_machineName, _logTableProvider);
        }
        if (_instanceLogger == null)
        {
            int size = GetContainerSize();
            _instanceLogger = new CloudTableInstanceCountLogger(_machineName, _logTableProvider, size);
        }
    }

    // Track the live-instance count: a Completed event balances the earlier Start.
    if (item.IsCompleted())
    {
        _container.Decrement(item.FunctionInstanceId);
        _instanceLogger.Decrement(item.FunctionInstanceId);
    }
    else
    {
        _container.Increment(item.FunctionInstanceId);
        _instanceLogger.Increment(item.FunctionInstanceId);
    }

    // Record a definition row the first time each function name is seen.
    lock (_lock)
    {
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionId, item.FunctionName));
        }
    }

    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        _instances.Add(InstanceTableEntity.New(item));
        _recents.Add(RecentPerFuncEntity.New(_machineName, item));
    }

    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        var newEntity = TimelineAggregateEntity.New(_machineName, item.FunctionId, item.StartTime, _uniqueId);
        lock (_lock)
        {
            // If we already have an entity at this time slot (specified by rowkey), then use that so that
            // we update the existing counters.
            var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
            if (existingEntity == null)
            {
                _timespan.Add(newEntity);
                existingEntity = newEntity;
            }
            Increment(item, existingEntity);
        }
    }

    // Flush every 100 items, maximize with tables.
    Task t1 = FlushIntancesAsync(false);
    Task t2 = FlushTimelineAggregateAsync();
    await Task.WhenAll(t1, t2);
}
/// <summary>
/// Buffers a function-instance event for write-out by the background flusher.
/// Both Start and Completed events route through here; a Completed entry
/// overwrites a prior Start entry for the same instance id.
/// </summary>
/// <param name="item">The log entry to record. Must not be null.</param>
/// <param name="cancellationToken">Currently unobserved by this implementation.</param>
/// <returns>An already-completed task; actual persistence happens on a background thread.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="item"/> is null.</exception>
public Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    if (item == null)
    {
        throw new ArgumentNullException("item");
    }

    item.Validate();
    item.FunctionId = FunctionId.Build(this._hostName, item.FunctionName);

    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        _activeFuncs[item.FunctionInstanceId] = item;
    }

    // Lazily create the per-machine loggers exactly once; the lock guards
    // the check-then-create against concurrent AddAsync callers.
    lock (_lock)
    {
        StartBackgroundFlusher();
        if (_container == null)
        {
            _container = new ContainerActiveLogger(_machineName, _logTableProvider);
        }
        if (_instanceLogger == null)
        {
            int size = GetContainerSize();
            _instanceLogger = new CloudTableInstanceCountLogger(_machineName, _logTableProvider, size);
        }
    }

    if (item.IsCompleted())
    {
        _container.Decrement(item.FunctionInstanceId);
        _instanceLogger.Decrement(item.FunctionInstanceId);

        // BUG FIX: _completedFunctions is a shared buffer read by the background
        // flusher; every other access in this method is under _lock, but this Add
        // was unsynchronized. Take the lock, matching the later revision of this
        // method in this file.
        lock (_lock)
        {
            _completedFunctions.Add(item.FunctionInstanceId);
        }
    }
    else
    {
        _container.Increment(item.FunctionInstanceId);
        _instanceLogger.Increment(item.FunctionInstanceId);
    }

    // Record a definition row the first time each function name is seen.
    lock (_lock)
    {
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionId, item.FunctionName));
        }
    }

    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        var newEntity = TimelineAggregateEntity.New(_machineName, item.FunctionId, item.StartTime, _uniqueId);
        lock (_lock)
        {
            // If we already have an entity at this time slot (specified by rowkey), then use that so that
            // we update the existing counters.
            var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
            if (existingEntity == null)
            {
                _timespan.Add(newEntity);
                existingEntity = newEntity;
            }
            Increment(item, existingEntity);
        }
    }

    // Results will get written on a background thread
    return (Task.FromResult(0));
}
/// <summary>
/// Buffers a function-instance event for write-out by the background flusher,
/// dropping the whole buffer if it grows past <c>MaxBufferedEntryCount</c>
/// (mitigation for unbounded growth when flushes to storage keep failing).
/// Both Start and Completed events route through here; a Completed entry
/// overwrites a prior Start entry for the same instance id.
/// </summary>
/// <param name="item">The log entry to record. Must not be null.</param>
/// <param name="cancellationToken">Currently unobserved by this implementation.</param>
/// <returns>An already-completed task; actual persistence happens on a background thread.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="item"/> is null.</exception>
public Task AddAsync(FunctionInstanceLogItem item, CancellationToken cancellationToken = default(CancellationToken))
{
    if (item == null)
    {
        throw new ArgumentNullException("item");
    }

    item.Validate();
    item.FunctionId = FunctionId.Build(this._hostName, item.FunctionName);

    // Both Start and Completed log here. Completed will overwrite a Start entry.
    lock (_lock)
    {
        // Permanent failures when flushing to storage can result in many log entries being buffered.
        // This basically becomes a memory leak. Mitigate by enforcing a max.
        // NOTE: when the cap is hit, ALL buffered entries are discarded (not just the oldest)
        // and the caller is notified once via _onException.
        if (_activeFuncs.Count >= MaxBufferedEntryCount || _completedFunctions.Count >= MaxBufferedEntryCount)
        {
            _onException?.Invoke(new Exception($"The limit on the number of buffered log entries was reached. A total of '{MaxBufferedEntryCount}' log entries were dropped."));
            _activeFuncs.Clear();
            _completedFunctions.Clear();
        }
        _activeFuncs[item.FunctionInstanceId] = item;
    }

    // Lazily create the per-machine loggers exactly once; the lock guards
    // the check-then-create against concurrent AddAsync callers.
    lock (_lock)
    {
        StartBackgroundFlusher();
        if (_container == null)
        {
            _container = new ContainerActiveLogger(_machineName, _logTableProvider);
        }
        if (_instanceLogger == null)
        {
            int size = GetContainerSize();
            _instanceLogger = new CloudTableInstanceCountLogger(_machineName, _logTableProvider, size);
        }
    }

    // Track the live-instance count: a Completed event balances the earlier Start.
    if (item.IsCompleted())
    {
        _container.Decrement(item.FunctionInstanceId);
        _instanceLogger.Decrement(item.FunctionInstanceId);
        // _completedFunctions is shared with the background flusher, so the Add
        // is taken under _lock (unlike the earlier revision of this method).
        lock (_lock)
        {
            _completedFunctions.Add(item.FunctionInstanceId);
        }
    }
    else
    {
        _container.Increment(item.FunctionInstanceId);
        _instanceLogger.Increment(item.FunctionInstanceId);
    }

    // Record a definition row the first time each function name is seen.
    lock (_lock)
    {
        if (_seenFunctions.Add(item.FunctionName))
        {
            _funcDefs.Add(FunctionDefinitionEntity.New(item.FunctionId, item.FunctionName));
        }
    }

    if (item.IsCompleted())
    {
        // For completed items, aggregate total passed and failed within a time bucket.
        // Time aggregate is flushed later.
        // Don't flush until we've moved onto the next interval.
        {
            var newEntity = TimelineAggregateEntity.New(_machineName, item.FunctionId, item.StartTime, _uniqueId);
            lock (_lock)
            {
                // If we already have an entity at this time slot (specified by rowkey), then use that so that
                // we update the existing counters.
                var existingEntity = _timespan.GetFromRowKey(newEntity.RowKey);
                if (existingEntity == null)
                {
                    _timespan.Add(newEntity);
                    existingEntity = newEntity;
                }
                Increment(item, existingEntity);
            }
        }
    }

    // Results will get written on a background thread
    return(Task.FromResult(0));
}