// Write logs. Return what we wrote.
// This is baseline data. The reader will verify against it exactly, which aggressively catches subtle breaking changes.
private async Task<FunctionInstanceLogItem[]> WriteTestLoggingDataAsync(ILogTableProvider provider)
{
    ILogWriter writer = LogFactory.NewWriter("c1", provider);

    string Func1 = "alpha";
    var time = new DateTime(2010, 3, 6, 10, 11, 20);

    List<FunctionInstanceLogItem> list = new List<FunctionInstanceLogItem>();
    list.Add(new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = Func1,
        StartTime = time,
        EndTime = time.AddMinutes(2),
        LogOutput = "one",
        Status = Microsoft.Azure.WebJobs.Logging.FunctionInstanceStatus.CompletedSuccess
    });

    foreach (var item in list)
    {
        await writer.AddAsync(item);
    }
    await writer.FlushAsync();

    return list.ToArray();
}
public LogReader(ILogTableProvider tableLookup)
{
    if (tableLookup == null)
    {
        throw new ArgumentNullException("tableLookup");
    }
    this._tableLookup = tableLookup;
}
public CloudTableInstanceCountLogger(string containerName, ILogTableProvider tableLookup, int containerSize)
{
    // Default polling interval
    this.PollingInterval = TimeSpan.FromSeconds(60);

    this._tableLookup = tableLookup;
    this._containerName = containerName;
    this._containerSize = containerSize;
}
static async Task Reader(string accountConnectionString, DateTime startDate, DateTime endDate)
{
    CloudStorageAccount account = CloudStorageAccount.Parse(accountConnectionString);
    CloudTableClient client = account.CreateCloudTableClient();

    ILogTableProvider tableProvider = LogFactory.NewLogTableProvider(client);
    ILogReader reader = LogFactory.NewReader(tableProvider);

    await Reader(reader, startDate, endDate);
}
// Returns null if no legacy table.
public static CloudTable GetLegacyTable(ILogTableProvider tableProvider)
{
    var table = tableProvider.GetTable(OldTableName);
    if (!table.Exists())
    {
        return null;
    }
    return table;
}
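// Illustrative (non-authoritative) call site for GetLegacyTable above, showing that callers must
// handle the null result when no legacy table exists. "tableProvider" is assumed to be an
// ILogTableProvider from the surrounding context, and the migration step is only a placeholder.
private static void MigrateLegacyDataIfPresent(ILogTableProvider tableProvider)
{
    CloudTable legacyTable = GetLegacyTable(tableProvider);
    if (legacyTable != null)
    {
        // Legacy data exists; read or migrate it here.
    }
}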
// Azure table batches are limited to 100 operations, so flush at 90.
// Uploads run in parallel.
public static async Task WriteBatchAsync<T>(this ILogTableProvider logTableProvider, IEnumerable<T> e1)
    where T : TableEntity, IEntityWithEpoch
{
    HashSet<string> rowKeys = new HashSet<string>();

    int batchSize = 90;

    // Batches must be within a single table partition, so the key is "tableName + PartitionKey".
    var batches = new Dictionary<string, Tuple<CloudTable, TableBatchOperation>>();

    List<Task> t = new List<Task>();

    foreach (var e in e1)
    {
        if (!rowKeys.Add(e.RowKey))
        {
            // Already present; skip duplicates so a batch never contains the same row twice.
            continue;
        }

        var epoch = e.GetEpoch();
        var instanceTable = logTableProvider.GetTableForDateTime(epoch);

        string key = instanceTable.Name + "/" + e.PartitionKey;

        Tuple<CloudTable, TableBatchOperation> tuple;
        if (!batches.TryGetValue(key, out tuple))
        {
            tuple = Tuple.Create(instanceTable, new TableBatchOperation());
            batches[key] = tuple;
        }
        TableBatchOperation batch = tuple.Item2;

        batch.InsertOrMerge(e);
        if (batch.Count >= batchSize)
        {
            Task tUpload = instanceTable.SafeExecuteAsync(batch);
            t.Add(tUpload);

            batches.Remove(key);
        }
    }

    // Flush remaining
    foreach (var tuple in batches.Values)
    {
        var instanceTable = tuple.Item1;
        var batch = tuple.Item2;
        if (batch.Count > 0)
        {
            Task tUpload = instanceTable.SafeExecuteAsync(batch);
            t.Add(tUpload);
        }
    }

    await Task.WhenAll(t);
}
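// A minimal usage sketch for WriteBatchAsync above, assuming only what the method itself requires:
// entities derive from TableEntity and implement IEntityWithEpoch, whose GetEpoch() result selects
// the per-date table. SampleEpochEntity and UploadSamplesAsync are hypothetical names for illustration.
internal class SampleEpochEntity : TableEntity, IEntityWithEpoch
{
    public DateTime EpochUtc { get; set; }

    // Drives GetTableForDateTime(), i.e. which dated table this row is written to.
    public DateTime GetEpoch()
    {
        return EpochUtc;
    }
}

internal static Task UploadSamplesAsync(ILogTableProvider logTableProvider, IEnumerable<SampleEpochEntity> entitiesToUpload)
{
    // Rows sharing a table and partition are grouped into batches of at most 90 and uploaded in parallel.
    return logTableProvider.WriteBatchAsync(entitiesToUpload);
}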
public static async Task<EpochTableIterator> NewAsync(ILogTableProvider tableLookup)
{
    Dictionary<long, CloudTable> d = new Dictionary<long, CloudTable>();
    var tables = await tableLookup.ListTablesAsync();
    foreach (var table in tables)
    {
        var epoch = TimeBucket.GetEpochNumberFromTable(table);
        d[epoch] = table;
    }
    return new EpochTableIterator(d);
}
public LogWriter(string machineName, ILogTableProvider logTableProvider)
{
    if (machineName == null)
    {
        throw new ArgumentNullException("machineName");
    }
    if (logTableProvider == null)
    {
        throw new ArgumentNullException("logTableProvider");
    }
    this._machineName = machineName;
    this._logTableProvider = logTableProvider;
}
public static CloudTable GetTableForDateTime(this ILogTableProvider tableLookup, DateTime epoch)
{
    // Epoch(DateTime.MaxValue) is 94146, still a 5-digit number.
    string suffix;
    if (epoch == CommonEpoch)
    {
        suffix = CommonEpochSuffix;
    }
    else
    {
        var ts = GetEpochSuffixNumber(epoch);
        suffix = string.Format(CultureInfo.InvariantCulture, "{0:D5}", ts);
    }
    var table = tableLookup.GetTable(suffix);
    return table;
}
public LogWriter(string hostName, string machineName, ILogTableProvider logTableProvider, Action<Exception> onException = null)
{
    if (machineName == null)
    {
        throw new ArgumentNullException("machineName");
    }
    if (logTableProvider == null)
    {
        throw new ArgumentNullException("logTableProvider");
    }
    if (hostName == null)
    {
        throw new ArgumentNullException("hostName");
    }
    this._hostName = hostName;
    this._machineName = machineName;
    this._logTableProvider = logTableProvider;
    this._onException = onException;
}
public async Task Init()
{
    var tableClient = GetNewLoggingTableClient();

    var tablePrefix = "logtesZZ" + Guid.NewGuid().ToString("n");
    ConfigurationManager.AppSettings[FunctionLogTableAppSettingName] = tablePrefix; // tell the dashboard to use it
    _provider = LogFactory.NewLogTableProvider(tableClient, tablePrefix);
    await WriteTestLoggingDataAsync(_provider);

    var config = new HttpConfiguration();
    var container = MvcApplication.BuildContainer(config);
    WebApiConfig.Register(config);

    var server = new HttpServer(config);
    var client = new HttpClient(server);

    this.Client = client;
    this.Endpoint = "http://localhost:8080"; // ignored
}
public async Task TimeRangeAcrossEpochs()
{
    // Make some very precise writes and verify we read exactly what we'd expect.

    ILogWriter writer = LogFactory.NewWriter(defaultHost, "c1", this);
    ILogReader reader = LogFactory.NewReader(this);

    // Times that functions are called.
    DateTime[] times = new DateTime[]
    {
        // Epoch 37
        new DateTime(2012, 3, 6, 10, 11, 20, DateTimeKind.Utc),
        new DateTime(2012, 3, 7, 10, 11, 20, DateTimeKind.Utc),

        // Consecutive Epoch 38
        new DateTime(2012, 4, 8, 10, 11, 20, DateTimeKind.Utc),

        // Skip to Epoch 41
        new DateTime(2012, 7, 9, 10, 11, 20, DateTimeKind.Utc)
    };

    var logs = Array.ConvertAll(times, time => new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = commonFuncName1,
        StartTime = time,
    });

    var tasks = Array.ConvertAll(logs, log => WriteAsync(writer, log));
    await Task.WhenAll(tasks);
    await writer.FlushAsync();

    // Test point lookups for individual function instances.
    foreach (var log in logs)
    {
        var entry = await reader.LookupFunctionInstanceAsync(log.FunctionInstanceId);
        Assert.NotNull(entry);

        Assert.Equal(log.FunctionInstanceId, entry.FunctionInstanceId);
        Assert.Equal(log.FunctionName, entry.FunctionName);
        Assert.Equal(log.StartTime, entry.StartTime);
        Assert.Equal(log.EndTime, entry.EndTime);
    }

    // Try various combinations.
    await Verify(reader, DateTime.MinValue, DateTime.MaxValue, logs[3], logs[2], logs[1], logs[0]); // Infinite range, includes all.

    // Various combinations of straddling an epoch boundary
    await Verify(reader, Before(times[1]), After(times[2]), logs[2], logs[1]);
    await Verify(reader, Before(times[1]), Before(times[2]), logs[1]);
    await Verify(reader, After(times[1]), Before(times[2]));

    // Skipping over an empty epoch
    await Verify(reader, Before(times[1]), Before(times[3]), logs[2], logs[1]);

    // Now delete the middle table and verify the other data is still there.
    ILogTableProvider provider = this;
    var table = provider.GetTable("201204");
    Assert.True(await table.ExistsAsync());
    await table.DeleteAsync();

    await Verify(reader, DateTime.MinValue, DateTime.MaxValue, logs[3], logs[1], logs[0]); // Infinite range, includes all remaining entries.

    // The function instance entry from the table we deleted is now missing.
    var entry2 = await reader.LookupFunctionInstanceAsync(logs[2].FunctionInstanceId);
    Assert.Null(entry2);
}
/// <summary>
/// Create a new log writer.
/// Pass in machineName to facilitate multiple compute instances writing to the same table simultaneously without interference.
/// </summary>
/// <param name="hostName">Name of the host. A host is a homogeneous collection of compute containers, like an Azure Website / App Service.
/// Multiple hosts can share a single set of Azure tables. Logging is scoped per-host.</param>
/// <param name="machineName">Name of the compute container. Likely %COMPUTERNAME%.</param>
/// <param name="logTableProvider">Callback interface that gets invoked to get Azure tables to write logging to.</param>
/// <returns></returns>
public static ILogWriter NewWriter(string hostName, string machineName, ILogTableProvider logTableProvider)
{
    return new LogWriter(hostName, machineName, logTableProvider);
}
/// <summary>
/// Get a reader that reads from the tables supplied by the given provider. A single reader can handle all hosts in the given storage account.
/// </summary>
/// <param name="logTableProvider">Callback interface to retrieve logging tables.</param>
/// <returns></returns>
public static ILogReader NewReader(ILogTableProvider logTableProvider)
{
    return new LogReader(logTableProvider);
}
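// A hedged end-to-end sketch of the factory methods above (NewWriter / NewReader), using only calls
// that appear elsewhere in this codebase. The connection string parameter, host name ("myHost"),
// sample function name, and the ExampleWriteAndReadAsync name itself are placeholder values.
private static async Task ExampleWriteAndReadAsync(string accountConnectionString)
{
    CloudStorageAccount account = CloudStorageAccount.Parse(accountConnectionString);
    CloudTableClient client = account.CreateCloudTableClient();
    ILogTableProvider provider = LogFactory.NewLogTableProvider(client);

    // One writer per compute instance; a single reader can span every host in the storage account.
    ILogWriter writer = LogFactory.NewWriter("myHost", Environment.MachineName, provider);
    ILogReader reader = LogFactory.NewReader(provider);

    var item = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = "alpha",
        StartTime = DateTime.UtcNow
    };

    await writer.AddAsync(item);
    await writer.FlushAsync(); // flush so the entry is persisted before reading it back

    var entry = await reader.LookupFunctionInstanceAsync(item.FunctionInstanceId);
    Console.WriteLine(entry != null ? entry.FunctionName : "<not found>");
}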
public static CloudTable GetTableForTimeBucket(this ILogTableProvider tableLookup, long timeBucket)
{
    var time = ConvertToDateTime(timeBucket);
    return tableLookup.GetTableForDateTime(time);
}
/// <summary>
/// Create a new log writer.
/// Pass in machineName to facilitate multiple compute instances writing to the same table simultaneously without interference.
/// </summary>
/// <param name="hostName">Name of the host. A host is a homogeneous collection of compute containers, like an Azure Website / App Service.
/// Multiple hosts can share a single set of Azure tables. Logging is scoped per-host.</param>
/// <param name="machineName">Name of the compute container. Likely %COMPUTERNAME%.</param>
/// <param name="logTableProvider">Callback interface that gets invoked to get Azure tables to write logging to.</param>
/// <param name="onException">An action to be called when the log writer throws an exception.</param>
/// <returns></returns>
public static ILogWriter NewWriter(string hostName, string machineName, ILogTableProvider logTableProvider, Action<Exception> onException = null)
{
    return new LogWriter(hostName, machineName, logTableProvider, onException);
}
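// A brief sketch of the onException overload above: the callback is invoked when the log writer
// hits an exception, so the host can observe failures. "myHost" and the helper name are
// placeholders; the callback body is only illustrative.
private static ILogWriter CreateWriterWithExceptionHandler(ILogTableProvider provider)
{
    return LogFactory.NewWriter(
        "myHost",
        Environment.MachineName,
        provider,
        ex => Console.Error.WriteLine("Logging failed: " + ex.Message));
}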
// Write logs. Return what we wrote.
// This is baseline data. The reader will verify against it exactly, which aggressively catches subtle breaking changes.
private async Task WriteTestLoggingDataAsync(ILogTableProvider provider)
{
    ILogWriter writer = LogFactory.NewWriter(HostName, "c1", provider);

    string Func1 = "alpha";
    var time = new DateTime(2010, 3, 6, 18, 11, 20, DateTimeKind.Utc);

    List<FunctionInstanceLogItem> list = new List<FunctionInstanceLogItem>();
    List<InvocationLogViewModel> expected = new List<InvocationLogViewModel>();
    this.ExpectedItems = expected;
    this.Data = list;

    // List in reverse chronology.

    // Completed Success
    {
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time,
            EndTime = time.AddMinutes(2), // Completed
            LogOutput = "one",
        };
        list.Add(item);

        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "CompletedSuccess",
            whenUtc = "2010-03-06T18:13:20Z", // since it's completed, specifies end-time
            duration = 120000.0
        });
    }

    // Completed Error
    {
        time = time.AddMinutes(-1);
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time,
            EndTime = time.AddMinutes(2),
            ErrorDetails = "some failure", // signifies failure
            LogOutput = "two",
        };
        list.Add(item);

        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "CompletedFailed",
            whenUtc = "2010-03-06T18:12:20Z", // end-time
            duration = 120000.0
        });
    }

    // Still running
    {
        time = time.AddMinutes(-1);
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time, // Recent heartbeat
            LogOutput = "two",
        };
        list.Add(item);

        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "Running",
            whenUtc = "2010-03-06T18:09:20Z", // specifies start-time
        });
    }

    // Never Finished
    {
        time = time.AddMinutes(-1);
        var item = new TestFunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time, // Never finished
            LogOutput = "two",
            OnRefresh = (me) => { me.FunctionInstanceHeartbeatExpiry = time; }, // stale heartbeat
        };
        list.Add(item);

        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "NeverFinished",
            whenUtc = "2010-03-06T18:08:20Z", // start-time
            duration = null
        });
    }

    // No heartbeat (legacy example)
    {
        time = time.AddMinutes(-1);
        var item = new TestFunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time, // Never finished
            LogOutput = "two",
            OnRefresh = (me) => { } // No heartbeat
        };
        list.Add(item);

        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "Running",
            whenUtc = "2010-03-06T18:07:20Z", // start-time
        });
    }

    foreach (var item in list)
    {
        await writer.AddAsync(item);
    }
    await writer.FlushAsync();
}
public ContainerActiveLogger(string containerName, ILogTableProvider tableLookup)
{
    this._tableLookup = tableLookup;
    this._containerName = containerName;
}