// Verifies that a start-only entry reads back as Running, and that re-writing the
// same instance with an end time overwrites (not duplicates) the original row.
public async Task LogStart()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    var table = GetNewLoggingTable();
    try
    {
        ILogWriter writer = LogFactory.NewWriter("c1", table);
        ILogReader reader = LogFactory.NewReader(table);

        string Func1 = "alpha";
        var t1a = new DateTime(2010, 3, 6, 10, 11, 20, DateTimeKind.Utc);

        FunctionInstanceLogItem l1 = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = t1a,
            LogOutput = "one"
            // inferred as Running since no end time.
        };
        await writer.AddAsync(l1);
        await writer.FlushAsync();

        // Start event should exist.
        var entries = await GetRecentAsync(reader, Func1);
        Assert.Single(entries); // was Assert.Equal(1, entries.Length)
        // xUnit convention: expected value first, actual second.
        Assert.Equal(FunctionInstanceStatus.Running, entries[0].Status);
        Assert.Null(entries[0].EndTime);

        l1.EndTime = l1.StartTime.Add(TimeSpan.FromSeconds(1));
        l1.Status = FunctionInstanceStatus.CompletedSuccess;
        await writer.AddAsync(l1);
        await writer.FlushAsync();

        // Should overwrite the previous row.
        entries = await GetRecentAsync(reader, Func1);
        Assert.Single(entries);
        Assert.Equal(FunctionInstanceStatus.CompletedSuccess, entries[0].Status);
        Assert.Equal(l1.EndTime, entries[0].EndTime.Value.DateTime);
    }
    finally
    {
        // Cleanup
        table.DeleteIfExists();
    }
}
// Write logs and return exactly what was written.
// This is baseline data; the reader will verify against it exactly, which helps
// aggressively catch subtle breaking changes.
private async Task<FunctionInstanceLogItem[]> WriteTestLoggingDataAsync(ILogTableProvider provider)
{
    ILogWriter writer = LogFactory.NewWriter("c1", provider);

    string functionName = "alpha";
    var startTime = new DateTime(2010, 3, 6, 10, 11, 20);

    var items = new List<FunctionInstanceLogItem>
    {
        new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = functionName,
            StartTime = startTime,
            EndTime = startTime.AddMinutes(2),
            LogOutput = "one",
            Status = Microsoft.Azure.WebJobs.Logging.FunctionInstanceStatus.CompletedSuccess
        }
    };

    foreach (var item in items)
    {
        await writer.AddAsync(item);
    }
    await writer.FlushAsync();

    return items.ToArray();
}
// Flush any buffered log entries; a no-op when no writer has been created yet.
public Task FlushAsync(CancellationToken cancellationToken = default(CancellationToken))
{
    return _writer == null ? Task.CompletedTask : _writer.FlushAsync();
}
// Verifies that a start-only entry reads back as Running, and that re-writing the
// same instance with an end time overwrites (not duplicates) the original row.
public async Task LogStart()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    ILogWriter writer = LogFactory.NewWriter(defaultHost, "c1", this);
    ILogReader reader = LogFactory.NewReader(this);

    string Func1 = "alpha";
    var t1a = new DateTime(2010, 3, 6, 10, 11, 20, DateTimeKind.Utc);

    FunctionInstanceLogItem l1 = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = Func1,
        StartTime = t1a,
        LogOutput = "one"
        // inferred as Running since no end time.
    };
    await writer.AddAsync(l1);
    await writer.FlushAsync();

    // Start event should exist.
    var entries = await GetRecentAsync(reader, l1.FunctionId);
    Assert.Single(entries);
    // xUnit convention: expected value first, actual second.
    Assert.Equal(FunctionInstanceStatus.Running, entries[0].GetStatus());
    Assert.Null(entries[0].EndTime);

    // Setting an end time marks the instance completed (successfully, since no error).
    l1.EndTime = l1.StartTime.Add(TimeSpan.FromSeconds(1));
    await writer.AddAsync(l1);
    await writer.FlushAsync();

    // Should overwrite the previous row.
    entries = await GetRecentAsync(reader, l1.FunctionId);
    Assert.Single(entries);
    Assert.Equal(FunctionInstanceStatus.CompletedSuccess, entries[0].GetStatus());
    Assert.Equal(l1.EndTime, entries[0].EndTime.Value);
}
// Exercises time-range queries: writes two instances a day apart and verifies
// that every combination of range endpoints returns exactly the expected instances.
public async Task TimeRange()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    var table = GetNewLoggingTable();
    try
    {
        ILogWriter writer = LogFactory.NewWriter("c1", table);
        ILogReader reader = LogFactory.NewReader(table);

        // Times that the functions are invoked at.
        DateTime[] invokeTimes = new DateTime[]
        {
            new DateTime(2010, 3, 6, 10, 11, 20),
            new DateTime(2010, 3, 7, 10, 11, 20),
        };

        // Range endpoints one minute on either side of each invocation.
        DateTime tBefore0 = invokeTimes[0].AddMinutes(-1);
        DateTime tAfter0 = invokeTimes[0].AddMinutes(1);
        DateTime tBefore1 = invokeTimes[1].AddMinutes(-1);
        DateTime tAfter1 = invokeTimes[1].AddMinutes(1);

        var logs = Array.ConvertAll(invokeTimes, invokeTime => new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = CommonFuncName1,
            StartTime = invokeTime
        });

        var writeTasks = Array.ConvertAll(logs, log => WriteAsync(writer, log));
        await Task.WhenAll(writeTasks);
        await writer.FlushAsync();

        // Try various combinations.
        await Verify(reader, DateTime.MinValue, DateTime.MaxValue, logs[1], logs[0]); // Infinite range, includes all.
        await Verify(reader, tBefore0, tAfter1, logs[1], logs[0]); // barely hugs both instances

        await Verify(reader, DateTime.MinValue, tBefore0);
        await Verify(reader, DateTime.MinValue, tAfter0, logs[0]);
        await Verify(reader, DateTime.MinValue, tBefore1, logs[0]);
        await Verify(reader, DateTime.MinValue, tAfter1, logs[1], logs[0]);

        await Verify(reader, tAfter0, tBefore1); // in between the two instances; matches neither

        await Verify(reader, tBefore1, tAfter1, logs[1]);
        await Verify(reader, tBefore1, DateTime.MaxValue, logs[1]);
    }
    finally
    {
        table.DeleteIfExists();
    }
}
// Function-name lookups should be case-insensitive, while the stored name
// preserves the original casing.
public async Task Casing()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    var table = GetNewLoggingTable();
    try
    {
        ILogWriter writer = LogFactory.NewWriter("c1", table);
        ILogReader reader = LogFactory.NewReader(table);

        string FuncOriginal = "UPPER-lower";
        string Func2 = FuncOriginal.ToLower(); // casing permutations
        // BUGFIX: was Func2.ToLower(), which is identical to Func2 and so tested
        // no additional casing. Use the upper-case permutation instead.
        string Func3 = FuncOriginal.ToUpper();

        var t1a = new DateTime(2010, 3, 6, 10, 11, 20, DateTimeKind.Utc);

        FunctionInstanceLogItem l1 = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = FuncOriginal,
            StartTime = t1a,
            LogOutput = "one"
            // inferred as Running since no end time.
        };
        await writer.AddAsync(l1);
        await writer.FlushAsync();

        // Start event should exist.
        var definitionSegment = await reader.GetFunctionDefinitionsAsync(null);
        Assert.Single(definitionSegment.Results);
        Assert.Equal(FuncOriginal, definitionSegment.Results[0].Name);

        // Lookup various casings
        foreach (var name in new string[] { FuncOriginal, Func2, Func3 })
        {
            var entries = await GetRecentAsync(reader, name);
            Assert.Single(entries);
            // xUnit convention: expected value first, actual second.
            Assert.Equal(FunctionInstanceStatus.Running, entries[0].Status);
            Assert.Null(entries[0].EndTime);
            Assert.Equal(FuncOriginal, entries[0].FunctionName); // original casing preserved.
        }
    }
    finally
    {
        // Cleanup
        table.DeleteIfExists();
    }
}
// Oversized log fields and argument dictionaries must be truncated by the writer
// so the batch stays under Azure Table's request-size limit instead of failing.
public async Task LargeWritesWithParametersAreTruncated()
{
    ILogWriter writer = LogFactory.NewWriter(defaultHost, "c1", this);
    ILogReader reader = LogFactory.NewReader(this);

    // Max table request size is 4mb. That gives roughly 40kb per row.
    string largeValue = new string('x', 100 * 1000);
    string truncatedPrefix = largeValue.Substring(0, 100);

    List<Guid> functionIds = new List<Guid>();
    for (int i = 0; i < 90; i++)
    {
        var functionId = Guid.NewGuid();
        functionIds.Add(functionId);
        var now = DateTime.UtcNow;
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = functionId,
            Arguments = new Dictionary<string, string>(),
            StartTime = now,
            EndTime = now.AddSeconds(3),
            FunctionName = "tst2",
            LogOutput = largeValue,
            ErrorDetails = largeValue,
            TriggerReason = largeValue
        };
        for (int j = 0; j < 1000; j++)
        {
            string paramName = "p" + j.ToString();
            item.Arguments[paramName] = largeValue;
        }
        await writer.AddAsync(item);
    }

    // If we didn't truncate, then this would throw with a 413 "too large" exception.
    await writer.FlushAsync();

    // If we got here without an exception, then we successfully truncated the rows.

    // Lookup and verify.
    var instance = await reader.LookupFunctionInstanceAsync(functionIds[0]);

    // Use an explicit ordinal comparison (CA1310); these are machine-generated strings.
    Assert.True(instance.LogOutput.StartsWith(truncatedPrefix, StringComparison.Ordinal));
    Assert.True(instance.ErrorDetails.StartsWith(truncatedPrefix, StringComparison.Ordinal));
    Assert.True(instance.TriggerReason.StartsWith(truncatedPrefix, StringComparison.Ordinal));
    Assert.Empty(instance.Arguments); // totally truncated.
}
// Write a single completed function entry and flush it. Callers only care about
// the returned instance id, not the entry's other details.
async Task <Guid> QuickWriteAsync(ILogWriter writer, string functionName)
{
    FunctionInstanceLogItem l1 = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = functionName,
        StartTime = _quickTimestamp,
        EndTime = _quickTimestamp.AddMinutes(1) // end time set, so this entry is completed (not Running)
    };
    // Advance the shared timestamp so successive quick writes never overlap in time.
    _quickTimestamp = _quickTimestamp.AddMinutes(2);
    await writer.AddAsync(l1);
    await writer.FlushAsync();

    return(l1.FunctionInstanceId);
}
// Write logs. Return what we wrote.
// This is baseline data. Reader will verify against it exactly. This helps in
// aggressively catching subtle breaking changes.
// Populates this.Data with the raw log items and this.ExpectedItems with the
// view models the reader is expected to produce, in reverse chronological order.
private async Task WriteTestLoggingDataAsync(ILogTableProvider provider)
{
    ILogWriter writer = LogFactory.NewWriter(HostName, "c1", provider);

    string Func1 = "alpha";
    var time = new DateTime(2010, 3, 6, 18, 11, 20, DateTimeKind.Utc);

    List <FunctionInstanceLogItem> list = new List <FunctionInstanceLogItem>();
    List <InvocationLogViewModel> expected = new List <InvocationLogViewModel>();
    this.ExpectedItems = expected;
    this.Data = list;

    // List in reverse chronology. Each case below rewinds `time` by one minute.

    // Completed Success
    {
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time,
            EndTime = time.AddMinutes(2), // Completed
            LogOutput = "one",
        };
        list.Add(item);
        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "CompletedSuccess",
            whenUtc = "2010-03-06T18:13:20Z", // since it's completed, specifies end-time
            duration = 120000.0 // 2 minutes, in milliseconds
        });
    }

    // Completed Error
    {
        time = time.AddMinutes(-1);
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time,
            EndTime = time.AddMinutes(2),
            ErrorDetails = "some failure", // signifies failure
            LogOutput = "two",
        };
        list.Add(item);
        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "CompletedFailed",
            whenUtc = "2010-03-06T18:12:20Z", // end-time.
            duration = 120000.0
        });
    }

    // Still running
    {
        time = time.AddMinutes(-1);
        var item = new FunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time,
            // Recent heartbeat
            LogOutput = "two",
        };
        list.Add(item);
        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "Running",
            whenUtc = "2010-03-06T18:09:20Z", // specifies start-time
        });
    }

    // Never Finished
    {
        time = time.AddMinutes(-1);
        var item = new TestFunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time, // Never Finished
            LogOutput = "two",
            OnRefresh = (me) => { me.FunctionInstanceHeartbeatExpiry = time; }, // stale heartbeat
        };
        list.Add(item);
        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "NeverFinished",
            whenUtc = "2010-03-06T18:08:20Z", // starttime
            duration = null
        });
    }

    // No heartbeat (legacy example)
    {
        time = time.AddMinutes(-1);
        var item = new TestFunctionInstanceLogItem
        {
            FunctionInstanceId = Guid.NewGuid(),
            FunctionName = Func1,
            StartTime = time, // Never Finished
            LogOutput = "two",
            OnRefresh = (me) => { } // No heart beat
        };
        list.Add(item);
        expected.Add(new InvocationLogViewModel
        {
            id = item.FunctionInstanceId.ToString(),
            status = "Running",
            whenUtc = "2010-03-06T18:07:20Z", // starttime
        });
    }

    foreach (var item in list)
    {
        await writer.AddAsync(item);
    }
    await writer.FlushAsync();
}
// When the underlying storage batch fails, the writer must both rethrow the
// exception to the caller of FlushAsync and route it through the registered
// exception callback.
public async Task OnExceptionCalled_WhenFlushFails()
{
    Exception storageFailure = new InvalidOperationException("Some storage exception");

    var tableMock = new Mock<CloudTable>(MockBehavior.Strict, new Uri("https://fakeaccount.table.core.windows.net/sometable"));
    tableMock
        .Setup(t => t.ExecuteAsync(It.IsAny<TableOperation>(), null, null))
        .ReturnsAsync(new TableResult());
    tableMock
        .Setup(t => t.ExecuteAsync(It.IsAny<TableOperation>()))
        .ReturnsAsync(new TableResult());
    // Batch execution is the failing path.
    tableMock
        .Setup(t => t.ExecuteBatchAsync(It.IsAny<TableBatchOperation>()))
        .ThrowsAsync(storageFailure);

    var providerMock = new Mock<ILogTableProvider>(MockBehavior.Strict);
    providerMock
        .Setup(c => c.GetTable(It.IsAny<string>()))
        .Returns(tableMock.Object);

    Exception observedException = null;
    ILogWriter writer = LogFactory.NewWriter(defaultHost, "exceptions", providerMock.Object, (ex) => { observedException = ex; });

    var item = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = "test",
        StartTime = DateTime.UtcNow,
        LogOutput = "three",
        ErrorDetails = "this failed"
    };
    await writer.AddAsync(item);

    var thrownEx = await Assert.ThrowsAsync<InvalidOperationException>(() => writer.FlushAsync());

    // The very same exception instance is surfaced via both channels.
    Assert.Same(storageFailure, thrownEx);
    Assert.Same(storageFailure, observedException);
}
// Writes three instances across two functions and two time buckets, then checks
// definitions, per-bucket aggregate stats, and recent-instance ordering exactly.
public async Task LogExactWriteAndRead()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    ILogWriter writer = LogFactory.NewWriter(defaultHost, "c1", this);
    ILogReader reader = LogFactory.NewReader(this);

    string Func1 = "alpha";
    string Func2 = "beta";

    var t1a = new DateTime(2010, 3, 6, 10, 11, 20);
    var t1b = new DateTime(2010, 3, 6, 10, 11, 21); // same time bucket as t1a
    var t2 = new DateTime(2010, 3, 7, 10, 11, 21);

    FunctionInstanceLogItem l1 = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = Func1,
        StartTime = t1a,
        LogOutput = "one"
    };
    await WriteAsync(writer, l1);

    await writer.FlushAsync(); // Multiple flushes; test starting & stopping the background worker.

    FunctionInstanceLogItem l2 = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = Func2,
        StartTime = t1b,
        LogOutput = "two"
    };
    await WriteAsync(writer, l2);

    FunctionInstanceLogItem l3 = new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = Func1,
        StartTime = t2,
        LogOutput = "three",
        ErrorDetails = "this failed" // error details mark this instance as failed
    };
    await WriteAsync(writer, l3);

    await writer.FlushAsync();

    // Now read
    var definitionSegment = await reader.GetFunctionDefinitionsAsync(null, null);
    string[] functionNames = Array.ConvertAll(definitionSegment.Results, definition => definition.Name);
    Array.Sort(functionNames);
    Assert.Equal(Func1, functionNames[0]);
    Assert.Equal(Func2, functionNames[1]);

    // Read Func1
    {
        var segment1 = await reader.GetAggregateStatsAsync(l3.FunctionId, DateTime.MinValue, DateTime.MaxValue, null);
        Assert.Null(segment1.ContinuationToken);
        var stats1 = segment1.Results;
        Assert.Equal(2, stats1.Length); // includes t1 and t2

        // First bucket has l1 (pass), second bucket has l3 (fail).
        // xUnit convention: expected value first, actual second.
        Assert.Equal(1, stats1[0].TotalPass);
        Assert.Equal(1, stats1[0].TotalRun);
        Assert.Equal(0, stats1[0].TotalFail);

        Assert.Equal(0, stats1[1].TotalPass);
        Assert.Equal(1, stats1[1].TotalRun);
        Assert.Equal(1, stats1[1].TotalFail);

        // Reverse order, so l3 (the latest instance) is listed first.
        var recent1 = await GetRecentAsync(reader, l3.FunctionId);
        Assert.Equal(2, recent1.Length);
        Assert.Equal(l3.FunctionInstanceId, recent1[0].FunctionInstanceId);
        Assert.Equal(l1.FunctionInstanceId, recent1[1].FunctionInstanceId);
    }

    // Read Func2
    {
        var segment2 = await reader.GetAggregateStatsAsync(l2.FunctionId, DateTime.MinValue, DateTime.MaxValue, null);
        var stats2 = segment2.Results;
        Assert.Single(stats2);
        Assert.Equal(1, stats2[0].TotalPass);
        Assert.Equal(1, stats2[0].TotalRun);
        Assert.Equal(0, stats2[0].TotalFail);

        var recent2 = await GetRecentAsync(reader, l2.FunctionId);
        Assert.Single(recent2);
        Assert.Equal(l2.FunctionInstanceId, recent2[0].FunctionInstanceId);
    }
}
// Exercises range queries and point lookups for instances spread across multiple
// epoch tables (including a gap), then deletes one epoch table and verifies the
// reader degrades gracefully: remaining data is returned and the deleted
// instance simply disappears.
public async Task TimeRangeAcrossEpochs()
{
    // Make some very precise writes and verify we read exactly what we'd expect.
    ILogWriter writer = LogFactory.NewWriter(defaultHost, "c1", this);
    ILogReader reader = LogFactory.NewReader(this);

    // Times that functions are called; chosen to land in specific epoch tables.
    DateTime[] times = new DateTime[]
    {
        // Epoch 37
        new DateTime(2012, 3, 6, 10, 11, 20, DateTimeKind.Utc),
        new DateTime(2012, 3, 7, 10, 11, 20, DateTimeKind.Utc),

        // consecutive Epoch 38
        new DateTime(2012, 4, 8, 10, 11, 20, DateTimeKind.Utc),

        // Skip to Epoch 41
        new DateTime(2012, 7, 9, 10, 11, 20, DateTimeKind.Utc)
    };
    var logs = Array.ConvertAll(times, time => new FunctionInstanceLogItem
    {
        FunctionInstanceId = Guid.NewGuid(),
        FunctionName = commonFuncName1,
        StartTime = time,
    });

    var tasks = Array.ConvertAll(logs, log => WriteAsync(writer, log));
    await Task.WhenAll(tasks);
    await writer.FlushAsync();

    // Test point lookups for individual function instances.
    foreach (var log in logs)
    {
        var entry = await reader.LookupFunctionInstanceAsync(log.FunctionInstanceId);
        Assert.NotNull(entry);

        Assert.Equal(log.FunctionInstanceId, entry.FunctionInstanceId);
        Assert.Equal(log.FunctionName, entry.FunctionName);
        Assert.Equal(log.StartTime, entry.StartTime);
        Assert.Equal(log.EndTime, entry.EndTime);
    }

    // Try various combinations.
    await Verify(reader, DateTime.MinValue, DateTime.MaxValue, logs[3], logs[2], logs[1], logs[0]); // Infinite range, includes all.

    // Various combinations of straddling an epoch boundary
    await Verify(reader, Before(times[1]), After(times[2]), logs[2], logs[1]);
    await Verify(reader, Before(times[1]), Before(times[2]), logs[1]);
    await Verify(reader, After(times[1]), Before(times[2]));

    // Skipping over an empty epoch
    await Verify(reader, Before(times[1]), Before(times[3]), logs[2], logs[1]);

    // Now... delete the middle table; and verify the other data is still there.
    // "201204" is the table holding logs[2] (April 2012).
    ILogTableProvider provider = this;
    var table = provider.GetTable("201204");
    Assert.True(await table.ExistsAsync());
    await table.DeleteAsync();

    // logs[2] lived in the deleted table, so it is no longer returned.
    await Verify(reader, DateTime.MinValue, DateTime.MaxValue, logs[3], logs[1], logs[0]); // Infinite range, includes all remaining.

    // function instance entry from the table we deleted is now missing.
    var entry2 = await reader.LookupFunctionInstanceAsync(logs[2].FunctionInstanceId);
    Assert.Null(entry2);
}
// Flush pending log entries by delegating directly to the underlying writer.
public Task FlushAsync(CancellationToken cancellationToken = default(CancellationToken)) => _writer.FlushAsync();