public async Task GetLogsAsTextTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.None<int>();
    Option<string> since = Option.None<string>();
    Option<string> until = Option.None<string>();
    Option<bool> includeTimestamp = Option.None<bool>();
    CancellationToken cancellationToken = CancellationToken.None;
    string expectedLogText = TestLogTexts.Join(string.Empty);

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, false, tail, since, until, includeTimestamp, cancellationToken))
        .ReturnsAsync(new MemoryStream(DockerFraming.Frame(TestLogTexts)));

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var logOptions = new ModuleLogOptions(LogsContentEncoding.None, LogsContentType.Text, ModuleLogFilter.Empty, LogOutputFraming.None, Option.None<LogsOutputGroupingConfig>(), false);

    // Act
    byte[] bytes = await logsProvider.GetLogs(moduleId, logOptions, cancellationToken);

    // Assert
    string logsText = Encoding.UTF8.GetString(bytes);
    Assert.Equal(expectedLogText, logsText);
}
public async Task GetLogsAsTextWithCompressionTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.None<int>();
    Option<int> since = Option.None<int>();
    CancellationToken cancellationToken = CancellationToken.None;
    string expectedLogText = TestLogTexts.Join(string.Empty);

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, false, tail, since, cancellationToken))
        .ReturnsAsync(new MemoryStream(DockerFraming.Frame(TestLogTexts)));

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var logOptions = new ModuleLogOptions(LogsContentEncoding.Gzip, LogsContentType.Text, ModuleLogFilter.Empty);

    // Act
    byte[] bytes = await logsProvider.GetLogs(moduleId, logOptions, cancellationToken);

    // Assert
    byte[] decompressedBytes = Compression.DecompressFromGzip(bytes);
    string logsText = Encoding.UTF8.GetString(decompressedBytes);
    Assert.Equal(expectedLogText, logsText);
}
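The compression tests above round-trip through Compression.DecompressFromGzip, whose implementation is not shown here. A minimal sketch of the assumed behavior, written against System.IO.Compression (the actual helper may differ):

using System.IO;
using System.IO.Compression;

static class GzipSketch
{
    // Inflates a gzip-compressed buffer back into raw bytes.
    public static byte[] Decompress(byte[] compressed)
    {
        using (var input = new MemoryStream(compressed))
        using (var gzip = new GZipStream(input, CompressionMode.Decompress))
        using (var output = new MemoryStream())
        {
            gzip.CopyTo(output);
            return output.ToArray();
        }
    }
}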
public LogsProcessorTests()
{
    MockProcessor = new Mock<IBlockchainProcessor>();
    MockProgressService = new Mock<IBlockProgressService>();
    Service = new LogsProcessor(MockProcessor.Object, MockProgressService.Object, MaxBlocksPerBatch);
}
public async Task HandleWithCancellationTest()
{
    // Arrange
    var buffer = DockerFraming.Frame(TestLogTexts);
    string id = "m1";
    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(id, true, Option.None<int>(), Option.None<string>(), Option.None<string>(), Option.None<bool>(), It.IsAny<CancellationToken>()))
        .ReturnsAsync(new MemoryStream(buffer));
    runtimeInfoProvider.Setup(r => r.GetModules(It.IsAny<CancellationToken>()))
        .ReturnsAsync(new[] { new ModuleRuntimeInfo(id, "docker", ModuleStatus.Running, "foo", 0, Option.None<DateTime>(), Option.None<DateTime>()) });

    var logsProcessor = new LogsProcessor(new LogMessageParser("testIotHub", "d1"));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);

    var logRequestItem = new LogRequestItem(id, ModuleLogFilter.Empty);
    var logsStreamRequest = new LogsStreamRequest("1.0", new List<LogRequestItem> { logRequestItem }, LogsContentEncoding.None, LogsContentType.Text);
    byte[] logsStreamRequestBytes = logsStreamRequest.ToBytes();
    var logsStreamRequestArraySeg = new ArraySegment<byte>(logsStreamRequestBytes);
    var clientWebSocket = new Mock<IClientWebSocket>();
    clientWebSocket.Setup(c => c.ReceiveAsync(It.IsAny<ArraySegment<byte>>(), It.IsAny<CancellationToken>()))
        .Callback<ArraySegment<byte>, CancellationToken>((a, c) => logsStreamRequestArraySeg.CopyTo(a))
        .ReturnsAsync(new WebSocketReceiveResult(logsStreamRequestBytes.Length, WebSocketMessageType.Binary, true));
    var receivedBytes = new List<byte>();
    clientWebSocket.Setup(c => c.SendAsync(It.IsAny<ArraySegment<byte>>(), WebSocketMessageType.Binary, true, It.IsAny<CancellationToken>()))
        .Callback<ArraySegment<byte>, WebSocketMessageType, bool, CancellationToken>((a, w, f, c) => receivedBytes.AddRange(a.Array))
        .Returns(async () => await Task.Delay(TimeSpan.FromSeconds(3)));
    clientWebSocket.SetupGet(c => c.State).Returns(WebSocketState.Open);

    // Act
    var logsStreamRequestHandler = new LogsStreamRequestHandler(logsProvider, runtimeInfoProvider.Object);
    Task handleTask = logsStreamRequestHandler.Handle(clientWebSocket.Object, CancellationToken.None);
    await Task.Delay(TimeSpan.FromSeconds(10));
    clientWebSocket.SetupGet(c => c.State).Returns(WebSocketState.Closed);

    // Assert
    await Task.Delay(TimeSpan.FromSeconds(5));
    Assert.True(handleTask.IsCompleted);
    runtimeInfoProvider.VerifyAll();
    clientWebSocket.VerifyAll();
    Assert.True(receivedBytes.Count < buffer.Length);
    IList<string> receivedChunks = SimpleFraming.Parse(receivedBytes.ToArray())
        .Select(r => Encoding.UTF8.GetString(r))
        .ToList();
    Assert.Equal(TestLogTexts.Take(receivedChunks.Count), receivedChunks);
}
public void TestLogsAgeLimit5Days()
{
    TestPage.SelectInDropDown(TestPage.LogsAgeLimitDrDown, "5");
    TestPage.CollectLogs(out string message);
    Assert.AreEqual(expected: "Logs collected successfully", actual: message);
    bool logsLimitNotExceeded = new LogsProcessor().CheckLogsAgeLimit(logsPath: "Path to collected logs", limitDays: 5);
    Assert.IsTrue(logsLimitNotExceeded);
}
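CheckLogsAgeLimit itself is not shown in these examples. A minimal sketch of the check it presumably performs, under the assumption that it verifies no collected log file is older than the given number of days:

using System;
using System.IO;
using System.Linq;

public static bool CheckLogsAgeLimitSketch(string logsPath, int limitDays)
{
    // Assumed behavior: every file under logsPath must have been
    // written within the last limitDays days.
    DateTime cutoff = DateTime.UtcNow.AddDays(-limitDays);
    return Directory.EnumerateFiles(logsPath, "*", SearchOption.AllDirectories)
        .All(file => File.GetLastWriteTimeUtc(file) >= cutoff);
}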
public void CollectLogsForAllSelected()
{
    TestPage.SelectAllCheckboxes();
    TestPage.CollectLogs(out string message);
    Assert.AreEqual(expected: "Logs collected successfully", actual: message);
    bool logsAreEqual = new LogsProcessor().CompareLogs(actual: "Path to collected logs", expected: "Path to logs template");
    Assert.IsTrue(logsAreEqual);
}
public void CollectSystemConfigurationOnly()
{
    TestPage.SelectSystemConfiguration();
    TestPage.CollectLogs(out string message);
    Assert.AreEqual(expected: "Logs collected successfully", actual: message);
    bool logsAreEqual = new LogsProcessor().CompareLogs(actual: "Path to collected logs", expected: "Path to logs template");
    Assert.IsTrue(logsAreEqual);
}
public async Task UsingTheIndividualComponents()
{
    TransferMetadata.CurrentChainUrl = BlockchainUrl;
    var web3 = new Web3.Web3(BlockchainUrl);

    using (var azureSearchService = new AzureSearchService(AzureSearchServiceName, _azureSearchApiKey))
    {
        await azureSearchService.DeleteIndexAsync(AzureTransferIndexName);
        try
        {
            using (var transferIndexer = await azureSearchService.CreateEventIndexer<TransferEvent_ERC20>(AzureTransferIndexName))
            using (var transferProcessor = new EventIndexProcessor<TransferEvent_ERC20>(transferIndexer))
            {
                var logProcessor = new BlockRangeLogsProcessor(
                    web3.Eth.Filters.GetLogs,
                    new ILogProcessor[] { transferProcessor });

                var progressRepository = new JsonBlockProgressRepository(CreateJsonFileToHoldProgress());
                var progressService = new StaticBlockRangeProgressService(3146684, 3146694, progressRepository);
                var batchProcessorService = new LogsProcessor(logProcessor, progressService, maxNumberOfBlocksPerBatch: 2);

                BlockRange? lastBlockRangeProcessed;
                do
                {
                    lastBlockRangeProcessed = await batchProcessorService.ProcessOnceAsync();
                } while (lastBlockRangeProcessed != null);

                Assert.Equal(19, transferIndexer.Indexed);
            }
        }
        finally
        {
            await azureSearchService.DeleteIndexAsync(AzureTransferIndexName);
        }
    }
}
public async Task GetLogsStreamWithFiltersTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.Some(10);
    Option<int> since = Option.Some(1552887267);
    CancellationToken cancellationToken = CancellationToken.None;
    byte[] dockerLogsStreamBytes = DockerFraming.Frame(TestLogTexts);

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, true, tail, since, cancellationToken))
        .ReturnsAsync(new MemoryStream(dockerLogsStreamBytes));
    runtimeInfoProvider.Setup(r => r.GetModules(It.IsAny<CancellationToken>()))
        .ReturnsAsync(new[] { new ModuleRuntimeInfo(moduleId, "docker", ModuleStatus.Running, "foo", 0, Option.None<DateTime>(), Option.None<DateTime>()) });

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var filter = new ModuleLogFilter(tail, since, Option.Some(6), Option.Some("Starting"));
    var logOptions = new ModuleLogOptions(LogsContentEncoding.Gzip, LogsContentType.Text, filter);

    var receivedBytes = new List<byte>();
    Task Callback(ArraySegment<byte> bytes)
    {
        receivedBytes.AddRange(bytes.ToArray());
        return Task.CompletedTask;
    }

    // Act
    await logsProvider.GetLogsStream(moduleId, logOptions, Callback, cancellationToken);
    await Task.Delay(TimeSpan.FromSeconds(3));

    // Assert
    Assert.NotEmpty(receivedBytes);
    string receivedText = Compression.DecompressFromGzip(receivedBytes.ToArray())
        .Skip(8)
        .ToArray()
        .FromBytes();
    Assert.Equal(TestLogTexts[0], receivedText);
}
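The Skip(8) above drops the header of a Docker log frame. Docker's multiplexed stream format, which DockerFraming.Frame presumably reproduces, prefixes each payload with an 8-byte header: one stream-type byte, three zero padding bytes, and a 4-byte big-endian payload length. A minimal sketch of framing a single line under that assumption (FrameSketch is illustrative, not the library helper):

using System.Text;

static byte[] FrameSketch(string line)
{
    byte[] payload = Encoding.UTF8.GetBytes(line);
    var frame = new byte[8 + payload.Length];
    frame[0] = 1; // stream type: 1 = stdout, 2 = stderr
    // frame[1..3] stay zero (padding)
    frame[4] = (byte)(payload.Length >> 24); // big-endian payload length
    frame[5] = (byte)(payload.Length >> 16);
    frame[6] = (byte)(payload.Length >> 8);
    frame[7] = (byte)payload.Length;
    payload.CopyTo(frame, 8);
    return frame;
}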
public async Task GetLogsAsJsonWithCompressionTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.None<int>();
    Option<string> since = Option.None<string>();
    Option<string> until = Option.None<string>();
    Option<bool> includeTimestamp = Option.Some(true);
    CancellationToken cancellationToken = CancellationToken.None;

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    // Note: EdgeAgent automatically includes the timestamp for log parsing by default for content type JSON
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, false, tail, since, until, includeTimestamp, cancellationToken))
        .ReturnsAsync(new MemoryStream(DockerFraming.Frame(TestLogTexts)));

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    ModuleLogFilter filter = new ModuleLogFilter(tail, since, until, Option.None<int>(), includeTimestamp, Option.None<string>());
    var logOptions = new ModuleLogOptions(LogsContentEncoding.Gzip, LogsContentType.Json, filter, LogOutputFraming.None, Option.None<LogsOutputGroupingConfig>(), false);

    // Act
    byte[] bytes = await logsProvider.GetLogs(moduleId, logOptions, cancellationToken);

    // Assert
    byte[] decompressedBytes = Compression.DecompressFromGzip(bytes);
    var logMessages = decompressedBytes.FromBytes<List<ModuleLogMessage>>();
    Assert.NotNull(logMessages);
    Assert.Equal(TestLogTexts.Length, logMessages.Count);
    for (int i = 0; i < logMessages.Count; i++)
    {
        ModuleLogMessage logMessage = logMessages[i];
        (int logLevel, Option<DateTime> timeStamp, string text) = LogMessageParser.ParseLogText(TestLogTexts[i]);
        Assert.Equal(logLevel, logMessage.LogLevel);
        Assert.Equal(timeStamp.HasValue, logMessage.TimeStamp.HasValue);
        Assert.Equal(timeStamp.OrDefault(), logMessage.TimeStamp.OrDefault());
        Assert.Equal(text, logMessage.Text);
        Assert.Equal(iotHub, logMessage.IoTHub);
        Assert.Equal(deviceId, logMessage.DeviceId);
        Assert.Equal(moduleId, logMessage.ModuleId);
    }
}
public async Task GetLogsAsJsonWithCompressionTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.None<int>();
    CancellationToken cancellationToken = CancellationToken.None;
    string expectedLogText = TestLogTexts.Join(string.Empty);

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, false, tail, cancellationToken))
        .ReturnsAsync(new MemoryStream(GetDockerLogsStream(TestLogTexts)));

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var logOptions = new ModuleLogOptions(moduleId, LogsContentEncoding.Gzip, LogsContentType.Json);

    // Act
    byte[] bytes = await logsProvider.GetLogs(logOptions, cancellationToken);

    // Assert
    byte[] decompressedBytes = Compression.DecompressFromGzip(bytes);
    var logMessages = decompressedBytes.FromBytes<List<ModuleLogMessage>>();
    Assert.NotNull(logMessages);
    Assert.Equal(TestLogTexts.Length, logMessages.Count);
    for (int i = 0; i < logMessages.Count; i++)
    {
        ModuleLogMessage logMessage = logMessages[i];
        (int logLevel, Option<DateTime> timeStamp, string text) = LogMessageParser.ParseLogText(TestLogTexts[i]);
        Assert.Equal(logLevel, logMessage.LogLevel);
        Assert.Equal(timeStamp.HasValue, logMessage.TimeStamp.HasValue);
        Assert.Equal(timeStamp.OrDefault(), logMessage.TimeStamp.OrDefault());
        Assert.Equal(text, logMessage.Text);
        Assert.Equal(iotHub, logMessage.IoTHub);
        Assert.Equal(deviceId, logMessage.DeviceId);
        Assert.Equal(moduleId, logMessage.ModuleId);
    }
}
public async Task GetLogsAsJsonTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    string moduleId = "mod1";
    Option<int> tail = Option.None<int>();
    Option<int> since = Option.None<int>();
    CancellationToken cancellationToken = CancellationToken.None;

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, false, tail, since, cancellationToken))
        .ReturnsAsync(new MemoryStream(DockerFraming.Frame(TestLogTexts)));

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var logOptions = new ModuleLogOptions(LogsContentEncoding.None, LogsContentType.Json, ModuleLogFilter.Empty, LogOutputFraming.None, Option.None<LogsOutputGroupingConfig>(), false);

    // Act
    byte[] bytes = await logsProvider.GetLogs(moduleId, logOptions, cancellationToken);

    // Assert
    var logMessages = bytes.FromBytes<List<ModuleLogMessage>>();
    Assert.NotNull(logMessages);
    Assert.Equal(TestLogTexts.Length, logMessages.Count);
    for (int i = 0; i < logMessages.Count; i++)
    {
        ModuleLogMessage logMessage = logMessages[i];
        (int logLevel, Option<DateTime> timeStamp, string text) = LogMessageParser.ParseLogText(TestLogTexts[i]);
        Assert.Equal(logLevel, logMessage.LogLevel);
        Assert.Equal(timeStamp.HasValue, logMessage.TimeStamp.HasValue);
        Assert.Equal(timeStamp.OrDefault(), logMessage.TimeStamp.OrDefault());
        Assert.Equal(text, logMessage.Text);
        Assert.Equal(iotHub, logMessage.IoTHub);
        Assert.Equal(deviceId, logMessage.DeviceId);
        Assert.Equal(moduleId, logMessage.ModuleId);
    }
}
public async Task GetLogsStreamTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev3";
    string moduleId = "mod3";
    Option<int> tail = Option.None<int>();
    Option<string> since = Option.None<string>();
    Option<string> until = Option.None<string>();
    Option<bool> includeTimestamp = Option.None<bool>();
    CancellationToken cancellationToken = CancellationToken.None;
    byte[] dockerLogsStreamBytes = DockerFraming.Frame(TestLogTexts);

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId, true, tail, since, until, includeTimestamp, cancellationToken))
        .ReturnsAsync(new MemoryStream(dockerLogsStreamBytes));
    runtimeInfoProvider.Setup(r => r.GetModules(It.IsAny<CancellationToken>()))
        .ReturnsAsync(new[] { new ModuleRuntimeInfo(moduleId, "docker", ModuleStatus.Running, "foo", 0, Option.None<DateTime>(), Option.None<DateTime>()) });

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    var logOptions = new ModuleLogOptions(LogsContentEncoding.None, LogsContentType.Text, ModuleLogFilter.Empty, LogOutputFraming.None, Option.None<LogsOutputGroupingConfig>(), true);

    var receivedBytes = new List<byte>();
    Task Callback(ArraySegment<byte> bytes)
    {
        receivedBytes.AddRange(bytes.ToArray());
        return Task.CompletedTask;
    }

    // Act
    await logsProvider.GetLogsStream(moduleId, logOptions, Callback, cancellationToken);

    // Assert
    Assert.NotEmpty(receivedBytes);
    Assert.Equal(string.Join(string.Empty, TestLogTexts).ToBytes(), receivedBytes);
}
public virtual async Task<BigInteger> ProcessAsync(BigInteger from, BigInteger? to = null, CancellationTokenSource ctx = null, Action<LogBatchProcessedArgs> logBatchProcessedCallback = null)
{
    if (!LogProcessors.Any())
    {
        throw new InvalidOperationException("No events to capture - use AddEventAsync to add listeners for indexable events");
    }

    var logProcessor = new BlockRangeLogsProcessor(Web3.Eth.Filters.GetLogs, LogProcessors, Filters);
    IBlockProgressService progressService = CreateProgressService(from, to);
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, maxNumberOfBlocksPerBatch: MaxBlocksPerBatch);

    if (to != null)
    {
        return await ProcessRange(ctx, logBatchProcessedCallback, batchProcessorService);
    }

    return await batchProcessorService.ProcessContinuallyAsync(ctx?.Token ?? new CancellationToken(), logBatchProcessedCallback);
}
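A hypothetical caller of ProcessAsync (the processor instance name and block numbers are illustrative): supply both from and to for a bounded run, or omit to for continuous processing until cancelled.

var cts = new CancellationTokenSource();
// Bounded run over a fixed block range, reporting each processed batch
// through the optional callback exposed by ProcessAsync above.
BigInteger blocksProcessed = await processor.ProcessAsync(
    from: 3146684,
    to: 3146694,
    ctx: cts,
    logBatchProcessedCallback: args => Console.WriteLine("batch processed"));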
public async Task QueueAllEventsForMakerDAOContract()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create Queue Factory
    // - In this sample we're targeting Azure
    // - The factory communicates with Azure to create and get different queues
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // Create a Queue
    // - This is where we're going to put the matching event logs
    var queue = await queueFactory.GetOrCreateQueueAsync("makerdaoevents");

    // Get the maker DAO contract abi
    // - from this we're able to match and decode the events in the contract
    var contractAbi = new ABIDeserialiser().DeserialiseContract(MAKER_DAO_ABI);

    // Create an event subscription for these events
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription(contractAbi.Events, new[] { MAKER_CONTRACT_ADDRESS });

    // Assign the queue to the event subscription
    // - Matching events will be written to this queue
    // - By default a generic message is written to the queue
    // - The message contains the raw log (aka FilterLog), decoded event parameter values and event metadata
    // - Therefore the message schema is consistent across all messages sent to any queues
    // - However - should you require your own queue message schema the method below accepts a custom message mapper
    // - Ultimately the message is converted to json
    eventSubscription.AddQueueHandler(queue);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"makerdao");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchainProxy to get the logs
    // - It sends each log to the event subscriptions to determine whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when the rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(16, eventSubscription.State.GetInt("EventsHandled"));

        // get the message count from the queue
        Assert.Equal(16, await queue.GetApproxMessageCountAsync());
    }
    finally
    {
        // delete any data from Azure
        await ClearDown(queue, storageCloudSetup, queueFactory);
    }
}
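ProcessOnceAsync handles a single batch and returns null when there is nothing left to process. To drain a backlog instead of processing one batch, the same service can be looped until it returns null, as UsingTheIndividualComponents and ProcessRange elsewhere in these examples do:

BlockRange? range;
do
{
    // Keep requesting the next unprocessed block range until we are caught up.
    range = await batchProcessorService.ProcessOnceAsync(ctx.Token);
} while (range != null);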
public async Task GetLogsStreamWithMultipleModulesWithRegexMatchTest()
{
    // Arrange
    string iotHub = "foo.azure-devices.net";
    string deviceId = "dev1";
    Option<int> tail = Option.None<int>();
    Option<int> since = Option.None<int>();
    CancellationToken cancellationToken = CancellationToken.None;

    string moduleId1 = "mod1";
    string moduleId2 = "mod2";
    var filter = new ModuleLogFilter(tail, since, Option.None<int>(), Option.Some("bad"));
    byte[] dockerLogsStreamBytes1 = DockerFraming.Frame(TestLogTexts);
    byte[] dockerLogsStreamBytes2 = DockerFraming.Frame(TestLogTexts);
    var modulesList = new List<ModuleRuntimeInfo>
    {
        new ModuleRuntimeInfo(moduleId1, "docker", ModuleStatus.Running, "foo", 0, Option.None<DateTime>(), Option.None<DateTime>()),
        new ModuleRuntimeInfo(moduleId2, "docker", ModuleStatus.Running, "foo", 0, Option.None<DateTime>(), Option.None<DateTime>())
    };

    var runtimeInfoProvider = new Mock<IRuntimeInfoProvider>();
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId1, true, tail, since, cancellationToken))
        .ReturnsAsync(new MemoryStream(dockerLogsStreamBytes1));
    runtimeInfoProvider.Setup(r => r.GetModuleLogs(moduleId2, true, tail, since, cancellationToken))
        .ReturnsAsync(new MemoryStream(dockerLogsStreamBytes2));
    runtimeInfoProvider.Setup(r => r.GetModules(It.IsAny<CancellationToken>()))
        .ReturnsAsync(modulesList);

    var logsProcessor = new LogsProcessor(new LogMessageParser(iotHub, deviceId));
    var logsProvider = new LogsProvider(runtimeInfoProvider.Object, logsProcessor);
    string regex = "mod";
    var logOptions = new ModuleLogOptions(LogsContentEncoding.Gzip, LogsContentType.Text, filter);

    var receivedBytes = new List<byte[]>();
    Task Callback(ArraySegment<byte> bytes)
    {
        receivedBytes.Add(bytes.ToArray());
        return Task.CompletedTask;
    }

    var expectedTextLines = new List<string> { TestLogTexts[3], TestLogTexts[4], TestLogTexts[3], TestLogTexts[4] };
    expectedTextLines.Sort();

    // Act
    await logsProvider.GetLogsStream(regex, logOptions, Callback, cancellationToken);
    await Task.Delay(TimeSpan.FromSeconds(6));

    // Assert
    Assert.NotEmpty(receivedBytes);
    List<string> receivedText = receivedBytes
        .Select(r => Compression.DecompressFromGzip(r)
            .Skip(8)
            .ToArray()
            .FromBytes())
        .ToList();
    receivedText.Sort();
    Assert.Equal(expectedTextLines, receivedText);
}
public async Task WebJobExample()
{
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];
    string azureSearchKey = config["AzureSearchApiKey"];

    var configurationContext = EventProcessingConfigMock.Create(PARTITION, out IdGenerator idGenerator);
    IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Rinkeby);

    // search components
    var searchService = new AzureSearchService(serviceName: AZURE_SEARCH_SERVICE_NAME, searchApiKey: azureSearchKey);
    var searchIndexFactory = new AzureSubscriberSearchIndexFactory(searchService);

    // queue components
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // subscriber repository
    var repositoryFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

    // load subscribers and event subscriptions
    var eventSubscriptionFactory = new EventSubscriptionFactory(web3, configurationRepository, queueFactory, searchIndexFactory, repositoryFactory);
    List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

    // progress repo (dictates which block ranges to process next)
    // maintain separate progress per partition via a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // load service
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
    var logProcessor = new BlockRangeLogsProcessor(web3, eventSubscriptions);
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    BlockRange? rangeProcessed;
    try
    {
        var ctx = new System.Threading.CancellationTokenSource();
        rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);
    }
    finally
    {
        await ClearDown(configurationContext, storageCloudSetup, searchService, queueFactory, repositoryFactory);
    }

    // save event subscription state
    await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));

    // assertions
    Assert.NotNull(rangeProcessed);
    Assert.Equal((ulong)10, rangeProcessed.Value.BlockCount);

    var subscriptionState1 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 1); // interested in transfers with contract queries and aggregations
    var subscriptionState2 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 2); // interested in transfers with simple aggregation
    var subscriptionState3 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 3); // interested in any event for a specific address

    Assert.Equal("4009000000002040652615", subscriptionState1.Values["RunningTotalForTransferValue"].ToString());
    Assert.Equal((uint)19, subscriptionState2.Values["CurrentTransferCount"]);

    var txForSpecificAddress = (List<string>)subscriptionState3.Values["AllTransactionHashes"];
    Assert.Equal("0x362bcbc78a5cc6156e8d24d95bee6b8f53d7821083940434d2191feba477ae0e", txForSpecificAddress[0]);
    Assert.Equal("0xe63e9422dedf84d0ce13f9f75ebfd86333ce917b2572925fbdd51b51caf89b77", txForSpecificAddress[1]);

    var blockNumbersForSpecificAddress = (List<HexBigInteger>)subscriptionState3.Values["AllBlockNumbers"];
    Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[0].Value);
    Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[1].Value);
}
public async Task WriteAnyMakerEventToQueue()
{
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    var configurationContext = MakerDAOEventProcessingConfig.Create(PARTITION, out IdGenerator idGenerator);
    IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // queue components
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // load subscribers and event subscriptions
    var eventSubscriptionFactory = new EventSubscriptionFactory(web3, configurationRepository, queueFactory);
    List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

    // progress repo (dictates which block ranges to process next)
    // maintain separate progress per partition via a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // this ensures we only query the chain for events relating to this contract
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MakerDAOEventProcessingConfig.MAKER_CONTRACT_ADDRESS } };

    // load service
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
    var logProcessor = new BlockRangeLogsProcessor(web3, eventSubscriptions, makerAddressFilter);
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    var blockRangesProcessed = new List<BlockRange?>();
    try
    {
        for (var i = 0; i < 2; i++) // 2 batch iterations
        {
            var ctx = new System.Threading.CancellationTokenSource();
            var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);
            blockRangesProcessed.Add(rangeProcessed);

            // save event subscription state after each batch
            await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));
        }
    }
    finally
    {
        await ClearDown(configurationContext, storageCloudSetup, queueFactory);
    }

    var subscriptionState = await configurationRepository.EventSubscriptionStates.GetAsync(eventSubscriptions[0].Id);
    Assert.Equal(2, (int)subscriptionState.Values["HandlerInvocations"]);
    Assert.Equal(28, (int)subscriptionState.Values["EventsHandled"]);
}
public async Task WritingCustomMessagesToTheQueue()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create Queue Factory
    // - In this sample we're targeting Azure
    // - The factory communicates with Azure to create and get different queues
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // Create a Queue
    // - This is where we're going to put the matching event logs
    var queue = await queueFactory.GetOrCreateQueueAsync("makerdaotransferscustom");

    // Create an event subscription specifically for ERC20 Transfers
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription<TransferEventDto>(contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

    // Create a mapper that will convert the DecodedEvent into a custom message we want on the queue
    // In this sample we're using a subscription that is specific to an EventDTO (EventSubscription<TransferEventDto>)
    // This ensures that the decodedEvent.DecodedEventDto property is populated during processing
    // (If the subscription is not tied to an EventDTO the decodedEvent.DecodedEventDto property would be null
    // BUT we can still read the event arguments (aka parameters or topics) from the decodedEvent.Event property)
    var queueMessageMapper = new QueueMessageMapper((decodedEvent) =>
    {
        return new CustomQueueMessageForTransfers
        {
            BlockNumber = decodedEvent.Log.BlockNumber.Value.ToString(),
            TransactionHash = decodedEvent.Log.TransactionHash,
            LogIndex = decodedEvent.Log.LogIndex.Value.ToString(),
            Transfer = decodedEvent.DecodedEventDto as TransferEventDto
        };
    });

    // Assign the queue to the event subscription
    // - Matching events will be written to this queue
    // - Pass a custom mapper to create a suitable queue message
    // - Ultimately the message is converted to json
    eventSubscription.AddQueueHandler(queue, queueMessageMapper);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"makerdaotransferscustom");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchainProxy to get the logs
    // - It sends each log to the event subscriptions to determine whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when the rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

        // get the message count from the queue
        Assert.Equal(11, await queue.GetApproxMessageCountAsync());

        // A sample message body from the queue:
        /*
        {"BlockNumber":"7540010","TransactionHash":"0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0","LogIndex":"132","Transfer":{"From":"0x296c61eaf5bea208bbabc65ae01c3bc5270fe386","To":"0x2a8f1a6af55b705b7daee0776d6f97302de2a839","Value":119928660890733235}}
        */
    }
    finally
    {
        // delete any data from Azure
        await ClearDown(queue, storageCloudSetup, queueFactory);
    }
}
protected override void Load(ContainerBuilder builder)
{
    // ILogsUploader
    builder.Register(c => new AzureBlobLogsUploader(this.iotHubHostName, this.deviceId))
        .As<ILogsUploader>()
        .SingleInstance();

    // Task<ILogsProvider>
    builder.Register(
            async c =>
            {
                var logsProcessor = new LogsProcessor(new LogMessageParser(this.iotHubHostName, this.deviceId));
                IRuntimeInfoProvider runtimeInfoProvider = await c.Resolve<Task<IRuntimeInfoProvider>>();
                return new LogsProvider(runtimeInfoProvider, logsProcessor) as ILogsProvider;
            })
        .As<Task<ILogsProvider>>()
        .SingleInstance();

    // Task<IStreamRequestListener>
    builder.Register(
            async c =>
            {
                if (this.enableStreams)
                {
                    var runtimeInfoProviderTask = c.Resolve<Task<IRuntimeInfoProvider>>();
                    var logsProviderTask = c.Resolve<Task<ILogsProvider>>();
                    IRuntimeInfoProvider runtimeInfoProvider = await runtimeInfoProviderTask;
                    ILogsProvider logsProvider = await logsProviderTask;
                    var streamRequestHandlerProvider = new StreamRequestHandlerProvider(logsProvider, runtimeInfoProvider);
                    return new StreamRequestListener(streamRequestHandlerProvider) as IStreamRequestListener;
                }
                else
                {
                    return new NullStreamRequestListener() as IStreamRequestListener;
                }
            })
        .As<Task<IStreamRequestListener>>()
        .SingleInstance();

    // Task<IRequestManager>
    builder.Register(
            async c =>
            {
                var logsUploader = c.Resolve<ILogsUploader>();
                var runtimeInfoProviderTask = c.Resolve<Task<IRuntimeInfoProvider>>();
                var logsProviderTask = c.Resolve<Task<ILogsProvider>>();
                IRuntimeInfoProvider runtimeInfoProvider = await runtimeInfoProviderTask;
                ILogsProvider logsProvider = await logsProviderTask;
                var requestHandlers = new List<IRequestHandler>
                {
                    new PingRequestHandler(),
                    new LogsUploadRequestHandler(logsUploader, logsProvider, runtimeInfoProvider)
                };
                return new RequestManager(requestHandlers, this.requestTimeout) as IRequestManager;
            })
        .As<Task<IRequestManager>>()
        .SingleInstance();

    // Task<IEdgeAgentConnection>
    builder.Register(
            async c =>
            {
                var serde = c.Resolve<ISerde<DeploymentConfig>>();
                var deviceClientprovider = c.Resolve<IModuleClientProvider>();
                var streamRequestListenerTask = c.Resolve<Task<IStreamRequestListener>>();
                var requestManagerTask = c.Resolve<Task<IRequestManager>>();
                IStreamRequestListener streamRequestListener = await streamRequestListenerTask;
                IRequestManager requestManager = await requestManagerTask;
                IEdgeAgentConnection edgeAgentConnection = new EdgeAgentConnection(deviceClientprovider, serde, requestManager, streamRequestListener, this.configRefreshFrequency);
                return edgeAgentConnection;
            })
        .As<Task<IEdgeAgentConnection>>()
        .SingleInstance();

    // Task<IConfigSource>
    builder.Register(
            async c =>
            {
                var serde = c.Resolve<ISerde<DeploymentConfigInfo>>();
                var edgeAgentConnectionTask = c.Resolve<Task<IEdgeAgentConnection>>();
                IEncryptionProvider encryptionProvider = await c.Resolve<Task<IEncryptionProvider>>();
                IEdgeAgentConnection edgeAgentConnection = await edgeAgentConnectionTask;
                var twinConfigSource = new TwinConfigSource(edgeAgentConnection, this.configuration);
                IConfigSource backupConfigSource = new FileBackupConfigSource(this.backupConfigFilePath, twinConfigSource, serde, encryptionProvider);
                return backupConfigSource;
            })
        .As<Task<IConfigSource>>()
        .SingleInstance();

    // Task<IReporter>
    builder.Register(
            async c =>
            {
                var runtimeInfoDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(DockerReportedRuntimeInfo),
                    [Constants.Unknown] = typeof(UnknownRuntimeInfo)
                };
                var edgeAgentDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(EdgeAgentDockerRuntimeModule),
                    [Constants.Unknown] = typeof(UnknownEdgeAgentModule)
                };
                var edgeHubDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(EdgeHubDockerRuntimeModule),
                    [Constants.Unknown] = typeof(UnknownEdgeHubModule)
                };
                var moduleDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(DockerRuntimeModule)
                };
                var deserializerTypesMap = new Dictionary<Type, IDictionary<string, Type>>
                {
                    { typeof(IRuntimeInfo), runtimeInfoDeserializerTypes },
                    { typeof(IEdgeAgentModule), edgeAgentDeserializerTypes },
                    { typeof(IEdgeHubModule), edgeHubDeserializerTypes },
                    { typeof(IModule), moduleDeserializerTypes }
                };
                var edgeAgentConnectionTask = c.Resolve<Task<IEdgeAgentConnection>>();
                IEdgeAgentConnection edgeAgentConnection = await edgeAgentConnectionTask;
                return new IoTHubReporter(edgeAgentConnection, new TypeSpecificSerDe<AgentState>(deserializerTypesMap), this.versionInfo) as IReporter;
            })
        .As<Task<IReporter>>()
        .SingleInstance();

    base.Load(builder);
}
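A hypothetical consumer of the registrations above (Autofac container construction elided; names illustrative): because the module registers services as Task<T>, a consumer resolves the task and awaits it to obtain the concrete instance.

// Resolve the async registration from a built IContainer and await it.
ILogsProvider logsProvider = await container.Resolve<Task<ILogsProvider>>();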
private static async Task<BigInteger> ProcessRange(CancellationTokenSource ctx, Action<LogBatchProcessedArgs> logBatchProcessedCallBack, LogsProcessor batchProcessorService)
{
    uint blockRangesProcessed = 0;
    BigInteger blocksProcessed = 0;
    BlockRange? lastBlockRangeProcessed;
    do
    {
        lastBlockRangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx?.Token ?? new CancellationToken());
        if (lastBlockRangeProcessed != null)
        {
            blockRangesProcessed++;
            blocksProcessed += lastBlockRangeProcessed.Value.BlockCount;
            logBatchProcessedCallBack?.Invoke(new LogBatchProcessedArgs(blockRangesProcessed, lastBlockRangeProcessed.Value));
        }
    } while (lastBlockRangeProcessed != null);

    return blocksProcessed;
}
public async Task WriteTransferEventsForMakerDAOToAzureStorage()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create an Azure Table Storage Factory
    // - The factory communicates with Azure to create and get different tables
    var tableStorageFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

    // Create a Handler for a Table
    // - It wraps a table repository
    // - This is where we're going to put the matching event logs
    // - we're supplying a table prefix
    // - the actual table name would be "<prefix>TransactionLogs"
    // - this allows us to have different tables for different types of event logs
    // - the handler implements ILogHandler
    // - ILogHandler is a really simple interface to implement if you wish to customise the storage
    var storageHandlerForLogs = await tableStorageFactory.GetLogRepositoryHandlerAsync(tablePrefix: "makerdaotransfersstorage");

    // Create an event subscription specifically for ERC20 Transfers
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription<TransferEventDto>(contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

    // Assign the storage handler to the event subscription
    // - Matching events will be passed to the handler
    // - internally the handler passes the events to the repository layer which writes them to Azure
    eventSubscription.AddStorageHandler(storageHandlerForLogs);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"makerdaotransfersstorage");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchainProxy to get the logs
    // - It sends each log to the event subscriptions to determine whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when the rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

        // get the row count from azure storage
        // the querying on storage is limited
        // the TransactionHash is the partition key and the row key is the LogIndex
        // this allows us to query by tx hash
        var logRepositoryHandler = storageHandlerForLogs as TransactionLogRepositoryHandler;
        var repository = logRepositoryHandler.TransactionLogRepository as TransactionLogRepository;

        var expectedTransactionHashes = new[]
        {
            "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
            "0x0bee561ac6bafb59bcc4c48fc4c1225aaedbab3e8089acea420140aafa47f3e5",
            "0x6fc82b076fa7088581a80869cb9c7a08d7f8e897670a9f67e39139b39246da7e",
            "0xdc2ee28db35ed5dbbc9e18a7d6bdbacb6e6633a9fce1ecda99ea7e1cf4bc8c72",
            "0xcd2fea48c84468f70c9a44c4ffd7b26064a2add8b72937edf593634d2501c1f6",
            "0x3acf887420887148222aab1d25d4d4893794e505ef276cc4cb6a48fffc6cb381",
            "0x96129f905589b2a95c26276aa7e8708a12381ddec50485d6684c4abf9a5a1d00"
        };

        List<TransactionLog> logsFromRepo = new List<TransactionLog>();
        foreach (var txHash in expectedTransactionHashes)
        {
            logsFromRepo.AddRange(await repository.GetManyAsync(txHash));
        }

        Assert.Equal(11, logsFromRepo.Count);
    }
    finally
    {
        // delete any data from Azure
        await storageCloudSetup.GetCountersTable().DeleteIfExistsAsync();
        await tableStorageFactory.DeleteTablesAsync();
    }
}
protected override void Load(ContainerBuilder builder)
{
    // ILogsUploader
    builder.Register(c => new AzureBlobLogsUploader(this.iotHubHostName, this.deviceId))
        .As<ILogsUploader>()
        .SingleInstance();

    // Task<ILogsProvider>
    builder.Register(
            async c =>
            {
                var logsProcessor = new LogsProcessor(new LogMessageParser(this.iotHubHostName, this.deviceId));
                IRuntimeInfoProvider runtimeInfoProvider = await c.Resolve<Task<IRuntimeInfoProvider>>();
                return new LogsProvider(runtimeInfoProvider, logsProcessor) as ILogsProvider;
            })
        .As<Task<ILogsProvider>>()
        .SingleInstance();

    // IRequestManager
    builder.Register(
            c =>
            {
                var requestHandlers = new List<IRequestHandler>
                {
                    new PingRequestHandler(),
                    new TaskStatusRequestHandler()
                };
                return new RequestManager(requestHandlers, this.requestTimeout) as IRequestManager;
            })
        .As<IRequestManager>()
        .SingleInstance();

    if (this.experimentalFeatures.EnableUploadLogs)
    {
        // Task<IRequestHandler> - LogsUploadRequestHandler
        builder.Register(
                async c =>
                {
                    var logsUploader = c.Resolve<ILogsUploader>();
                    var runtimeInfoProviderTask = c.Resolve<Task<IRuntimeInfoProvider>>();
                    var logsProviderTask = c.Resolve<Task<ILogsProvider>>();
                    IRuntimeInfoProvider runtimeInfoProvider = await runtimeInfoProviderTask;
                    ILogsProvider logsProvider = await logsProviderTask;
                    return new LogsUploadRequestHandler(logsUploader, logsProvider, runtimeInfoProvider) as IRequestHandler;
                })
            .As<Task<IRequestHandler>>()
            .SingleInstance();
    }

    if (this.experimentalFeatures.EnableGetLogs)
    {
        // Task<IRequestHandler> - LogsRequestHandler
        builder.Register(
                async c =>
                {
                    var runtimeInfoProviderTask = c.Resolve<Task<IRuntimeInfoProvider>>();
                    var logsProviderTask = c.Resolve<Task<ILogsProvider>>();
                    IRuntimeInfoProvider runtimeInfoProvider = await runtimeInfoProviderTask;
                    ILogsProvider logsProvider = await logsProviderTask;
                    return new LogsRequestHandler(logsProvider, runtimeInfoProvider) as IRequestHandler;
                })
            .As<Task<IRequestHandler>>()
            .SingleInstance();
    }

    // Task<IRequestHandler> - RestartRequestHandler
    builder.Register(
            async c =>
            {
                var environmentProviderTask = c.Resolve<Task<IEnvironmentProvider>>();
                var commandFactoryTask = c.Resolve<Task<ICommandFactory>>();
                var configSourceTask = c.Resolve<Task<IConfigSource>>();
                IEnvironmentProvider environmentProvider = await environmentProviderTask;
                ICommandFactory commandFactory = await commandFactoryTask;
                IConfigSource configSource = await configSourceTask;
                return new RestartRequestHandler(environmentProvider, configSource, commandFactory) as IRequestHandler;
            })
        .As<Task<IRequestHandler>>()
        .SingleInstance();

    // ISdkModuleClientProvider
    builder.Register(c => new SdkModuleClientProvider())
        .As<ISdkModuleClientProvider>()
        .SingleInstance();

    // IEdgeAgentConnection
    builder.Register(
            c =>
            {
                var serde = c.Resolve<ISerde<DeploymentConfig>>();
                var deviceClientprovider = c.Resolve<IModuleClientProvider>();
                var requestManager = c.Resolve<IRequestManager>();
                var deviceManager = c.Resolve<IDeviceManager>();
                bool enableSubscriptions = !this.experimentalFeatures.DisableCloudSubscriptions;
                IEdgeAgentConnection edgeAgentConnection = new EdgeAgentConnection(deviceClientprovider, serde, requestManager, deviceManager, enableSubscriptions, this.configRefreshFrequency);
                return edgeAgentConnection;
            })
        .As<IEdgeAgentConnection>()
        .SingleInstance();

    // Task<IStreamRequestListener>
    builder.Register(
            async c =>
            {
                if (this.enableStreams)
                {
                    var runtimeInfoProviderTask = c.Resolve<Task<IRuntimeInfoProvider>>();
                    var logsProviderTask = c.Resolve<Task<ILogsProvider>>();
                    var edgeAgentConnection = c.Resolve<IEdgeAgentConnection>();
                    IRuntimeInfoProvider runtimeInfoProvider = await runtimeInfoProviderTask;
                    ILogsProvider logsProvider = await logsProviderTask;
                    var streamRequestHandlerProvider = new StreamRequestHandlerProvider(logsProvider, runtimeInfoProvider);
                    return new StreamRequestListener(streamRequestHandlerProvider, edgeAgentConnection) as IStreamRequestListener;
                }
                else
                {
                    return new NullStreamRequestListener() as IStreamRequestListener;
                }
            })
        .As<Task<IStreamRequestListener>>()
        .SingleInstance();

    // Task<IConfigSource>
    builder.Register(
            async c =>
            {
                var edgeAgentConnection = c.Resolve<IEdgeAgentConnection>();
                var twinConfigSource = new TwinConfigSource(edgeAgentConnection, this.configuration);
                var backupSourceTask = c.Resolve<Task<IDeploymentBackupSource>>();
                IConfigSource backupConfigSource = new BackupConfigSource(await backupSourceTask, twinConfigSource);
                return backupConfigSource;
            })
        .As<Task<IConfigSource>>()
        .SingleInstance();

    // IReporter
    builder.Register(
            c =>
            {
                var runtimeInfoDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(DockerReportedRuntimeInfo),
                    [Constants.Unknown] = typeof(UnknownRuntimeInfo)
                };
                var edgeAgentDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(EdgeAgentDockerRuntimeModule),
                    [Constants.Unknown] = typeof(UnknownEdgeAgentModule)
                };
                var edgeHubDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(EdgeHubDockerRuntimeModule),
                    [Constants.Unknown] = typeof(UnknownEdgeHubModule)
                };
                var moduleDeserializerTypes = new Dictionary<string, Type>
                {
                    [DockerType] = typeof(DockerRuntimeModule)
                };
                var deserializerTypesMap = new Dictionary<Type, IDictionary<string, Type>>
                {
                    { typeof(IRuntimeInfo), runtimeInfoDeserializerTypes },
                    { typeof(IEdgeAgentModule), edgeAgentDeserializerTypes },
                    { typeof(IEdgeHubModule), edgeHubDeserializerTypes },
                    { typeof(IModule), moduleDeserializerTypes }
                };
                var edgeAgentConnection = c.Resolve<IEdgeAgentConnection>();
                return new IoTHubReporter(edgeAgentConnection, new TypeSpecificSerDe<AgentState>(deserializerTypesMap), this.versionInfo) as IReporter;
            })
        .As<IReporter>()
        .SingleInstance();

    base.Load(builder);
}