Example #1
        public ILogsProcessor Build()
        {
            if (Processors == null || Processors.Count == 0)
            {
                throw new ArgumentNullException(nameof(Processors));
            }
            if (Eth == null)
            {
                throw new ArgumentNullException(nameof(Eth));
            }

            BigInteger? lastBlockProcessed = MinimumBlockNumber == null || MinimumBlockNumber == 0 ? null : MinimumBlockNumber - 1;

            BlockProgressRepository = BlockProgressRepository ?? new InMemoryBlockchainProgressRepository(lastBlockProcessed);
            var progressService       = new BlockProgressService(Eth.Blocks, MinimumBlockNumber, BlockProgressRepository, MinimumBlockConfirmations);
            var processor             = new BlockRangeLogsProcessor(Eth.Filters.GetLogs, Processors, Filters?.ToArray(), Log);
            var batchProcessorService = new LogsProcessor(processor, progressService, BlocksPerBatch, BatchProcessedCallback, FatalErrorCallback, Log);

            batchProcessorService.OnDisposing += disposeHandler;

            return batchProcessorService;

            void disposeHandler(object s, EventArgs src)
            {
                while (_disposalStack.Count > 0)
                {
                    _disposalStack.Pop().Dispose();
                }

                batchProcessorService.OnDisposing -= disposeHandler;
            }
        }
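For orientation, a minimal sketch of driving this builder follows. The builder variable and the way its properties are populated are assumptions inferred from what Build() reads above, not a documented API.

        // Hypothetical usage sketch - "builder" stands for the type that owns Build().
        // Build() requires Eth and at least one processor; the rest fall back to defaults.
        builder.Eth = web3.Eth;                     // required, else ArgumentNullException
        builder.Processors.Add(myLogProcessor);     // at least one required
        builder.MinimumBlockNumber = 3000000;       // optional starting block
        ILogsProcessor logsProcessor = builder.Build();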
Example #2
        public async Task When_Cancellation_Is_Requested_Does_Not_Call_Processor()
        {
            var web3Mock          = new Web3Mock();
            var catchAllProcessor = new Mock<ILogProcessor>();

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, new[] { catchAllProcessor.Object });

            catchAllProcessor
            .Setup(p => p.IsLogForEvent(It.IsAny<FilterLog>()))
            .Returns(true);

            var logs = new[] { new FilterLog() };

            var cancellationSource = new CancellationTokenSource();

            // fake cancellation being raised after logs are retrieved but before processing
            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(It.IsAny<NewFilterInput>(), null))
            .Callback<NewFilterInput, object>((f, o) => cancellationSource.Cancel())
            .ReturnsAsync(logs);

            await logProcessor.ProcessAsync(new BlockRange(0, 10), cancellationSource.Token);

            catchAllProcessor
            .Verify(p => p.IsLogForEvent(It.IsAny<FilterLog>()), Times.Never);
        }
Example #3
        public async Task Dedupes_Logs_Matching_Multiple_Filters()
        {
            var web3Mock          = new Web3Mock();
            var catchAllProcessor = new Mock<ILogProcessor>();

            var filter1 = new NewFilterInput();
            var filter2 = new NewFilterInput();
            var filters = new[] { filter1, filter2 };

            var mockProcessors = new[] { catchAllProcessor };

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, mockProcessors.Select(p => p.Object), filters);

            var log1 = new FilterLog()
            {
                TransactionHash = "x", LogIndex = new HexBigInteger(0)
            };
            var duplicateLog = new FilterLog()
            {
                TransactionHash = "x", LogIndex = new HexBigInteger(0)
            };
            var log2 = new FilterLog()
            {
                TransactionHash = "y", LogIndex = new HexBigInteger(0)
            };

            var logsFromFilter1 = new[] { log1, duplicateLog };
            var logsFromFilter2 = new[] { log2, duplicateLog };

            catchAllProcessor
            .Setup(p => p.IsLogForEvent(It.IsAny<FilterLog>()))
            .Returns(true);

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(filter1, null))
            .ReturnsAsync(logsFromFilter1);

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(filter2, null))
            .ReturnsAsync(logsFromFilter2);

            var processedLogs = new List<FilterLog>();

            catchAllProcessor.Setup(p => p.ProcessLogsAsync(It.IsAny<FilterLog[]>()))
            .Callback<FilterLog[]>(l => processedLogs.AddRange(l))
            .Returns(Task.CompletedTask);

            await logProcessor.ProcessAsync(new BlockRange(0, 0));

            Assert.Equal(2, processedLogs.Count);
            Assert.Contains(log1, processedLogs);
            Assert.Contains(log2, processedLogs);
            Assert.DoesNotContain(duplicateLog, processedLogs);
        }
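The duplicate semantics asserted by this test are that two logs with the same TransactionHash and LogIndex are the same log. Purely as an illustration (this is not the library's actual comparer), that rule can be written as:

        // Illustrative equality rule matching the duplicate semantics asserted above.
        public class FilterLogKeyComparer : IEqualityComparer<FilterLog>
        {
            public bool Equals(FilterLog x, FilterLog y) =>
                x?.TransactionHash == y?.TransactionHash &&
                x?.LogIndex?.Value == y?.LogIndex?.Value;

            public int GetHashCode(FilterLog log) =>
                (log.TransactionHash, log.LogIndex?.Value).GetHashCode();
        }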
Example #4
        public async Task UsingTheIndividualComponents()
        {
            TransferMetadata.CurrentChainUrl = BlockchainUrl;

            var web3 = new Web3.Web3(BlockchainUrl);

            using (var azureSearchService = new AzureSearchService(AzureSearchServiceName, _azureSearchApiKey))
            {
                await azureSearchService.DeleteIndexAsync(AzureTransferIndexName);

                try
                {
                    using (var transferIndexer =
                               await azureSearchService.CreateEventIndexer<TransferEvent_ERC20>(AzureTransferIndexName))
                    {
                        using (var transferProcessor =
                                   new EventIndexProcessor<TransferEvent_ERC20>(transferIndexer))
                        {
                            var logProcessor = new BlockRangeLogsProcessor(
                                web3.Eth.Filters.GetLogs,
                                new ILogProcessor[] { transferProcessor });

                            var progressRepository =
                                new JsonBlockProgressRepository(CreateJsonFileToHoldProgress());

                            var progressService = new StaticBlockRangeProgressService(
                                3146684, 3146694, progressRepository);

                            var batchProcessorService = new LogsProcessor(
                                logProcessor, progressService, maxNumberOfBlocksPerBatch: 2);

                            BlockRange? lastBlockRangeProcessed;
                            do
                            {
                                lastBlockRangeProcessed = await batchProcessorService.ProcessOnceAsync();
                            } while (lastBlockRangeProcessed != null);

                            Assert.Equal(19, transferIndexer.Indexed);
                        }
                    }
                }
                finally
                {
                    await azureSearchService.DeleteIndexAsync(AzureTransferIndexName);
                }
            }
        }
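CreateJsonFileToHoldProgress is a test helper that is not shown in this example. A plausible minimal sketch, assuming a temp-file location and naming scheme, would be:

        // Assumed helper: returns the path of a fresh JSON file for JsonBlockProgressRepository.
        private static string CreateJsonFileToHoldProgress()
        {
            var path = Path.Combine(Path.GetTempPath(), $"BlockProcessingProgress_{Guid.NewGuid():N}.json");
            if (File.Exists(path)) File.Delete(path); // start from a clean slate
            return path;
        }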
Example #5
        public async Task Allocates_Matching_Logs_To_Processors()
        {
            var web3Mock          = new Web3Mock();
            var log1Processor     = new Mock<ILogProcessor>();
            var log2Processor     = new Mock<ILogProcessor>();
            var catchAllProcessor = new Mock<ILogProcessor>();

            var mockProcessors = new[] { log1Processor, log2Processor, catchAllProcessor };

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, mockProcessors.Select(p => p.Object));

            var log1 = new FilterLog();
            var log2 = new FilterLog();
            var log3 = new FilterLog();

            var logs = new[] { log1, log2, log3 };

            log1Processor.Setup(p => p.IsLogForEvent(log1)).Returns(true);
            log2Processor.Setup(p => p.IsLogForEvent(log2)).Returns(true);

            catchAllProcessor
            .Setup(p => p.IsLogForEvent(It.IsAny<FilterLog>()))
            .Returns(true);

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(It.IsAny<NewFilterInput>(), null))
            .ReturnsAsync(logs);

            var processedLogs = new Dictionary<Mock<ILogProcessor>, List<FilterLog>>();

            foreach (var processor in mockProcessors)
            {
                processedLogs.Add(processor, new List<FilterLog>());

                processor.Setup(p => p.ProcessLogsAsync(It.IsAny<FilterLog[]>()))
                .Callback<FilterLog[]>(l => processedLogs[processor].AddRange(l))
                .Returns(Task.CompletedTask);
            }

            await logProcessor.ProcessAsync(new BlockRange(0, 0));

            Assert.Single(processedLogs[log1Processor], log1);
            Assert.Single(processedLogs[log2Processor], log2);
            Assert.True(logs.SequenceEqual(processedLogs[catchAllProcessor]));
        }
Example #6
        public async Task Catches_Too_Many_Records_RpcException_And_Throws_Specific_Too_Many_Records_Exception()
        {
            var tooManyRecordsRpcEx = RpcResponseExceptionExtensions.CreateFakeTooManyRecordsRpcException();
            var web3Mock            = new Web3Mock();
            var catchAllProcessor   = new Mock<ILogProcessor>();

            var filter1 = new NewFilterInput();

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, new[] { catchAllProcessor.Object }, new[] { filter1 });

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(filter1, null))
            .Throws(tooManyRecordsRpcEx);

            var actualException = await Assert.ThrowsAsync<TooManyRecordsException>(async () => await logProcessor.ProcessAsync(new BlockRange(0, 0)));

            Assert.Same(tooManyRecordsRpcEx, actualException.InnerException);
        }
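TooManyRecordsException indicates the node rejected the eth_getLogs request because the block range matched too many results. One way a caller might react, shown here purely as an illustration rather than library behaviour, is to split the range and retry:

        // Illustrative caller-side policy: halve the range and retry on TooManyRecordsException.
        async Task ProcessNarrowingAsync(BlockRangeLogsProcessor processor, ulong from, ulong to)
        {
            try
            {
                await processor.ProcessAsync(new BlockRange(from, to));
            }
            catch (TooManyRecordsException)
            {
                if (from == to) throw;            // cannot narrow any further
                var mid = from + (to - from) / 2;
                await ProcessNarrowingAsync(processor, from, mid);
                await ProcessNarrowingAsync(processor, mid + 1, to);
            }
        }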
Example #7
        public async Task Checks_For_Cancellation_Before_Each_Processing_Batch()
        {
            var web3Mock           = new Web3Mock();
            var catchAllProcessor1 = new Mock<ILogProcessor>();
            var catchAllProcessor2 = new Mock<ILogProcessor>();

            catchAllProcessor1
            .Setup(p => p.IsLogForEvent(It.IsAny<FilterLog>()))
            .Returns(true);

            catchAllProcessor2
            .Setup(p => p.IsLogForEvent(It.IsAny<FilterLog>()))
            .Returns(true);

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, new[] { catchAllProcessor1.Object, catchAllProcessor2.Object });

            var logs = new[] { new FilterLog() };

            var cancellationSource = new CancellationTokenSource();

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(It.IsAny<NewFilterInput>(), null))
            .ReturnsAsync(logs);

            // cancel after processor 1 finishes
            catchAllProcessor1
            .Setup(p => p.ProcessLogsAsync(It.IsAny<FilterLog[]>()))
            .Callback<FilterLog[]>(l => cancellationSource.Cancel())
            .Returns(Task.CompletedTask);

            await logProcessor.ProcessAsync(new BlockRange(0, 10), cancellationSource.Token);

            catchAllProcessor1
            .Verify(p => p.ProcessLogsAsync(It.IsAny<FilterLog[]>()), Times.Once);

            catchAllProcessor2
            .Verify(p => p.ProcessLogsAsync(It.IsAny<FilterLog[]>()), Times.Never);
        }
Example #8
        public async Task Passes_Correct_Block_Range_To_Proxy()
        {
            var web3Mock = new Web3Mock();

            var logProcessor = new BlockRangeLogsProcessor(
                web3Mock.Web3, Array.Empty<ILogProcessor>());

            var logs = Array.Empty<FilterLog>();

            NewFilterInput actualFilter = null;

            web3Mock.GetLogsMock
            .Setup(p => p.SendRequestAsync(It.IsAny<NewFilterInput>(), null))
            .Callback<NewFilterInput, object>((f, o) => actualFilter = f)
            .ReturnsAsync(logs);

            await logProcessor.ProcessAsync(new BlockRange(0, 10));

            Assert.NotNull(actualFilter);
            Assert.Equal(0, actualFilter.FromBlock.BlockNumber.Value);
            Assert.Equal(10, actualFilter.ToBlock.BlockNumber.Value);
        }
Example #9
        public virtual async Task<BigInteger> ProcessAsync(BigInteger from, BigInteger? to = null, CancellationTokenSource ctx = null, Action<LogBatchProcessedArgs> logBatchProcessedCallback = null)
        {
            if (!LogProcessors.Any())
            {
                throw new InvalidOperationException("No events to capture - use AddEventAsync to add listeners for indexable events");
            }

            var logProcessor = new BlockRangeLogsProcessor(
                Web3.Eth.Filters.GetLogs,
                LogProcessors,
                Filters);

            IBlockProgressService progressService = CreateProgressService(from, to);

            var batchProcessorService = new LogsProcessor(
                logProcessor, progressService, maxNumberOfBlocksPerBatch: MaxBlocksPerBatch);

            if (to != null)
            {
                return await ProcessRange(ctx, logBatchProcessedCallback, batchProcessorService);
            }

            return await batchProcessorService.ProcessContinuallyAsync(ctx?.Token ?? CancellationToken.None, logBatchProcessedCallback);
        }
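A minimal sketch of calling this method follows; "eventIndexer" is a stand-in for whatever object exposes ProcessAsync above, and the argument values are illustrative.

        // Hypothetical call site for the ProcessAsync method above.
        var cts = new CancellationTokenSource();
        BigInteger lastBlock = await eventIndexer.ProcessAsync(
            from: 3146684,
            to: 3146694,                 // omit "to" to process continually
            ctx: cts,
            logBatchProcessedCallback: args => { /* inspect batch progress here */ });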
Example #10
        public async Task WriteAnyMakerEventToQueue()
        {
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            var configurationContext = MakerDAOEventProcessingConfig.Create(PARTITION, out IdGenerator idGenerator);
            IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // queue components
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

            // load subscribers and event subscriptions
            var eventSubscriptionFactory = new EventSubscriptionFactory(
                web3, configurationRepository, queueFactory);

            List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

            // progress repo (dictates which block ranges to process next)
            // maintain separate progress per partition via a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            //this ensures we only query the chain for events relating to this contract
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MakerDAOEventProcessingConfig.MAKER_CONTRACT_ADDRESS }
            };

            // load service
            var progressService       = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
            var logProcessor          = new BlockRangeLogsProcessor(web3, eventSubscriptions, makerAddressFilter);
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            var blockRangesProcessed = new List<BlockRange?>();

            try
            {
                for (var i = 0; i < 2; i++)  // 2 batch iterations
                {
                    var ctx            = new System.Threading.CancellationTokenSource();
                    var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                    blockRangesProcessed.Add(rangeProcessed);

                    // save event subscription state after each batch
                    await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));
                }
            }
            finally
            {
                await ClearDown(configurationContext, storageCloudSetup, queueFactory);
            }

            var subscriptionState = await configurationRepository.EventSubscriptionStates.GetAsync(eventSubscriptions[0].Id);

            Assert.Equal(2, (int)subscriptionState.Values["HandlerInvocations"]);
            Assert.Equal(28, (int)subscriptionState.Values["EventsHandled"]);
        }
Example #11
        public async Task WriteTransferEventsForMakerDAOToAzureStorage()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // Create an Azure Table Storage Factory
            //  - The factory communicates with Azure to create and get different tables
            var tableStorageFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

            // Create a Handler for a Table
            // - It wraps a table repository
            // - This is where we're going to put the matching event logs
            // - we're supplying a table prefix
            // - the actual table name would be "<prefix>TransactionLogs"
            // - this allows us to have different tables for different types of event logs
            // - the handler implements ILogHandler
            // - ILogHandler is a really simple interface to implement if you wish to customise the storage
            var storageHandlerForLogs = await tableStorageFactory.GetLogRepositoryHandlerAsync(tablePrefix: "makerdaotransfersstorage");

            // Create an event subscription specifically for ERC20 Transfers
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription<TransferEventDto>(
                contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

            // Assign the storage handler to the event subscription
            // - Matching events will be passed to the handler
            // - internally the handler passes the events to the repository layer which writes them to Azure
            eventSubscription.AddStorageHandler(storageHandlerForLogs);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransfersstorage");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };

            // Create a log processor
            // - This uses the web3 proxy to get the logs
            // - It sends each log to the event subscriptions to indicate if the log matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var ctx = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // when the rangeProcessed is null - it means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

                // get the row count from azure storage
                // the querying on storage is limited
                // the TransactionHash is the partitionkey and the rowkey is the LogIndex
                // this allows us to query by tx hash

                var logRepositoryHandler = storageHandlerForLogs as TransactionLogRepositoryHandler;
                var repository           = logRepositoryHandler.TransactionLogRepository as TransactionLogRepository;

                var expectedTransactionHashes = new[]
                {
                    "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
                    "0x0bee561ac6bafb59bcc4c48fc4c1225aaedbab3e8089acea420140aafa47f3e5",
                    "0x6fc82b076fa7088581a80869cb9c7a08d7f8e897670a9f67e39139b39246da7e",
                    "0xdc2ee28db35ed5dbbc9e18a7d6bdbacb6e6633a9fce1ecda99ea7e1cf4bc8c72",
                    "0xcd2fea48c84468f70c9a44c4ffd7b26064a2add8b72937edf593634d2501c1f6",
                    "0x3acf887420887148222aab1d25d4d4893794e505ef276cc4cb6a48fffc6cb381",
                    "0x96129f905589b2a95c26276aa7e8708a12381ddec50485d6684c4abf9a5a1d00"
                };

                List<TransactionLog> logsFromRepo = new List<TransactionLog>();
                foreach (var txHash in expectedTransactionHashes)
                {
                    logsFromRepo.AddRange(await repository.GetManyAsync(txHash));
                }

                Assert.Equal(11, logsFromRepo.Count);
            }
            finally
            {
                // delete any data from Azure
                await storageCloudSetup.GetCountersTable().DeleteIfExistsAsync();

                await tableStorageFactory.DeleteTablesAsync();
            }
        }
Example #12
        public async Task WebJobExample()
        {
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];
            string azureSearchKey = config["AzureSearchApiKey"];

            var configurationContext = EventProcessingConfigMock.Create(PARTITION, out IdGenerator idGenerator);
            IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Rinkeby);

            // search components
            var searchService      = new AzureSearchService(serviceName: AZURE_SEARCH_SERVICE_NAME, searchApiKey: azureSearchKey);
            var searchIndexFactory = new AzureSubscriberSearchIndexFactory(searchService);

            // queue components
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

            // subscriber repository
            var repositoryFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

            // load subscribers and event subscriptions
            var eventSubscriptionFactory = new EventSubscriptionFactory(
                web3, configurationRepository, queueFactory, searchIndexFactory, repositoryFactory);

            List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

            // progress repo (dictates which block ranges to process next)
            // maintain separate progress per partition via a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // load service
            var progressService       = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
            var logProcessor          = new BlockRangeLogsProcessor(web3, eventSubscriptions);
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            BlockRange? rangeProcessed;

            try
            {
                var ctx = new System.Threading.CancellationTokenSource();
                rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);
            }
            finally
            {
                await ClearDown(configurationContext, storageCloudSetup, searchService, queueFactory, repositoryFactory);
            }

            // save event subscription state
            await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));

            // assertions
            Assert.NotNull(rangeProcessed);
            Assert.Equal((ulong)10, rangeProcessed.Value.BlockCount);

            var subscriptionState1 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 1); // interested in transfers with contract queries and aggregations
            var subscriptionState2 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 2); // interested in transfers with simple aggregation
            var subscriptionState3 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 3); // interested in any event for a specific address

            Assert.Equal("4009000000002040652615", subscriptionState1.Values["RunningTotalForTransferValue"].ToString());
            Assert.Equal((uint)19, subscriptionState2.Values["CurrentTransferCount"]);

            var txForSpecificAddress = (List<string>)subscriptionState3.Values["AllTransactionHashes"];

            Assert.Equal("0x362bcbc78a5cc6156e8d24d95bee6b8f53d7821083940434d2191feba477ae0e", txForSpecificAddress[0]);
            Assert.Equal("0xe63e9422dedf84d0ce13f9f75ebfd86333ce917b2572925fbdd51b51caf89b77", txForSpecificAddress[1]);

            var blockNumbersForSpecificAddress = (List<HexBigInteger>)subscriptionState3.Values["AllBlockNumbers"];

            Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[0].Value);
            Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[1].Value);
        }
Example #13
        public async Task QueueAllEventsForMakerDAOContract()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // Create Queue Factory
            //  - In this sample we're targeting Azure
            //  - The factory communicates with Azure to create and get different queues
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
            // Create a Queue
            //  - This is where we're going to put the matching event logs
            var queue = await queueFactory.GetOrCreateQueueAsync("makerdaoevents");

            //  Get the maker DAO contract abi
            //  - from this we're able to match and decode the events in the contract
            var contractAbi = new ABIDeserialiser().DeserialiseContract(MAKER_DAO_ABI);

            // Create an event subscription for these events
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription(contractAbi.Events, new[] { MAKER_CONTRACT_ADDRESS });

            // Assign the queue to the event subscription
            // - Matching events will be written to this queue
            // - By default a generic message is written to the queue
            // - The message contains the raw log (aka FilterLog), decoded event parameter values and event metadata
            // - Therefore the message schema is consistent across all messages sent to any queues
            // - However - should you require your own queue message schema the method below accepts a custom message mapper
            // - Ultimately the message is converted to json
            eventSubscription.AddQueueHandler(queue);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdao");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };

            // Create a log processor
            // - This uses the web3 proxy to get the logs
            // - It sends each log to the event subscriptions to indicate if the log matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var ctx = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // when the rangeProcessed is null - it means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(16, eventSubscription.State.GetInt("EventsHandled"));
                // get the message count from the queue
                Assert.Equal(16, await queue.GetApproxMessageCountAsync());
            }
            finally
            {
                // delete any data from Azure
                await ClearDown(queue, storageCloudSetup, queueFactory);
            }
        }
Example #14
        public async Task WritingCustomMessagesToTheQueue()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // Create Queue Factory
            //  - In this sample we're targeting Azure
            //  - The factory communicates with Azure to create and get different queues
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
            // Create a Queue
            //  - This is where we're going to put the matching event logs
            var queue = await queueFactory.GetOrCreateQueueAsync("makerdaotransferscustom");


            // Create an event subscription specifically for ERC20 Transfers
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription<TransferEventDto>(
                contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

            // Create a mapper that will convert the DecodedEvent into a custom message we want on the queue
            // In this sample we're using a subscription that is specific to an EventDTO (EventSubscription<TransferEventDto>)
            // This ensures that the decodedEvent.DecodedEventDto property is populated during processing
            // ( If the subscription is not tied to an EventDTO the decodedEvent.DecodedEventDto property would be null
            //   BUT we can still read the event arguments (aka parameters or topics) from the decodedEvent.Event property)
            var queueMessageMapper = new QueueMessageMapper((decodedEvent) =>
            {
                return new CustomQueueMessageForTransfers
                {
                    BlockNumber = decodedEvent.Log.BlockNumber.Value.ToString(),
                    TransactionHash = decodedEvent.Log.TransactionHash,
                    LogIndex = decodedEvent.Log.LogIndex.Value.ToString(),
                    Transfer = decodedEvent.DecodedEventDto as TransferEventDto
                };
            });


            // Assign the queue to the event subscription
            // - Matching events will be written to this queue
            // - Pass a custom mapper to create a suitable queue message
            // - Ultimately the message is converted to json
            eventSubscription.AddQueueHandler(queue, queueMessageMapper);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransferscustom");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };

            // Create a log processor
            // - This uses the web3 proxy to get the logs
            // - It sends each log to the event subscriptions to indicate if the log matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var ctx = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // when the rangeProcessed is null - it means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));
                // get the message count from the queue
                Assert.Equal(11, await queue.GetApproxMessageCountAsync());

                //A sample message body from the queue

                /*
                 * {"BlockNumber":"7540010","TransactionHash":"0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0","LogIndex":"132","Transfer":{"From":"0x296c61eaf5bea208bbabc65ae01c3bc5270fe386","To":"0x2a8f1a6af55b705b7daee0776d6f97302de2a839","Value":119928660890733235}}
                 */
            }
            finally
            {
                // delete any data from Azure
                await ClearDown(queue, storageCloudSetup, queueFactory);
            }
        }
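The CustomQueueMessageForTransfers type is not shown in this example. A minimal shape consistent with the mapper above, with the property names taken from the mapper and everything else assumed, would be:

        // Assumed shape of the custom queue message produced by the mapper above.
        public class CustomQueueMessageForTransfers
        {
            public string BlockNumber { get; set; }
            public string TransactionHash { get; set; }
            public string LogIndex { get; set; }
            public TransferEventDto Transfer { get; set; }
        }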