// Creates (or gets) an Azure queue from a connection string and routes matching FilterLogs to it.
public static async Task<ILogsProcessorBuilder> AddToQueueAsync(
     this ILogsProcessorBuilder eventLogProcessor,
     string azureConnectionString,
     string queueName,
     Predicate<FilterLog> predicate = null,
     Func<FilterLog, object> mapper = null)
 {
     var factory = new AzureSubscriberQueueFactory(azureConnectionString);
     return await eventLogProcessor.AddToQueueAsync(factory, queueName, predicate, mapper);
 }
Example #2
        /*
         * A sample message from the queue.
         * The key combines the block number, transaction hash and log index.
         * The message contains the raw log (FilterLog) and the decoded parameter values.
         * It also carries extra state data that the processing chain can add or amend.
         * A message can be retrieved from the queue and decoded into an event DTO (e.g. TransferEventDto).
         *
         * {
         *   "Key": "7540010_0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0_132",
         *   "State": {
         *     "EventAbiName": "Transfer",
         *     "EventSignature": "ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
         *     "TransactionHash": "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
         *     "LogIndex": 132,
         *     "HandlerInvocations": 1,
         *     "UtcNowMs": 1556800544598,
         *     "SubscriberId": 0,
         *     "EventSubscriptionId": 0
         *   },
         *   "Transaction": null,
         *   "ParameterValues": [{
         *     "Order": 1,
         *     "Name": "_from",
         *     "AbiType": "address",
         *     "Value": "0x296c61eaf5bea208bbabc65ae01c3bc5270fe386",
         *     "Indexed": true
         *   }, {
         *     "Order": 2,
         *     "Name": "_to",
         *     "AbiType": "address",
         *     "Value": "0x2a8f1a6af55b705b7daee0776d6f97302de2a839",
         *     "Indexed": true
         *   }, {
         *     "Order": 3,
         *     "Name": "_value",
         *     "AbiType": "uint256",
         *     "Value": 119928660890733235,
         *     "Indexed": false
         *   }],
         *   "Log": {
         *     "removed": false,
         *     "type": null,
         *     "logIndex": "0x84",
         *     "transactionHash": "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
         *     "transactionIndex": "0x95",
         *     "blockHash": "0x0ef49c4e3a11872084715808df7a07c4fa6f03dac0596b4e6c10b21b88f7d227",
         *     "blockNumber": "0x730d2a",
         *     "address": "0x9f8f72aa9304c8b593d555f12ef6589cc3a579a2",
         *     "data": "0x00000000000000000000000000000000000000000000000001aa127b4ec7cab3",
         *     "topics": ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", "0x000000000000000000000000296c61eaf5bea208bbabc65ae01c3bc5270fe386", "0x0000000000000000000000002a8f1a6af55b705b7daee0776d6f97302de2a839"]
         *   }
         * }
         */
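
Because the message body is plain JSON, it can be read back and deserialized on the consuming side. Below is a minimal sketch, assuming Newtonsoft.Json and hand-written QueueEventMessage / EventParameterValue classes that mirror the schema above (illustrative types, not part of the library):

        // Hypothetical DTOs mirroring the documented message schema.
        public class EventParameterValue
        {
            public int Order { get; set; }
            public string Name { get; set; }
            public string AbiType { get; set; }
            public object Value { get; set; }
            public bool Indexed { get; set; }
        }

        public class QueueEventMessage
        {
            public string Key { get; set; }
            public Dictionary<string, object> State { get; set; }
            public List<EventParameterValue> ParameterValues { get; set; }
            public FilterLog Log { get; set; }
        }

        // Deserialize a message body read from the queue (json is the raw message text).
        var message = JsonConvert.DeserializeObject<QueueEventMessage>(json);
        // message.Key => "7540010_0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0_132"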

        // Deletes the Azure queue and the counters table created during the sample run.
        private async Task ClearDown(
            IQueue queue,
            CloudTableSetup cloudTableSetup,
            AzureSubscriberQueueFactory subscriberQueueFactory)
        {
            var qRef = subscriberQueueFactory.CloudQueueClient.GetQueueReference(queue.Name);
            await qRef.DeleteIfExistsAsync();

            await cloudTableSetup.GetCountersTable().DeleteIfExistsAsync();
        }
 // Generic overload - routes decoded EventLog<TEventDto> messages to the queue.
 public static async Task<ILogsProcessorBuilder> AddToQueueAsync<TEventDto>(
     this ILogsProcessorBuilder eventLogProcessor,
     string azureConnectionString,
     string queueName,
     Predicate<EventLog<TEventDto>> predicate = null,
     Func<EventLog<TEventDto>, object> mapper = null)
     where TEventDto : class, new()
 {
     var factory = new AzureSubscriberQueueFactory(azureConnectionString);
     return await eventLogProcessor.AddToQueueAsync(factory, queueName, predicate, mapper);
 }
 // Overload accepting a pre-built AzureSubscriberQueueFactory instead of a connection string.
 public static async Task<ILogsProcessorBuilder> AddToQueueAsync(
     this ILogsProcessorBuilder eventLogProcessor,
     AzureSubscriberQueueFactory queueFactory,
     string queueName,
     Predicate<FilterLog> predicate = null,
     Func<FilterLog, object> mapper = null)
 {
     var queue = await queueFactory.GetOrCreateQueueAsync(queueName);
     eventLogProcessor.AddToQueue(queue, predicate, mapper);
     return eventLogProcessor;
 }
 // Generic counterpart of the factory-based overload.
 public static async Task<ILogsProcessorBuilder> AddToQueueAsync<TEventDto>(
     this ILogsProcessorBuilder eventLogProcessor,
     AzureSubscriberQueueFactory queueFactory,
     string queueName,
     Predicate<EventLog<TEventDto>> predicate = null,
     Func<EventLog<TEventDto>, object> mapper = null)
     where TEventDto : class, new()
 {
     var queue = await queueFactory.GetOrCreateQueueAsync(queueName);
     eventLogProcessor.AddToQueue(queue, predicate, mapper);
     return eventLogProcessor;
 }
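
The samples below call these overloads without the optional predicate or mapper. For completeness, here is a minimal sketch with both supplied - assuming the TransferEventDto used elsewhere in these samples; the threshold filter and anonymous message shape are illustrative only:

 var builder = await web3.Eth.LogsProcessor<TransferEventDto>()
     .AddToQueueAsync<TransferEventDto>(
         azureStorageConnectionString,
         "large-transfers",
         predicate: log => log.Event.Value > 1000,                          // queue only transfers above a threshold
         mapper: log => new { log.Log.TransactionHash, log.Event.Value }); // custom queue message shape
 // builder.Build() then yields the processor, as in the samples below.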
        // Deletes the Azure queues and the counters table created during the sample run.
        private async Task ClearDown(
            EventProcessingConfigContext repo,
            CloudTableSetup cloudTableSetup,
            AzureSubscriberQueueFactory subscriberQueueFactory)
        {
            foreach (var queue in repo.SubscriberSearchIndexes)
            {
                var qRef = subscriberQueueFactory.CloudQueueClient.GetQueueReference(queue.Name);
                await qRef.DeleteIfExistsAsync();
            }

            await cloudTableSetup.GetCountersTable().DeleteIfExistsAsync();
        }
        // Deletes the search indexes, queues, counters table and subscriber tables created during the sample run.
        private async Task ClearDown(
            EventProcessingConfigContext repo,
            CloudTableSetup cloudTableSetup,
            IAzureSearchService searchService,
            AzureSubscriberQueueFactory subscriberQueueFactory,
            AzureTablesSubscriberRepositoryFactory azureTablesSubscriberRepositoryFactory)
        {
            foreach (var index in repo.SubscriberSearchIndexes)
            {
                await searchService.DeleteIndexAsync(index.Name);
            }

            foreach (var queue in repo.SubscriberSearchIndexes)
            {
                var qRef = subscriberQueueFactory.CloudQueueClient.GetQueueReference(queue.Name);
                await qRef.DeleteIfExistsAsync();
            }

            await cloudTableSetup.GetCountersTable().DeleteIfExistsAsync();

            await azureTablesSubscriberRepositoryFactory.DeleteTablesAsync();
        }
Example #8
        public async Task WritingEventsToAnAzureQueue()
        {
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);
            // Requires: Nethereum.BlockchainProcessing.Queue.Azure

            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            //cancellation token to enable the listener to be stopped
            //passing in a time limit as a safety valve for the unit test
            var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMinutes(1));

            //initialise the processor
            using (var processor = (await web3.Eth.LogsProcessor<TransferEventDto>()
                                   .SetBlocksPerBatch(1)           //optional: restrict batches to one block at a time
                                   .SetMinimumBlockNumber(7540103) //optional: default is to start at the current block on the chain
                                   .OnBatchProcessed((args) => cancellationTokenSource.Cancel())
                                   .AddToQueueAsync(azureStorageConnectionString, "sep-transfers")) //await (rather than .Result) to avoid blocking
                                   .Build())
            {
                //run the processor for a while
                var rangesProcessed = await processor.ProcessContinuallyAsync(cancellationTokenSource.Token);
            }

            await Task.Delay(5000); //give azure time to update

            //check the results on the queue
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
            var queue        = await queueFactory.GetOrCreateQueueAsync("sep-transfers");

            Assert.Equal(13, await queue.GetApproxMessageCountAsync());

            //clean up
            await queueFactory.CloudQueueClient.GetQueueReference(queue.Name).DeleteIfExistsAsync();
        }
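
To inspect what was queued, the raw messages can also be read back through the storage client the factory exposes. A brief sketch using the underlying CloudQueue API (run before the queue is deleted):

            var qRef = queueFactory.CloudQueueClient.GetQueueReference("sep-transfers");
            var message = await qRef.GetMessageAsync();   // dequeue one message
            string json = message.AsString;               // the serialized event payload
            await qRef.DeleteMessageAsync(message);       // remove it once handled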
        public async Task WriteAnyMakerEventToQueue()
        {
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            var configurationContext = MakerDAOEventProcessingConfig.Create(PARTITION, out IdGenerator idGenerator);
            IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // queue components
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

            // load subscribers and event subscriptions
            var eventSubscriptionFactory = new EventSubscriptionFactory(
                web3, configurationRepository, queueFactory);

            List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

            // progress repo (dictates which block ranges to process next)
            // maintain separate progress per partition via a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            //this ensures we only query the chain for events relating to this contract
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MakerDAOEventProcessingConfig.MAKER_CONTRACT_ADDRESS }
            };

            // load service
            var progressService       = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
            var logProcessor          = new BlockRangeLogsProcessor(web3, eventSubscriptions, makerAddressFilter);
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            var blockRangesProcessed = new List<BlockRange?>();

            try
            {
                for (var i = 0; i < 2; i++)  // 2 batch iterations
                {
                    var ctx            = new System.Threading.CancellationTokenSource();
                    var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                    blockRangesProcessed.Add(rangeProcessed);

                    // save event subscription state after each batch
                    await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));
                }
            }
            finally
            {
                await ClearDown(configurationContext, storageCloudSetup, queueFactory);
            }

            var subscriptionState = await configurationRepository.EventSubscriptionStates.GetAsync(eventSubscriptions[0].Id);

            Assert.Equal(2, (int)subscriptionState.Values["HandlerInvocations"]);
            Assert.Equal(28, (int)subscriptionState.Values["EventsHandled"]);
        }
        public async Task WebJobExample()
        {
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];
            string azureSearchKey = config["AzureSearchApiKey"];

            var configurationContext = EventProcessingConfigMock.Create(PARTITION, out IdGenerator idGenerator);
            IEventProcessingConfigurationRepository configurationRepository = configurationContext.CreateMockRepository(idGenerator);

            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Rinkeby);

            // search components
            var searchService      = new AzureSearchService(serviceName: AZURE_SEARCH_SERVICE_NAME, searchApiKey: azureSearchKey);
            var searchIndexFactory = new AzureSubscriberSearchIndexFactory(searchService);

            // queue components
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

            // subscriber repository
            var repositoryFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

            // load subscribers and event subscriptions
            var eventSubscriptionFactory = new EventSubscriptionFactory(
                web3, configurationRepository, queueFactory, searchIndexFactory, repositoryFactory);

            List<IEventSubscription> eventSubscriptions = await eventSubscriptionFactory.LoadAsync(PARTITION);

            // progress repo (dictates which block ranges to process next)
            // maintain separate progress per partition via a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: $"Partition{PARTITION}");
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // load service
            var progressService       = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);
            var logProcessor          = new BlockRangeLogsProcessor(web3, eventSubscriptions);
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            BlockRange? rangeProcessed;

            try
            {
                var ctx = new System.Threading.CancellationTokenSource();
                rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);
            }
            finally
            {
                await ClearDown(configurationContext, storageCloudSetup, searchService, queueFactory, repositoryFactory);
            }

            // save event subscription state
            await configurationRepository.EventSubscriptionStates.UpsertAsync(eventSubscriptions.Select(s => s.State));

            // assertions
            Assert.NotNull(rangeProcessed);
            Assert.Equal((ulong)10, rangeProcessed.Value.BlockCount);

            var subscriptionState1 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 1); // interested in transfers with contract queries and aggregations
            var subscriptionState2 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 2); // interested in transfers with simple aggregation
            var subscriptionState3 = configurationContext.GetEventSubscriptionState(eventSubscriptionId: 3); // interested in any event for a specific address

            Assert.Equal("4009000000002040652615", subscriptionState1.Values["RunningTotalForTransferValue"].ToString());
            Assert.Equal((uint)19, subscriptionState2.Values["CurrentTransferCount"]);

            var txForSpecificAddress = (List<string>)subscriptionState3.Values["AllTransactionHashes"];

            Assert.Equal("0x362bcbc78a5cc6156e8d24d95bee6b8f53d7821083940434d2191feba477ae0e", txForSpecificAddress[0]);
            Assert.Equal("0xe63e9422dedf84d0ce13f9f75ebfd86333ce917b2572925fbdd51b51caf89b77", txForSpecificAddress[1]);

            var blockNumbersForSpecificAddress = (List<HexBigInteger>)subscriptionState3.Values["AllBlockNumbers"];

            Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[0].Value);
            Assert.Equal((BigInteger)4063362, blockNumbersForSpecificAddress[1].Value);
        }
Example #11
        public async Task QueueAllEventsForMakerDAOContract()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // Create Queue Factory
            //  - In this sample we're targeting Azure
            //  - The factory communicates with Azure to create and get different queues
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
            // Create a Queue
            //  - This is where we're going to put the matching event logs
            var queue = await queueFactory.GetOrCreateQueueAsync("makerdaoevents");

            //  Get the maker DAO contract abi
            //  - from this we're able to match and decode the events in the contract
            var contractAbi = new ABIDeserialiser().DeserialiseContract(MAKER_DAO_ABI);

            // Create an event subscription for these events
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository into the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription(contractAbi.Events, new[] { MAKER_CONTRACT_ADDRESS });

            // Assign the queue to the event subscription
            // - Matching events will be written to this queue
            // - By default a generic message is written to the queue
            // - The message contains the raw log (aka FilterLog), decoded event parameter values and event metadata
            // - Therefore the message schema is consistent across all messages sent to any queue
            // - However, should you require your own queue message schema, the method below accepts a custom message mapper
            // - Ultimately the message is converted to json
            eventSubscription.AddQueueHandler(queue);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdao");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };

            // Create a log processor
            // - This uses the web3 proxy to get the logs
            // - It sends each log to the event subscriptions to check whether it matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var ctx = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // when the rangeProcessed is null - it means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(16, eventSubscription.State.GetInt("EventsHandled"));
                // get the message count from the queue
                Assert.Equal(16, await queue.GetApproxMessageCountAsync());
            }
            finally
            {
                // delete any data from Azure
                await ClearDown(queue, storageCloudSetup, queueFactory);
            }
        }
Example #12
        public async Task WritingCustomMessagesToTheQueue()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

            // Create Queue Factory
            //  - In this sample we're targeting Azure
            //  - The factory communicates with Azure to create and get different queues
            var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
            // Create a Queue
            //  - This is where we're going to put the matching event logs
            var queue = await queueFactory.GetOrCreateQueueAsync("makerdaotransferscustom");


            // Create an event subscription specifically for ERC20 Transfers
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository into the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription<TransferEventDto>(
                contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

            // Create a mapper that will convert the DecodedEvent into a custom message we want on the queue
            // In this sample we're using a subscription that is specific to an EventDTO (EventSubscription<TransferEventDto>)
            // This ensures that the decodedEvent.DecodedEventDto property is populated during processing
            // ( If the subscription is not tied to an EventDTO the decodedEvent.DecodedEventDto property would be null
            //   BUT we can still read the event arguments (aka parameters or topics) from the decodedEvent.Event property)
            var queueMessageMapper = new QueueMessageMapper((decodedEvent) =>
            {
                return new CustomQueueMessageForTransfers
                {
                    BlockNumber = decodedEvent.Log.BlockNumber.Value.ToString(),
                    TransactionHash = decodedEvent.Log.TransactionHash,
                    LogIndex = decodedEvent.Log.LogIndex.Value.ToString(),
                    Transfer = decodedEvent.DecodedEventDto as TransferEventDto
                };
            });


            // Assign the queue to the event subscription
            // - Matching events will be written to this queue
            // - Pass a custom mapper to create a suitable queue message
            // - Ultimately the message is converted to json
            eventSubscription.AddQueueHandler(queue, queueMessageMapper);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransferscustom");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };

            // Create a log processor
            // - This uses the web3 proxy to get the logs
            // - It sends each log to the event subscriptions to check whether it matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var ctx = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // when the rangeProcessed is null - it means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));
                // get the message count from the queue
                Assert.Equal(11, await queue.GetApproxMessageCountAsync());

                //A sample message body from the queue

                /*
                 * {"BlockNumber":"7540010","TransactionHash":"0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0","LogIndex":"132","Transfer":{"From":"0x296c61eaf5bea208bbabc65ae01c3bc5270fe386","To":"0x2a8f1a6af55b705b7daee0776d6f97302de2a839","Value":119928660890733235}}
                 */
            }
            finally
            {
                // delete any data from Azure
                await ClearDown(queue, storageCloudSetup, queueFactory);
            }
        }
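
The CustomQueueMessageForTransfers type used by the mapper above is not shown in the sample. A plausible reconstruction, inferred purely from the properties the mapper assigns and the sample message body, would be:

        // Hypothetical reconstruction - the actual class ships with the sample project.
        public class CustomQueueMessageForTransfers
        {
            public string BlockNumber { get; set; }
            public string TransactionHash { get; set; }
            public string LogIndex { get; set; }
            public TransferEventDto Transfer { get; set; }
        }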