public async Task WritingEventsToAzureTableStorage()
{
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Requires: Nethereum.BlockchainStore.AzureTables

    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // cancellation token to enable the processor to be stopped
    // passing in a time limit as a safety valve for the unit test
    var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMinutes(1));

    // initialise the processor
    using (var processor = web3.Eth.LogsProcessor<TransferEventDto>()
        .SetBlocksPerBatch(1)            // optional: restrict batches to one block at a time
        .SetMinimumBlockNumber(7540103)  // optional: default is to start at the current block on the chain
        // configure this to stop after processing a batch
        .OnBatchProcessed((args) => cancellationTokenSource.Cancel())
        // wire up to Azure table storage
        .StoreInAzureTable<TransferEventDto>(azureStorageConnectionString, "septransfers")
        .Build())
    {
        // run the processor for a while
        var rangesProcessed = await processor.ProcessContinuallyAsync(cancellationTokenSource.Token);
    }

    var expectedLogs = new (string TransactionHash, long LogIndex)[] {
public async Task UsingAzureTableStorageProgressRepository()
{
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Requires: Nethereum.BlockchainStore.AzureTables

    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // cancellation token to enable the listener to be stopped
    // passing in a time limit as a safety valve for the unit test
    var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMinutes(1));

    // somewhere to put matching events
    // using ConcurrentBag because we'll be referencing the collection on different threads
    var erc20Transfers = new ConcurrentBag<EventLog<TransferEventDto>>();

    // initialise the processor
    var builder = web3.Eth.LogsProcessor<TransferEventDto>()
        .OnEvents((events) => erc20Transfers.AddRange(events)) // transfer events
        .SetBlocksPerBatch(1)            // optional: restrict batches to one block at a time
        .SetMinimumBlockNumber(7540102)  // optional: default is to start at the current block on the chain
        // for test purposes we'll stop after processing a batch
        .OnBatchProcessed((args) => cancellationTokenSource.Cancel())
        // tell the processor to reference an Azure Storage table for block progress
        // this is an extension method from Nethereum.BlockchainStore.AzureTables
        .UseAzureTableStorageForBlockProgress(azureStorageConnectionString, "EventLogProcessingSample");

    var processor = builder.Build();

    // we should have a BlockProgressRepository
    Assert.NotNull(builder.BlockProgressRepository);

    // there should be no prior progress
    Assert.Null(await builder.BlockProgressRepository.GetLastBlockNumberProcessedAsync());

    // run the processor for a while
    var rangesProcessed = await processor.ProcessContinuallyAsync(cancellationTokenSource.Token);

    // the last block processed should have been saved
    Assert.NotNull(await builder.BlockProgressRepository.GetLastBlockNumberProcessedAsync());

    // we should have captured some events
    Assert.True(erc20Transfers.Any());

    // clean up
    await new CloudTableSetup(azureStorageConnectionString, "EventLogProcessingSample")
        .GetCountersTable()
        .DeleteIfExistsAsync();
}
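// The samples in this section reference a TransferEventDto event DTO that is defined elsewhere.
// A minimal sketch follows, assuming the standard ERC-20 Transfer event signature; parameter names
// and ordering are assumptions and should match the contract you are targeting.
// Requires: using System.Numerics; using Nethereum.ABI.FunctionEncoding.Attributes;
[Event("Transfer")]
public class TransferEventDto : IEventDTO
{
    [Parameter("address", "_from", 1, true)]
    public string From { get; set; }

    [Parameter("address", "_to", 2, true)]
    public string To { get; set; }

    [Parameter("uint256", "_value", 3, false)]
    public BigInteger Value { get; set; }
}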
public async Task WritingEventsToASearchIndex()
{
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Requires: Nethereum.BlockchainStore.Search

    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string ApiKeyName = "AzureSearchApiKey";
    string AzureSearchServiceName = "blockchainsearch";
    var apiKey = config[ApiKeyName];

    // cancellation token to enable the listener to be stopped
    // passing in a time limit as a safety valve for the unit test
    var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMinutes(1));

    // initialise the processor
    // within a "using" block so that the processor cleans up the search resources it creates
    using (var processor = web3.Eth.LogsProcessor<TransferEventDto>()
        .SetBlocksPerBatch(1)            // optional: restrict batches to one block at a time
        .SetMinimumBlockNumber(7540103)  // optional: default is to start at the current block on the chain
        .OnBatchProcessed((args) => cancellationTokenSource.Cancel())
        .AddToSearchIndexAsync<TransferEventDto>(AzureSearchServiceName, apiKey, "sep-transfers")
        .Result
        .Build())
    {
        // run the processor for a while
        var rangesProcessed = await processor.ProcessContinuallyAsync(cancellationTokenSource.Token);
    }

    await Task.Delay(5000); // give Azure time to update

    using (var searchService = new AzureSearchService(AzureSearchServiceName, apiKey))
    {
        Assert.Equal((long)13, await searchService.CountDocumentsAsync("sep-transfers"));
        await searchService.DeleteIndexAsync("sep-transfers");
    }
}
public async Task WritingEventsToAnAzureQueue()
{
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Requires: Nethereum.BlockchainProcessing.Queue.Azure

    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // cancellation token to enable the listener to be stopped
    // passing in a time limit as a safety valve for the unit test
    var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMinutes(1));

    // initialise the processor
    using (var processor = web3.Eth.LogsProcessor<TransferEventDto>()
        .SetBlocksPerBatch(1)            // optional: restrict batches to one block at a time
        .SetMinimumBlockNumber(7540103)  // optional: default is to start at the current block on the chain
        .OnBatchProcessed((args) => cancellationTokenSource.Cancel())
        .AddToQueueAsync(azureStorageConnectionString, "sep-transfers")
        .Result
        .Build())
    {
        // run the processor for a while
        var rangesProcessed = await processor.ProcessContinuallyAsync(cancellationTokenSource.Token);
    }

    await Task.Delay(5000); // give Azure time to update

    // check the results
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);
    var queue = await queueFactory.GetOrCreateQueueAsync("sep-transfers");
    Assert.Equal(13, await queue.GetApproxMessageCountAsync());

    // clean up
    await queueFactory.CloudQueueClient.GetQueueReference(queue.Name).DeleteIfExistsAsync();
}
public async Task WriteTransferEventsForMakerDAOToAzureStorage()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create an Azure Table Storage Factory
    // - The factory communicates with Azure to create and get different tables
    var tableStorageFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

    // Create a Handler for a Table
    // - It wraps a table repository
    // - This is where we're going to put the matching event logs
    // - we're supplying a table prefix
    // - the actual table name would be "<prefix>TransactionLogs"
    // - this allows us to have different tables for different types of event logs
    // - the handler implements ILogHandler
    // - ILogHandler is a really simple interface to implement if you wish to customise the storage
    var storageHandlerForLogs = await tableStorageFactory.GetLogRepositoryHandlerAsync(tablePrefix: "makerdaotransfersstorage");

    // Create an event subscription specifically for ERC20 Transfers
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription<TransferEventDto>(
        contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

    // Assign the storage handler to the event subscription
    // - Matching events will be passed to the handler
    // - internally the handler passes the events to the repository layer which writes them to Azure
    eventSubscription.AddStorageHandler(storageHandlerForLogs);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransfersstorage");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchain proxy to get the logs
    // - It sends each log to the event subscriptions to indicate whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

        // get the row count from Azure storage
        // querying on storage is limited
        // the TransactionHash is the partition key and the row key is the LogIndex
        // this allows us to query by tx hash
        var logRepositoryHandler = storageHandlerForLogs as TransactionLogRepositoryHandler;
        var repository = logRepositoryHandler.TransactionLogRepository as TransactionLogRepository;

        var expectedTransactionHashes = new[]
        {
            "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
            "0x0bee561ac6bafb59bcc4c48fc4c1225aaedbab3e8089acea420140aafa47f3e5",
            "0x6fc82b076fa7088581a80869cb9c7a08d7f8e897670a9f67e39139b39246da7e",
            "0xdc2ee28db35ed5dbbc9e18a7d6bdbacb6e6633a9fce1ecda99ea7e1cf4bc8c72",
            "0xcd2fea48c84468f70c9a44c4ffd7b26064a2add8b72937edf593634d2501c1f6",
            "0x3acf887420887148222aab1d25d4d4893794e505ef276cc4cb6a48fffc6cb381",
            "0x96129f905589b2a95c26276aa7e8708a12381ddec50485d6684c4abf9a5a1d00"
        };

        List<TransactionLog> logsFromRepo = new List<TransactionLog>();
        foreach (var txHash in expectedTransactionHashes)
        {
            logsFromRepo.AddRange(await repository.GetManyAsync(txHash));
        }

        Assert.Equal(11, logsFromRepo.Count);
    }
    finally
    {
        // delete any data from Azure
        await storageCloudSetup.GetCountersTable().DeleteIfExistsAsync();
        await tableStorageFactory.DeleteTablesAsync();
    }
}
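// The MakerDAO samples reference MAKER_CONTRACT_ADDRESS, MIN_BLOCK_NUMBER and MAX_BLOCKS_PER_BATCH,
// which are defined elsewhere in the test class. A minimal sketch follows; the address shown is the
// mainnet MKR token contract and is included as an illustrative assumption, and the block number,
// batch size and their types are also assumptions - use values appropriate to your own scenario.
private const string MAKER_CONTRACT_ADDRESS = "0x9f8f72aa9304c8b593d555f12ef6589cc3a579a2";
private const ulong MIN_BLOCK_NUMBER = 7540000;   // assumed starting block for the samples
private const uint MAX_BLOCKS_PER_BATCH = 100;    // assumed maximum block range per processing batch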
public async Task QueueAllEventsForMakerDAOContract()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create Queue Factory
    // - In this sample we're targeting Azure
    // - The factory communicates with Azure to create and get different queues
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // Create a Queue
    // - This is where we're going to put the matching event logs
    var queue = await queueFactory.GetOrCreateQueueAsync("makerdaoevents");

    // Get the maker DAO contract abi
    // - from this we're able to match and decode the events in the contract
    var contractAbi = new ABIDeserialiser().DeserialiseContract(MAKER_DAO_ABI);

    // Create an event subscription for these events
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription(contractAbi.Events, new[] { MAKER_CONTRACT_ADDRESS });

    // Assign the queue to the event subscription
    // - Matching events will be written to this queue
    // - By default a generic message is written to the queue
    // - The message contains the raw log (aka FilterLog), decoded event parameter values and event metadata
    // - Therefore the message schema is consistent across all messages sent to any queue
    // - However - should you require your own queue message schema the method below accepts a custom message mapper
    // - Ultimately the message is converted to json
    eventSubscription.AddQueueHandler(queue);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdao");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchain proxy to get the logs
    // - It sends each log to the event subscriptions to indicate whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(16, eventSubscription.State.GetInt("EventsHandled"));

        // get the message count from the queue
        Assert.Equal(16, await queue.GetApproxMessageCountAsync());
    }
    finally
    {
        // delete any data from Azure
        await ClearDown(queue, storageCloudSetup, queueFactory);
    }
}
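// The queue-based samples call a ClearDown helper that is not shown in this section. A minimal sketch
// follows, assuming it simply removes the queue and the progress counters table the sample created;
// the signature (including the IQueue parameter type) and body are assumptions based on the clean-up
// calls used in the other samples.
private static async Task ClearDown(
    IQueue queue,
    CloudTableSetup storageCloudSetup,
    AzureSubscriberQueueFactory queueFactory)
{
    // delete the Azure storage queue that received the messages
    await queueFactory.CloudQueueClient.GetQueueReference(queue.Name).DeleteIfExistsAsync();

    // delete the table used to track block progress
    await storageCloudSetup.GetCountersTable().DeleteIfExistsAsync();
}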
public async Task WritingCustomMessagesToTheQueue()
{
    // Load config
    // - this will contain the secrets and connection strings we don't want to hard code
    var config = TestConfiguration.LoadConfig();
    string azureStorageConnectionString = config["AzureStorageConnectionString"];

    // Create a proxy for the blockchain
    var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);

    // Create Queue Factory
    // - In this sample we're targeting Azure
    // - The factory communicates with Azure to create and get different queues
    var queueFactory = new AzureSubscriberQueueFactory(azureStorageConnectionString);

    // Create a Queue
    // - This is where we're going to put the matching event logs
    var queue = await queueFactory.GetOrCreateQueueAsync("makerdaotransferscustom");

    // Create an event subscription specifically for ERC20 Transfers
    // - Passing in the maker dao address to ensure only logs with a matching address are processed
    // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
    // - This would record history for each event handler and is used to prevent duplication
    var eventSubscription = new EventSubscription<TransferEventDto>(
        contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });

    // Create a mapper that will convert the DecodedEvent into a custom message we want on the queue
    // In this sample we're using a subscription that is specific to an EventDTO (EventSubscription<TransferEventDto>)
    // This ensures that the decodedEvent.DecodedEventDto property is populated during processing
    // (If the subscription is not tied to an EventDTO the decodedEvent.DecodedEventDto property would be null
    //  BUT we can still read the event arguments (aka parameters or topics) from the decodedEvent.Event property)
    var queueMessageMapper = new QueueMessageMapper((decodedEvent) =>
    {
        return new CustomQueueMessageForTransfers
        {
            BlockNumber = decodedEvent.Log.BlockNumber.Value.ToString(),
            TransactionHash = decodedEvent.Log.TransactionHash,
            LogIndex = decodedEvent.Log.LogIndex.Value.ToString(),
            Transfer = decodedEvent.DecodedEventDto as TransferEventDto
        };
    });

    // Assign the queue to the event subscription
    // - Matching events will be written to this queue
    // - Pass a custom mapper to create a suitable queue message
    // - Ultimately the message is converted to json
    eventSubscription.AddQueueHandler(queue, queueMessageMapper);

    // Azure storage setup
    // - this example reads and writes block progress to an Azure storage table
    // - to avoid collision with other samples we provide a prefix
    var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransferscustom");

    // Create a progress repository
    // - It stores and retrieves the most recent block processed
    var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

    // Create a progress service
    // - This uses the progress repo to dictate what blocks to process next
    // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
    var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

    // Create a filter
    // - This is essentially the query that is sent to the chain when retrieving logs
    // - It is OPTIONAL - without it, all logs in the block range are requested
    // - The filter is invoked before any event subscriptions evaluate the logs
    // - The subscriptions are free to implement their own matching logic
    // - In this sample we're only interested in MakerDAO logs
    // - Therefore it makes sense to restrict the number of logs to retrieve from the chain
    var makerAddressFilter = new NewFilterInput() { Address = new[] { MAKER_CONTRACT_ADDRESS } };

    // Create a log processor
    // - This uses the blockchain proxy to get the logs
    // - It sends each log to the event subscriptions to indicate whether the log matches the subscription criteria
    // - It then allocates matching logs to separate batches per event subscription
    var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

    // Create a batch log processor service
    // - It uses the progress service to calculate the block range to process
    // - It then invokes the log processor - passing in the range to process
    // - It updates progress via the progress service
    var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

    // execute
    try
    {
        // Optional cancellation token
        // - Useful for cancelling long running processing operations
        var ctx = new System.Threading.CancellationTokenSource();

        // instruct the service to get and process the next range of blocks
        // when rangeProcessed is null - it means there was nothing to process
        var rangeProcessed = await batchProcessorService.ProcessOnceAsync(ctx.Token);

        // ensure we have processed the expected number of events
        // the event subscription has state which can record running totals across many processing batches
        Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

        // get the message count from the queue
        Assert.Equal(11, await queue.GetApproxMessageCountAsync());

        // A sample message body from the queue:
        /*
        {"BlockNumber":"7540010","TransactionHash":"0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0","LogIndex":"132","Transfer":{"From":"0x296c61eaf5bea208bbabc65ae01c3bc5270fe386","To":"0x2a8f1a6af55b705b7daee0776d6f97302de2a839","Value":119928660890733235}}
        */
    }
    finally
    {
        // delete any data from Azure
        await ClearDown(queue, storageCloudSetup, queueFactory);
    }
}
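// The custom mapper above returns a CustomQueueMessageForTransfers instance that is not shown in this
// section. A minimal sketch follows, shaped to match the sample message body in the comment above;
// the property types (strings for the block and log identifiers) are assumptions.
public class CustomQueueMessageForTransfers
{
    public string BlockNumber { get; set; }
    public string TransactionHash { get; set; }
    public string LogIndex { get; set; }
    public TransferEventDto Transfer { get; set; }
}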