public async Task WriteTransferEventsForMakerDAOToAzureStorage()
        {
            // Load config
            //  - this will contain the secrets and connection strings we don't want to hard code
            var    config = TestConfiguration.LoadConfig();
            string azureStorageConnectionString = config["AzureStorageConnectionString"];
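
            // For reference, the config is expected to contain something like the
            // sketch below (where these settings live - user secrets, appsettings,
            // environment variables - is an assumption; only the key name comes
            // from this sample):
            // {
            //   "AzureStorageConnectionString": "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=..."
            // }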

            // Create a proxy for the blockchain
            var web3 = new Web3.Web3(TestConfiguration.BlockchainUrls.Infura.Mainnet);
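
            // The constants referenced below are defined elsewhere in the sample
            // class, along these lines (the MKR token address is well known; the
            // starting block and batch size shown here are illustrative
            // assumptions):
            // const string MAKER_CONTRACT_ADDRESS = "0x9f8f72aa9304c8b593d555f12ef6589cc3a579a2";
            // const ulong MIN_BLOCK_NUMBER = 7450000;
            // const uint MAX_BLOCKS_PER_BATCH = 10;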

            // Create an Azure Table Storage Factory
            //  - The factory communicates with Azure to create and get different tables
            var tableStorageFactory = new AzureTablesSubscriberRepositoryFactory(azureStorageConnectionString);

            // Create a Handler for a Table
            // - It wraps a table repository
            // - This is where we're going to put the matching event logs
            // - we're supplying a table prefix
            // - the actual table name would be "<prefix>TransactionLogs"
            // - this allows us to have different tables for different types of event logs
            // - the handler implements ILogHandler
            // - ILogHandler is a really simple interface to implement if you wish to customise the storage
            var storageHandlerForLogs = await tableStorageFactory.GetLogRepositoryHandlerAsync(tablePrefix: "makerdaotransfersstorage");
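
            // If the built-in handler doesn't fit, a custom one is a small class -
            // a hypothetical sketch (member names are assumptions, verify against
            // the ILogHandler interface in your package version):
            // public class ConsoleLogHandler : ILogHandler
            // {
            //     public Task HandleAsync(FilterLog log)
            //     {
            //         Console.WriteLine($"{log.TransactionHash} [{log.LogIndex}]");
            //         return Task.CompletedTask;
            //     }
            // }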

            // Create an event subscription specifically for ERC20 Transfers
            // - Passing in the maker dao address to ensure only logs with a matching address are processed
            // - There is an option to pass an implementation of IEventHandlerHistoryRepository in to the constructor
            // - This would record history for each event handler and is used to prevent duplication
            var eventSubscription = new EventSubscription<TransferEventDto>(
                contractAddressesToMatch: new[] { MAKER_CONTRACT_ADDRESS });
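
            // For reference, a typical ERC20 Transfer DTO follows the sketch below
            // (the actual TransferEventDto used by this sample may differ in
            // parameter names):
            // [Event("Transfer")]
            // public class TransferEventDto : IEventDTO
            // {
            //     [Parameter("address", "_from", 1, true)]
            //     public string From { get; set; }
            //
            //     [Parameter("address", "_to", 2, true)]
            //     public string To { get; set; }
            //
            //     [Parameter("uint256", "_value", 3, false)]
            //     public BigInteger Value { get; set; }
            // }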

            // Assign the storage handler to the event subscription
            // - Matching events will be passed to the handler
            // - internally the handler passes the events to the repository layer which writes them to Azure
            eventSubscription.AddStorageHandler(storageHandlerForLogs);

            // Azure storage setup
            // - this example reads and writes block progress to an Azure storage table
            // - to avoid collision with other samples we provide a prefix
            var storageCloudSetup = new CloudTableSetup(azureStorageConnectionString, prefix: "makerdaotransfersstorage");

            // Create a progress repository
            //  - It stores and retrieves the most recent block processed
            var blockProgressRepo = storageCloudSetup.CreateBlockProgressRepository();

            // Create a progress service
            // - This uses the progress repo to dictate what blocks to process next
            // - The MIN_BLOCK_NUMBER dictates the starting point if the progress repo is empty or has fallen too far behind
            var progressService = new BlockProgressService(web3, MIN_BLOCK_NUMBER, blockProgressRepo);

            // Create a filter
            //  - This is essentially the query that is sent to the chain when retrieving logs
            //  - It is OPTIONAL - without it, all logs in the block range are requested
            //  - The filter is invoked before any event subscriptions evaluate the logs
            //  - The subscriptions are free to implement their own matching logic
            //  - In this sample we're only interested in MakerDAO logs
            //  - Therefore it makes sense to restrict the number of logs to retrieve from the chain
            var makerAddressFilter = new NewFilterInput()
            {
                Address = new[] { MAKER_CONTRACT_ADDRESS }
            };
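
            // The filter could be narrowed further by topic, e.g. to the standard
            // Transfer(address,address,uint256) signature hash (a sketch - this
            // sample relies on address matching alone):
            // var makerTransferFilter = new NewFilterInput
            // {
            //     Address = new[] { MAKER_CONTRACT_ADDRESS },
            //     Topics = new object[] { "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef" }
            // };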

            // Create a log processor
            // - This uses the blockchainProxy to get the logs
            // - It passes each log to the event subscriptions to determine whether it matches the subscription criteria
            // - It then allocates matching logs to separate batches per event subscription
            var logProcessor = new BlockRangeLogsProcessor(web3, new[] { eventSubscription }, makerAddressFilter);

            // Create a batch log processor service
            // - It uses the progress service to calculate the next block range to process
            // - It then invokes the log processor - passing in the range to process
            // - It updates progress via the progress service
            var batchProcessorService = new LogsProcessor(logProcessor, progressService, MAX_BLOCKS_PER_BATCH);

            // execute
            try
            {
                // Optional cancellation token
                // - Useful for cancelling long running processing operations
                var cts = new System.Threading.CancellationTokenSource();

                // instruct the service to get and process the next range of blocks
                // a null result means there was nothing to process
                var rangeProcessed = await batchProcessorService.ProcessOnceAsync(cts.Token);
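
                // for ongoing indexing, the single pass above could become a loop
                // (a sketch - the delay between empty polls is an assumption):
                // while (!cts.IsCancellationRequested)
                // {
                //     var range = await batchProcessorService.ProcessOnceAsync(cts.Token);
                //     if (range == null) await Task.Delay(TimeSpan.FromSeconds(30), cts.Token);
                // }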

                // ensure we have processed the expected number of events
                // the event subscription has state which can record running totals across many processing batches
                Assert.Equal(11, eventSubscription.State.GetInt("EventsHandled"));

                // get the rows from azure storage
                // querying options on table storage are limited
                // the TransactionHash is the PartitionKey and the LogIndex is the RowKey
                // this allows us to query by transaction hash

                var logRepositoryHandler = storageHandlerForLogs as TransactionLogRepositoryHandler;
                var repository           = logRepositoryHandler.TransactionLogRepository as TransactionLogRepository;

                var expectedTransactionHashes = new[]
                {
                    "0x8d58abc578f5e321f2e6b7c0637ccc60fbf62b39b120691cbf19ff201f5069b0",
                    "0x0bee561ac6bafb59bcc4c48fc4c1225aaedbab3e8089acea420140aafa47f3e5",
                    "0x6fc82b076fa7088581a80869cb9c7a08d7f8e897670a9f67e39139b39246da7e",
                    "0xdc2ee28db35ed5dbbc9e18a7d6bdbacb6e6633a9fce1ecda99ea7e1cf4bc8c72",
                    "0xcd2fea48c84468f70c9a44c4ffd7b26064a2add8b72937edf593634d2501c1f6",
                    "0x3acf887420887148222aab1d25d4d4893794e505ef276cc4cb6a48fffc6cb381",
                    "0x96129f905589b2a95c26276aa7e8708a12381ddec50485d6684c4abf9a5a1d00"
                };

                var logsFromRepo = new List<TransactionLog>();
                foreach (var txHash in expectedTransactionHashes)
                {
                    logsFromRepo.AddRange(await repository.GetManyAsync(txHash));
                }

                // the 7 transactions above produced 11 matching logs in total
                // (some transactions contain more than one Transfer event)
                Assert.Equal(11, logsFromRepo.Count);
            }
            finally
            {
                // delete any data from Azure
                await storageCloudSetup.GetCountersTable().DeleteIfExistsAsync();

                await tableStorageFactory.DeleteTablesAsync();
            }
        }