/// <inheritdoc />
/// <summary>
/// Dequeues one sync operation, validates and persists its transactions,
/// publishes address notifications, and logs timing statistics.
/// </summary>
/// <returns><c>true</c> if an item was processed; <c>false</c> when the queue was empty.</returns>
public override async Task<bool> OnExecute()
{
    // Guard clause: nothing queued means no work was done this cycle.
    if (!TryDequeue(out SyncBlockTransactionsOperation item))
    {
        return false;
    }

    watch.Restart();

    storageOperations.ValidateBlock(item);

    InsertStats count = storageOperations.InsertTransactions(item);

    if (item.BlockInfo != null)
    {
        // The block is fully stored; it must also be tracked in CurrentSyncing,
        // otherwise the sync state is inconsistent and we fail loudly.
        if (!Runner.SyncingBlocks.CurrentSyncing.TryRemove(item.BlockInfo.Hash, out BlockInfo blockInfo))
        {
            throw new InvalidOperationException($"Failed to remove block hash {item.BlockInfo.Hash} from collection");
        }

        // Record throughput sample (timestamp, duration, block size) for rate reporting.
        syncConnection.RecentItems.Add((DateTime.UtcNow, watch.Elapsed, item.BlockInfo.Size));
    }

    // Collect the distinct addresses touched by this batch for the notifier.
    var notifications = new AddressNotifications
    {
        Addresses = count.Items
            .Where(ad => ad.Addresses != null)
            .SelectMany(s => s.Addresses)
            .Distinct()
            .ToList()
    };

    Runner.Get<Notifier>().Enqueue(notifications);

    watch.Stop();

    // Block sync and pool sync are logged with different shapes.
    string message = item.BlockInfo != null
        ? $"Seconds = {watch.Elapsed.TotalSeconds} - BlockIndex = {item.BlockInfo.Height} - TotalItems = {count.Transactions + count.InputsOutputs} - Size = {item.BlockInfo.Size} kb"
        : $"Seconds = {watch.Elapsed.TotalSeconds} - PoolSync - TotalItems = {count.Transactions + count.InputsOutputs}";

    log.LogDebug(message);

    // No need to wrap the result in Task.FromResult — the method is already async.
    return true;
}
/// <summary>
/// Persists the transactions of a sync operation. With a block attached, writes the
/// block/transaction maps and address inputs in batches and marks spent outputs;
/// without one, pushes the transactions into the memory pool.
/// </summary>
/// <param name="item">The block (optional) and its decoded transactions.</param>
/// <returns>Counters plus the address maps used later by the notification task.</returns>
public InsertStats InsertTransactions(SyncBlockTransactionsOperation item)
{
    var stats = new InsertStats { Items = new List<MapTransactionAddress>() };

    if (item.BlockInfo != null)
    {
        // These transactions are now confirmed in a block; drop them from the memory pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryRemove(t.TxId, out _));

        // Break the work into batches of transactions.
        var queue = new Queue<DecodedRawTransaction>(item.Transactions);

        do
        {
            var items = this.GetBatch(this.configuration.MongoBatchSize, queue).ToList();

            try
            {
                var inserts = items
                    .Select(s => new MapTransactionBlock { BlockIndex = item.BlockInfo.Height, TransactionId = s.TxId })
                    .ToList();

                stats.Transactions += inserts.Count;

                this.data.MapTransactionBlock.InsertMany(inserts, new InsertManyOptions { IsOrdered = false });
            }
            catch (MongoBulkWriteException mbwex)
            {
                // Duplicate keys are expected when a block is re-processed; classify by
                // error category instead of parsing the exception message text.
                if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                {
                    throw;
                }
            }

            // Insert inputs and collect them for the notification task.
            var inputs = this.CreateInputs(item.BlockInfo.Height, items).ToList();
            var queueInner = new Queue<MapTransactionAddress>(inputs);

            do
            {
                try
                {
                    var itemsInner = this.GetBatch(this.configuration.MongoBatchSize, queueInner).ToList();

                    if (itemsInner.Any())
                    {
                        stats.Inputs += itemsInner.Count;
                        stats.Items.AddRange(itemsInner);
                        this.data.MapTransactionAddress.InsertMany(itemsInner, new InsertManyOptions { IsOrdered = false });
                    }
                }
                catch (MongoBulkWriteException mbwex)
                {
                    if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                    {
                        throw;
                    }
                }
            }
            while (queueInner.Any());

            // Insert outputs: mark each referenced output as spent by its consuming transaction.
            var outputs = this.CreateOutputs(items).ToList();
            stats.Outputs += outputs.Count;
            outputs.ForEach(outp => this.data.MarkOutput(outp.InputTransactionId, outp.InputIndex, outp.TransactionId));
        }
        while (queue.Any());

        // Mark the block as synced.
        this.CompleteBlock(item.BlockInfo);
    }
    else
    {
        // Memory transactions: push into the pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryAdd(t.TxId, t));

        stats.Transactions = this.data.MemoryTransactions.Count;

        // todo: for accuracy - remove transactions from the mongo memory pool that are not anymore in the syncing pool
        // this can be done using the SyncingBlocks objects - see method SyncOperations.FindPoolInternal()

        // Add to the list for later use on the notification task.
        // BlockIndex -1 flags these entries as unconfirmed.
        var inputs = this.CreateInputs(-1, item.Transactions).ToList();
        stats.Items.AddRange(inputs);
    }

    return stats;
}
/// <summary>
/// Persists the transactions of a sync operation. With a block attached, writes
/// block/transaction maps, bulk-writes address inputs and outputs (merging spends
/// that occur within the same batch), and optionally stores raw transaction bytes;
/// without one, pushes the transactions into the memory pool.
/// </summary>
/// <param name="item">The block (optional) and its NBitcoin transactions.</param>
/// <returns>Counters plus the address maps used later by the notification task.</returns>
public InsertStats InsertTransactions(SyncBlockTransactionsOperation item)
{
    var stats = new InsertStats { Items = new List<MapTransactionAddress>() };

    if (item.BlockInfo != null)
    {
        // These transactions are now confirmed in a block; drop them from the memory pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryRemove(t.GetHash().ToString(), out _));

        // Break the work into batches of transactions.
        var queue = new Queue<NBitcoin.Transaction>(item.Transactions);

        do
        {
            var items = this.GetBatch(this.configuration.MongoBatchSize, queue).ToList();

            try
            {
                var inserts = items
                    .Select(s => new MapTransactionBlock { BlockIndex = item.BlockInfo.Height, TransactionId = s.GetHash().ToString() })
                    .ToList();

                stats.Transactions += inserts.Count;

                this.data.MapTransactionBlock.InsertMany(inserts, new InsertManyOptions { IsOrdered = false });
            }
            catch (MongoBulkWriteException mbwex)
            {
                // Duplicate keys are expected when a block is re-processed; any other write error is fatal.
                if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                {
                    throw;
                }
            }

            // Build address maps for inputs and outputs; both flow through the same bulk-write path.
            var inputs = this.CreateInputs(item.BlockInfo.Height, items).ToList();
            var outputs = this.CreateOutputs(items, item.BlockInfo.Height).ToList();
            inputs.AddRange(outputs);

            var queueInner = new Queue<MapTransactionAddress>(inputs);

            do
            {
                try
                {
                    var itemsInner = this.GetBatch(this.configuration.MongoBatchSize, queueInner).ToList();

                    // One write model per document id; order of construction matters because a
                    // spend may target an insert created earlier in this same batch.
                    var ops = new Dictionary<string, WriteModel<MapTransactionAddress>>();
                    var writeOptions = new BulkWriteOptions() { IsOrdered = false };

                    foreach (var mapTransactionAddress in itemsInner)
                    {
                        if (mapTransactionAddress.SpendingTransactionId == null)
                        {
                            // Newly created (unspent) output: plain insert.
                            ops.Add(mapTransactionAddress.Id, new InsertOneModel<MapTransactionAddress>(mapTransactionAddress));
                        }
                        else
                        {
                            if (ops.TryGetValue(mapTransactionAddress.Id, out WriteModel<MapTransactionAddress> mta))
                            {
                                // In case a utxo is spent in the same block
                                // we just modify the inserted item directly.
                                var imta = mta as InsertOneModel<MapTransactionAddress>;
                                imta.Document.SpendingTransactionId = mapTransactionAddress.SpendingTransactionId;
                                imta.Document.SpendingBlockIndex = mapTransactionAddress.SpendingBlockIndex;
                            }
                            else
                            {
                                // Output created earlier: update the stored document's spend fields.
                                var filter = Builders<MapTransactionAddress>.Filter.Eq(addr => addr.Id, mapTransactionAddress.Id);

                                var update = Builders<MapTransactionAddress>.Update
                                    .Set(blockInfo => blockInfo.SpendingTransactionId, mapTransactionAddress.SpendingTransactionId)
                                    .Set(blockInfo => blockInfo.SpendingBlockIndex, mapTransactionAddress.SpendingBlockIndex);

                                ops.Add(mapTransactionAddress.Id, new UpdateOneModel<MapTransactionAddress>(filter, update));
                            }
                        }
                    }

                    if (itemsInner.Any())
                    {
                        stats.Items.AddRange(itemsInner);
                        stats.InputsOutputs += ops.Count;
                        this.data.MapTransactionAddress.BulkWrite(ops.Values, writeOptions);
                    }
                }
                catch (MongoBulkWriteException mbwex)
                {
                    if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                    {
                        throw;
                    }
                }
            }
            while (queueInner.Any());

            // If storing raw transactions is enabled then push them in batches too.
            if (this.configuration.StoreRawTransactions)
            {
                try
                {
                    var inserts = items
                        .Select(t => new MapTransaction
                        {
                            TransactionId = t.GetHash().ToString(),
                            RawTransaction = t.ToBytes(syncConnection.Network.Consensus.ConsensusFactory)
                        })
                        .ToList();

                    stats.RawTransactions = inserts.Count;

                    this.data.MapTransaction.InsertMany(inserts, new InsertManyOptions { IsOrdered = false });
                }
                catch (MongoBulkWriteException mbwex)
                {
                    if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                    {
                        throw;
                    }
                }
            }
        }
        while (queue.Any());

        // Mark the block as synced.
        this.CompleteBlock(item.BlockInfo);
    }
    else
    {
        // Memory transactions: push into the pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryAdd(t.GetHash().ToString(), t));

        stats.Transactions = this.data.MemoryTransactions.Count;

        // todo: for accuracy - remove transactions from the mongo memory pool that are not anymore in the syncing pool
        // this can be done using the SyncingBlocks objects - see method SyncOperations.FindPoolInternal()

        // Add to the list for later use on the notification task.
        // BlockIndex -1 flags these entries as unconfirmed.
        var inputs = this.CreateInputs(-1, item.Transactions).ToList();
        stats.Items.AddRange(inputs);
    }

    return stats;
}
/// <summary>
/// Persists the transactions of a sync operation. With a block attached, resolves
/// each input's previous output over RPC, writes enriched block/transaction maps plus
/// detail documents, inserts address inputs in batches, and marks spent outputs;
/// without one, pushes the transactions into the memory pool.
/// </summary>
/// <param name="item">The block (optional) and its decoded transactions.</param>
/// <returns>Counters plus the address maps used later by the notification task.</returns>
public InsertStats InsertTransactions(SyncBlockTransactionsOperation item)
{
    var stats = new InsertStats { Items = new List<MapTransactionAddress>() };

    if (item.BlockInfo != null)
    {
        // These transactions are now confirmed in a block; drop them from the memory pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryRemove(t.TxId, out _));

        // Break the work into batches of transactions.
        var queue = new Queue<DecodedRawTransaction>(item.Transactions);

        // The RPC client is loop-invariant: create it once instead of once per batch.
        var bitcoinClient = CryptoClientFactory.Create(
            syncConnection.ServerDomain,
            syncConnection.RpcAccessPort,
            syncConnection.User,
            syncConnection.Password,
            syncConnection.Secure);

        do
        {
            var transactions = this.GetBatch(this.configuration.MongoBatchSize, queue).ToList();

            try
            {
                var inserts = new List<MapTransactionBlock>();
                var insertDetails = new List<MapTransactionDetail>();

                foreach (var tx in transactions)
                {
                    // NOTE(review): assumes every transaction has at least one input;
                    // VIn.First() throws on an empty list — confirm upstream guarantees this.
                    var isCoinBase = !string.IsNullOrWhiteSpace(tx.VIn.First().CoinBase);

                    var syncVin = tx.VIn.Select(vin =>
                    {
                        // Coinbase inputs have no previous output to resolve over RPC.
                        var previousTransaction = isCoinBase ? null : bitcoinClient.GetRawTransaction(vin.TxId, 1);

                        return new SyncVin
                        {
                            TxId = vin.TxId,
                            CoinBase = vin.CoinBase,
                            IsCoinBase = !string.IsNullOrWhiteSpace(vin.CoinBase),
                            ScriptSig = vin.ScriptSig,
                            Sequence = vin.Sequence,
                            VOut = vin.VOut,
                            PreviousVout = previousTransaction?.VOut.First(o => o.N == vin.VOut)
                        };
                    }).ToList();

                    // Aggregate values; TotalVin is null-tolerant because coinbase inputs
                    // have no previous output.
                    var totalVout = tx.VOut.Sum(o => o.Value);
                    var totalVin = syncVin.Sum(i => i.PreviousVout?.Value);

                    inserts.Add(new MapTransactionBlock
                    {
                        BlockIndex = item.BlockInfo.Height,
                        TransactionId = tx.TxId,
                        TotalVout = totalVout,
                        TotalVin = totalVin,
                        Time = tx.Time,
                        BlockHash = tx.BlockHash,
                        BlockTime = tx.BlockTime,
                        Locktime = tx.Locktime,
                        Version = tx.Version,
                        IsCoinBase = isCoinBase
                    });

                    insertDetails.Add(new MapTransactionDetail
                    {
                        TransactionId = tx.TxId,
                        Vin = syncVin,
                        Vout = tx.VOut
                    });
                }

                stats.Transactions += inserts.Count;

                this.data.MapTransactionBlock.InsertMany(inserts, new InsertManyOptions { IsOrdered = false });
                this.data.MapTransactionDetails.InsertMany(insertDetails, new InsertManyOptions { IsOrdered = false });
            }
            catch (MongoBulkWriteException mbwex)
            {
                // Duplicate keys are expected when a block is re-processed; classify by
                // error category instead of parsing the exception message text.
                if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                {
                    throw;
                }
            }

            // Insert inputs and collect them for the notification task.
            var inputs = this.CreateInputs(item.BlockInfo.Height, transactions).ToList();
            var queueInner = new Queue<MapTransactionAddress>(inputs);

            do
            {
                try
                {
                    var itemsInner = this.GetBatch(this.configuration.MongoBatchSize, queueInner).ToList();

                    if (itemsInner.Any())
                    {
                        stats.Inputs += itemsInner.Count;
                        stats.Items.AddRange(itemsInner);
                        this.data.MapTransactionAddress.InsertMany(itemsInner, new InsertManyOptions { IsOrdered = false });
                    }
                }
                catch (MongoBulkWriteException mbwex)
                {
                    if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
                    {
                        throw;
                    }
                }
            }
            while (queueInner.Any());

            // Insert outputs: mark each referenced output as spent by its consuming transaction.
            var outputs = this.CreateOutputs(transactions).ToList();
            stats.Outputs += outputs.Count;
            outputs.ForEach(outp => this.data.MarkOutput(outp.InputTransactionId, outp.InputIndex, outp.TransactionId));
        }
        while (queue.Any());

        // Mark the block as synced.
        this.CompleteBlock(item.BlockInfo);
    }
    else
    {
        // Memory transactions: push into the pool.
        item.Transactions.ForEach(t => this.data.MemoryTransactions.TryAdd(t.TxId, t));

        stats.Transactions = this.data.MemoryTransactions.Count;

        // todo: for accuracy - remove transactions from the mongo memory pool that are not anymore in the syncing pool
        // this can be done using the SyncingBlocks objects - see method SyncOperations.FindPoolInternal()

        // Add to the list for later use on the notification task.
        // BlockIndex -1 flags these entries as unconfirmed.
        var inputs = this.CreateInputs(-1, item.Transactions).ToList();
        stats.Items.AddRange(inputs);
    }

    return stats;
}