public async Task<SyncBlockInfo> RewindToBestChain(SyncConnection connection)
{
   IBlockchainClient client = clientFactory.Create(connection);

   while (true)
   {
      SyncBlockInfo block = storage.GetLatestBlock();

      if (block == null)
      {
         return null;
      }

      string currentHash = await client.GetblockHashAsync(block.BlockIndex);
      if (currentHash == block.BlockHash)
      {
         return block;
      }

      log.LogDebug($"Rewinding block {block.BlockIndex}({block.BlockHash})");

      await storage.DeleteBlockAsync(block.BlockHash);
   }
}
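// A minimal usage sketch (not from the original source): on startup, reconcile the store
// with the node's chain before resuming sync. The surrounding class members (clientFactory,
// storage, log) are assumed to exist as in the method above.
public async Task StartupAsync(SyncConnection syncConnection)
{
   // Walks the store tip backwards, deleting blocks whose hash no longer matches the
   // node's hash at that height, and returns the first block still on the best chain.
   SyncBlockInfo tip = await RewindToBestChain(syncConnection);

   log.LogDebug(tip == null
      ? "Store is empty, starting sync from genesis."
      : $"Store reconciled at block {tip.BlockIndex}({tip.BlockHash}).");
}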
public SyncTransactionInfo BlockTransactionGet(string transactionId)
{
   FilterDefinition<TransactionBlockTable> filter = Builders<TransactionBlockTable>.Filter.Eq(info => info.TransactionId, transactionId);

   TransactionBlockTable trx = mongoDb.TransactionBlockTable.Find(filter).FirstOrDefault();
   if (trx == null)
   {
      return null;
   }

   SyncBlockInfo current = globalState.StoreTip; // GetLatestBlock();
   SyncBlockInfo blk = BlockByIndex(trx.BlockIndex);

   return new SyncTransactionInfo
   {
      BlockIndex = trx.BlockIndex,
      BlockHash = blk.BlockHash,
      Timestamp = blk.BlockTime,
      TransactionHash = trx.TransactionId,
      TransactionIndex = trx.TransactionIndex,
      Confirmations = current.BlockIndex + 1 - trx.BlockIndex
   };
}
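// Worked example for the confirmation count above: with the store tip at BlockIndex 100
// and a transaction mined at BlockIndex 98, Confirmations = 100 + 1 - 98 = 3, i.e. the
// including block itself counts as the first confirmation.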
public SyncTransactionAddressBalance AddressGetBalanceUtxo(string address, long confirmations)
{
   SyncBlockInfo current = BlockGetBlockCount(1).First();

   var addrs = SelectAddressWithPool(current, address, true).ToList();

   return CreateAddresBalance(confirmations, addrs, true);
}
/// <summary>
/// Get the transactions that belong to a block.
/// </summary>
/// <param name="hash"></param>
public QueryResult<SyncTransactionInfo> TransactionsByBlock(string hash, int offset, int limit)
{
   SyncBlockInfo blk = BlockByHash(hash);

   if (blk == null)
   {
      return null;
   }

   return TransactionsByBlock(blk.BlockIndex, offset, limit);
}
public void UpdateConfirmations()
{
   SyncBlockInfo first = storage.BlockGetBlockCount(1).First();

   for (long i = first.BlockIndex; i > first.BlockIndex - 500; i--)
   {
      SyncBlockInfo next = storage.BlockGetByIndex(i);
      long confirmations = first.BlockIndex - next.BlockIndex + 2;
      data.UpdateConfirmations(next.BlockHash, confirmations);
   }
}
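// Worked example of the formula above: with the tip at BlockIndex 1000, the block at
// index 998 is stored with 1000 - 998 + 2 = 4 confirmations (the tip itself gets 2),
// and only the 500 most recent blocks are revisited on each pass.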
protected override async Task OnDeleteBlockAsync(SyncBlockInfo block)
{
   // delete the contracts
   FilterDefinition<CirrusContractTable> contractFilter = Builders<CirrusContractTable>.Filter.Eq(info => info.BlockIndex, block.BlockIndex);
   Task<DeleteResult> contracts = mongoDb.CirrusContractTable.DeleteManyAsync(contractFilter);

   FilterDefinition<CirrusContractCodeTable> contractCodeFilter = Builders<CirrusContractCodeTable>.Filter.Eq(info => info.BlockIndex, block.BlockIndex);
   Task<DeleteResult> contractsCode = mongoDb.CirrusContractCodeTable.DeleteManyAsync(contractCodeFilter);

   await Task.WhenAll(contracts, contractsCode);
}
public IEnumerable<SyncTransactionInfo> BlockTransactionGetByBlockIndex(long blockIndex)
{
   SyncBlockInfo blk = BlockGetByIndex(blockIndex);
   SyncBlockInfo current = BlockGetBlockCount(1).First();

   FilterDefinition<MapTransactionBlock> filter = Builders<MapTransactionBlock>.Filter.Eq(info => info.BlockIndex, blk.BlockIndex);
   var trxs = MapTransactionBlock.Find(filter).ToList();

   return trxs.Select(s => new SyncTransactionInfo
   {
      BlockIndex = s.BlockIndex,
      BlockHash = blk.BlockHash,
      Timestamp = blk.BlockTime,
      TransactionHash = s.TransactionId,
      Confirmations = current.BlockIndex - s.BlockIndex
   });
}
public void DeleteBlock(string blockHash)
{
   SyncBlockInfo block = BlockGetByHash(blockHash);

   // delete the outputs
   FilterDefinition<MapTransactionAddress> addrFilter = Builders<MapTransactionAddress>.Filter.Eq(addr => addr.BlockIndex, block.BlockIndex);
   MapTransactionAddress.DeleteMany(addrFilter);

   // delete the transactions
   FilterDefinition<MapTransactionBlock> transactionFilter = Builders<MapTransactionBlock>.Filter.Eq(info => info.BlockIndex, block.BlockIndex);
   MapTransactionBlock.DeleteMany(transactionFilter);

   // delete the block itself.
   FilterDefinition<MapBlock> blockFilter = Builders<MapBlock>.Filter.Eq(info => info.BlockHash, blockHash);
   MapBlock.DeleteOne(blockFilter);
}
public async Task<SyncBlockInfo> RewindToLastCompletedBlockAsync()
{
   SyncBlockInfo lastBlock = storage.GetLatestBlock();

   if (lastBlock == null)
   {
      return null;
   }

   while (lastBlock != null && lastBlock.SyncComplete == false)
   {
      log.LogDebug($"Rewinding block {lastBlock.BlockIndex}({lastBlock.BlockHash})");

      await storage.DeleteBlockAsync(lastBlock.BlockHash);
      lastBlock = storage.BlockByIndex(lastBlock.BlockIndex - 1);
   }

   return lastBlock;
}
/// <summary>
/// Returns block information in the section specified with offset and limit. If offset is not set (null), the last page is returned.
/// </summary>
/// <param name="offset">Leave unset (null) if the last page should be returned.</param>
/// <param name="limit">Amount of items to return.</param>
/// <returns></returns>
public QueryResult<SyncBlockInfo> Blocks(int? offset, int limit)
{
   SyncBlockInfo storeTip = globalState.StoreTip;
   long index = storeTip?.BlockIndex ?? mongoDb.BlockTable.Find(Builders<BlockTable>.Filter.Empty).CountDocuments() - 1;

   // Get the total number of items based off the index.
   long total = index + 1;

   // If the offset has a value, then use it; if not, fetch the latest blocks.
   long startPosition = offset ?? total - limit;
   long endPosition = startPosition + limit;

   // The BlockIndex is 0 based, so we must perform >= to get the first one.
   IQueryable<BlockTable> filter = mongoDb.BlockTable.AsQueryable()
      .OrderBy(b => b.BlockIndex)
      .Where(w => w.BlockIndex >= startPosition && w.BlockIndex < endPosition);

   IEnumerable<SyncBlockInfo> list = filter.ToList().Select(mongoBlockToStorageBlock.Map);

   return new QueryResult<SyncBlockInfo> { Items = list, Total = total, Offset = (int)startPosition, Limit = limit };
}
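// A short usage sketch (illustrative; the IStorage host below is an assumption) of the
// paging contract documented above: a null offset selects the newest page.
public static void PrintNewestBlocks(IStorage storage)
{
   // With total = 1500 blocks and limit = 10, a null offset yields startPosition = 1490,
   // so blocks 1490..1499 (the newest page) are returned; offset: 0 would return 0..9.
   QueryResult<SyncBlockInfo> page = storage.Blocks(offset: null, limit: 10);

   foreach (SyncBlockInfo block in page.Items)
   {
      Console.WriteLine($"{block.BlockIndex} {block.BlockHash}");
   }
}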
public void ValidateBlock(SyncBlockTransactionsOperation item)
{
   if (item.BlockInfo != null)
   {
      SyncBlockInfo lastBlock = storage.BlockGetBlockCount(1).FirstOrDefault();

      if (lastBlock != null)
      {
         if (lastBlock.BlockHash == item.BlockInfo.Hash)
         {
            if (lastBlock.SyncComplete)
            {
               throw new InvalidOperationException("This should never happen.");
            }
         }
         else
         {
            if (item.BlockInfo.PreviousBlockHash != lastBlock.BlockHash)
            {
               InvalidBlockFound(lastBlock, item);
               return;
            }

            CreateBlock(item.BlockInfo);

            ////if (string.IsNullOrEmpty(lastBlock.NextBlockHash))
            ////{
            ////   lastBlock.NextBlockHash = item.BlockInfo.Hash;
            ////   this.SyncOperations.UpdateBlockHash(lastBlock);
            ////}
         }
      }
      else
      {
         CreateBlock(item.BlockInfo);
      }
   }
}
public SyncTransactionInfo BlockTransactionGet(string transactionId)
{
   FilterDefinition<MapTransactionBlock> filter = Builders<MapTransactionBlock>.Filter.Eq(info => info.TransactionId, transactionId);

   MapTransactionBlock trx = MapTransactionBlock.Find(filter).FirstOrDefault();
   if (trx == null)
   {
      return null;
   }

   SyncBlockInfo current = BlockGetBlockCount(1).First();
   SyncBlockInfo blk = BlockGetByIndex(trx.BlockIndex);

   return new SyncTransactionInfo
   {
      BlockIndex = trx.BlockIndex,
      BlockHash = blk.BlockHash,
      Timestamp = blk.BlockTime,
      TransactionHash = trx.TransactionId,
      Confirmations = current.BlockIndex - trx.BlockIndex
   };
}
private IEnumerable<SyncTransactionAddressItem> SelectAddressWithPool(SyncBlockInfo current, string address, bool availableOnly)
{
   FilterDefinitionBuilder<MapTransactionAddress> builder = Builders<MapTransactionAddress>.Filter;
   var addressFilter = new List<string> { address };
   FilterDefinition<MapTransactionAddress> filter = builder.AnyIn(transactionAddress => transactionAddress.Addresses, addressFilter);

   if (availableOnly)
   {
      // we only want spendable transactions
      filter = filter & builder.Eq(info => info.SpendingTransactionId, null);
   }

   watch.Restart();

   SortDefinition<MapTransactionAddress> sort = Builders<MapTransactionAddress>.Sort.Descending(info => info.BlockIndex);

   var addrs = MapTransactionAddress.Find(filter).Sort(sort).ToList();

   watch.Stop();

   log.LogInformation($"Select: Seconds = {watch.Elapsed.TotalSeconds} - UnspentOnly = {availableOnly} - Addr = {address} - Items = {addrs.Count}");

   // this creates a copy of the collection (to avoid thread issues)
   ICollection<Transaction> pool = MemoryTransactions.Values;

   if (pool.Any())
   {
      // mark trx outputs as spent if they exist in the pool
      List<MapTransactionAddress> addrsupdate = addrs;
      GetPoolOutputs(pool).ForEach(f =>
      {
         MapTransactionAddress adr = addrsupdate.FirstOrDefault(a => a.TransactionId == f.Item1.PrevOut.Hash.ToString() && a.Index == f.Item1.PrevOut.N);
         if (adr != null)
         {
            adr.SpendingTransactionId = f.Item2;
         }
      });

      // if only spendable transactions are to be returned we need to remove
      // any that have been marked as spent by a transaction in the pool
      if (availableOnly)
      {
         addrs = addrs.Where(d => d.SpendingTransactionId == null).ToList();
      }

      // add all pool transactions to the main output
      var paddr = PoolToMapTransactionAddress(pool, address).ToList();
      addrs = addrs.OrderByDescending(s => s.BlockIndex).Concat(paddr).ToList();
   }

   // map to the return type and calculate confirmations
   return addrs.Select(s => new SyncTransactionAddressItem
   {
      Address = address,
      Index = s.Index,
      TransactionHash = s.TransactionId,
      BlockIndex = s.BlockIndex == -1 ? default(long?) : s.BlockIndex,
      Value = s.Value,
      Confirmations = s.BlockIndex == -1 ? 0 : current.BlockIndex - s.BlockIndex + 1,
      SpendingTransactionHash = s.SpendingTransactionId,
      SpendingBlockIndex = s.SpendingBlockIndex,
      CoinBase = s.CoinBase,
      CoinStake = s.CoinStake,
      ScriptHex = new Script(Encoders.Hex.DecodeData(s.ScriptHex)).ToString(),
      Type = StandardScripts.GetTemplateFromScriptPubKey(new Script(Encoders.Hex.DecodeData(s.ScriptHex)))?.Type.ToString(),
      Time = s.BlockIndex == -1 ? UnixUtils.DateToUnixTimestamp(DateTime.UtcNow) : current.BlockTime
   });
}
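// Worked example for the mapping above: an unspent output mined at BlockIndex 90 with the
// tip at BlockIndex 100 reports 100 - 90 + 1 = 11 confirmations, while a memory-pool entry
// (stored with BlockIndex == -1) reports 0 confirmations and a null BlockIndex.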
public QueryResult<QueryAddressItem> AddressHistory(string address, int? offset, int limit)
{
   // make sure fields are computed
   AddressComputedTable addressComputedTable = ComputeAddressBalance(address);

   IQueryable<AddressHistoryComputedTable> filter = mongoDb.AddressHistoryComputedTable.AsQueryable()
      .Where(t => t.Address == address);

   SyncBlockInfo storeTip = globalState.StoreTip;
   if (storeTip == null)
   {
      // this can happen if the node is in the middle of a reorg
      return new QueryResult<QueryAddressItem>
      {
         Items = Enumerable.Empty<QueryAddressItem>(),
         Offset = 0,
         Limit = limit,
         Total = 0
      };
   }

   // This will first perform one db query.
   long total = addressComputedTable.CountSent + addressComputedTable.CountReceived + addressComputedTable.CountStaked + addressComputedTable.CountMined;

   // Filter by the position, in the order of the first entry being 1 and the second entry being 2.
   filter = filter.OrderBy(s => s.Position);

   long startPosition = offset ?? total - limit;
   long endPosition = startPosition + limit;

   // Get all items that are higher than the start position and lower than the end position.
   var list = filter.Where(w => w.Position > startPosition && w.Position <= endPosition).ToList();

   // Loop over all transaction IDs and get the transaction object.
   IEnumerable<QueryAddressItem> transactions = list.Select(item => new QueryAddressItem
   {
      BlockIndex = item.BlockIndex,
      Value = item.AmountInOutputs - item.AmountInInputs,
      EntryType = item.EntryType,
      TransactionHash = item.TransactionId,
      Confirmations = storeTip.BlockIndex + 1 - item.BlockIndex
   });

   IEnumerable<QueryAddressItem> mempoolTransactions = null;

   if (offset == total)
   {
      List<MapMempoolAddressBag> mempoolAddressBag = MempoolBalance(address);

      mempoolTransactions = mempoolAddressBag.Select(item => new QueryAddressItem
      {
         BlockIndex = 0,
         Value = item.AmountInOutputs - item.AmountInInputs,
         EntryType = item.AmountInOutputs > item.AmountInInputs ? "receive" : "send",
         TransactionHash = item.Mempool.TransactionId,
         Confirmations = 0
      });
   }

   List<QueryAddressItem> allTransactions = new();

   if (mempoolTransactions != null)
   {
      allTransactions.AddRange(mempoolTransactions);
   }

   allTransactions.AddRange(transactions);

   return new QueryResult<QueryAddressItem>
   {
      Items = allTransactions,
      Offset = (int)startPosition,
      Limit = limit,
      Total = total
   };
}
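// Worked example of the paging window above: with total = 25 history rows and limit = 10,
// a null offset gives startPosition = 15, so rows with Position 16..25 (the newest page)
// are returned; mempool entries are merged in only when the requested offset equals total.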
/// <summary>
/// Compute the balance and history of a given address.
/// If the address already has history, only the difference is computed.
/// The difference is any new entries related to the given address since the last time it was computed.
///
/// Edge cases that need special handling:
/// - two inputs in the same transaction
/// - two outputs in the same transaction
/// - outputs and inputs in the same transaction
///
/// Paging:
/// We use a computed field called position that is incremented on each entry that is added to the list.
/// The position is indexed but is only directly related to the given address.
/// When paging is requested we fetch the required rows directly (no need to perform a table scan).
///
/// Resource access:
/// There are concurrency concerns around computing the tables.
/// Users may call this method concurrently and compute the data simultaneously; this is mostly wasteful CPU work,
/// as the tables are idempotent: the first call will compute and persist the computed data, and the second
/// will just fail to persist any existing entries. To handle this we use OCC (Optimistic Concurrency Control)
/// on the block height; if the version currently on disk is not the same as when the row was read,
/// another process has already calculated the latest additional entries.
/// </summary>
private AddressComputedTable ComputeAddressBalance(string address)
{
   //if (globalState.IndexModeCompleted == false)
   //{
   //   // do not compute tables if indexes have not run.
   //   throw new ApplicationException("node in syncing process");
   //}

   FilterDefinition<AddressComputedTable> addrFilter = Builders<AddressComputedTable>.Filter
      .Where(f => f.Address == address);
   AddressComputedTable addressComputedTable = mongoDb.AddressComputedTable.Find(addrFilter).FirstOrDefault();

   if (addressComputedTable == null)
   {
      addressComputedTable = new AddressComputedTable() { Id = address, Address = address, ComputedBlockIndex = 0 };
      mongoDb.AddressComputedTable.ReplaceOne(addrFilter, addressComputedTable, new ReplaceOptions { IsUpsert = true });
   }

   SyncBlockInfo storeTip = globalState.StoreTip;
   if (storeTip == null)
   {
      // this can happen if the node is in the middle of a reorg
      return addressComputedTable;
   }

   long currentHeight = addressComputedTable.ComputedBlockIndex;
   long tipHeight = storeTip.BlockIndex;

   IQueryable<OutputTable> filterOutputs = mongoDb.OutputTable.AsQueryable()
      .Where(t => t.Address == address)
      .Where(b => b.BlockIndex > currentHeight && b.BlockIndex <= tipHeight);

   IQueryable<InputTable> filterInputs = mongoDb.InputTable.AsQueryable()
      .Where(t => t.Address == address)
      .Where(b => b.BlockIndex > currentHeight && b.BlockIndex <= tipHeight);

   long countReceived = 0, countSent = 0, countStaked = 0, countMined = 0;
   long received = 0, sent = 0, staked = 0, mined = 0;
   long maxHeight = 0;

   var history = new Dictionary<string, AddressHistoryComputedTable>();
   var transactions = new Dictionary<string, MapAddressBag>();
   var utxoToAdd = new Dictionary<string, AddressUtxoComputedTable>();
   var utxoToDelete = new Dictionary<string, Outpoint>();

   foreach (OutputTable item in filterOutputs)
   {
      if (item.BlockIndex > currentHeight && item.BlockIndex <= tipHeight)
      {
         maxHeight = Math.Max(maxHeight, item.BlockIndex);

         if (transactions.TryGetValue(item.Outpoint.TransactionId, out MapAddressBag current))
         {
            current.CoinBase = item.CoinBase;
            current.CoinStake = item.CoinStake;
            current.Ouputs.Add(item);
         }
         else
         {
            var bag = new MapAddressBag { BlockIndex = item.BlockIndex, CoinBase = item.CoinBase, CoinStake = item.CoinStake };
            bag.Ouputs.Add(item);
            transactions.Add(item.Outpoint.TransactionId, bag);
         }

         // add to the utxo table
         utxoToAdd.Add(item.Outpoint.ToString(), new AddressUtxoComputedTable
         {
            Outpoint = item.Outpoint,
            BlockIndex = item.BlockIndex,
            Address = item.Address,
            CoinBase = item.CoinBase,
            CoinStake = item.CoinStake,
            ScriptHex = item.ScriptHex,
            Value = item.Value
         });
      }
   }

   foreach (InputTable item in filterInputs)
   {
      if (item.BlockIndex > currentHeight && item.BlockIndex <= tipHeight)
      {
         maxHeight = Math.Max(maxHeight, item.BlockIndex);

         if (transactions.TryGetValue(item.TrxHash, out MapAddressBag current))
         {
            current.Inputs.Add(item);
         }
         else
         {
            var bag = new MapAddressBag { BlockIndex = item.BlockIndex };
            bag.Inputs.Add(item);
            transactions.Add(item.TrxHash, bag);
         }

         // remove from the utxo table
         if (!utxoToAdd.Remove(item.Outpoint.ToString()))
         {
            // if not found in memory we need to delete it from disk
            utxoToDelete.Add(item.Outpoint.ToString(), item.Outpoint);
         }
      }
   }

   if (transactions.Any())
   {
      foreach ((string key, MapAddressBag mapAddressBag) in transactions.OrderBy(o => o.Value.BlockIndex))
      {
         var historyItem = new AddressHistoryComputedTable
         {
            Address = addressComputedTable.Address,
            TransactionId = key,
            BlockIndex = Convert.ToUInt32(mapAddressBag.BlockIndex),
            Id = $"{key}-{address}",
         };

         history.Add(key, historyItem);

         foreach (OutputTable output in mapAddressBag.Ouputs)
         {
            historyItem.AmountInOutputs += output.Value;
         }

         foreach (InputTable input in mapAddressBag.Inputs)
         {
            historyItem.AmountInInputs += input.Value;
         }

         if (mapAddressBag.CoinBase)
         {
            countMined++;
            mined += historyItem.AmountInOutputs;
            historyItem.EntryType = "mine";
         }
         else if (mapAddressBag.CoinStake)
         {
            countStaked++;
            staked += historyItem.AmountInOutputs - historyItem.AmountInInputs;
            historyItem.EntryType = "stake";
         }
         else
         {
            received += historyItem.AmountInOutputs;
            sent += historyItem.AmountInInputs;

            if (historyItem.AmountInOutputs > historyItem.AmountInInputs)
            {
               countReceived++;
               historyItem.EntryType = "receive";
            }
            else
            {
               countSent++;
               historyItem.EntryType = "send";
            }
         }
      }

      long totalCount = countSent + countReceived + countMined + countStaked;
      if (totalCount < history.Values.Count)
      {
         throw new ApplicationException("Failed to compute history correctly");
      }

      // each entry is assigned an incremental id to improve the efficiency of paging.
      long position = addressComputedTable.CountSent + addressComputedTable.CountReceived + addressComputedTable.CountStaked + addressComputedTable.CountMined;
      foreach (AddressHistoryComputedTable historyValue in history.Values.OrderBy(o => o.BlockIndex))
      {
         historyValue.Position = ++position;
      }

      addressComputedTable.Received += received;
      addressComputedTable.Staked += staked;
      addressComputedTable.Mined += mined;
      addressComputedTable.Sent += sent;
      addressComputedTable.Available = addressComputedTable.Received + addressComputedTable.Mined + addressComputedTable.Staked - addressComputedTable.Sent;
      addressComputedTable.CountReceived += countReceived;
      addressComputedTable.CountSent += countSent;
      addressComputedTable.CountStaked += countStaked;
      addressComputedTable.CountMined += countMined;
      addressComputedTable.CountUtxo = addressComputedTable.CountUtxo - utxoToDelete.Count + utxoToAdd.Count;

      addressComputedTable.ComputedBlockIndex = maxHeight; // the last block a trx was received to this address

      if (addressComputedTable.Available < 0)
      {
         throw new ApplicationException("Failed to compute balance correctly");
      }

      try
      {
         // only push to the store if the same version of the computed block index is present (meaning the entry was not modified).
         // the block height must change if new trx are added, so use it to apply OCC (Optimistic Concurrency Control)
         // to determine if a newer entry was pushed to the store.
         FilterDefinition<AddressComputedTable> updateFilter = Builders<AddressComputedTable>.Filter
            .Where(f => f.Address == address && f.ComputedBlockIndex == currentHeight);

         // update the computed address entry, this will throw if a newer version is in the store
         mongoDb.AddressComputedTable.ReplaceOne(updateFilter, addressComputedTable, new ReplaceOptions { IsUpsert = true });
      }
      catch (MongoWriteException nwe)
      {
         if (nwe.WriteError.Category != ServerErrorCategory.DuplicateKey)
         {
            throw;
         }

         // the address was already modified, fetch the latest version
         addressComputedTable = mongoDb.AddressComputedTable.Find(addrFilter).FirstOrDefault();

         return addressComputedTable;
      }

      var historyTask = Task.Run(() =>
      {
         try
         {
            // if we managed to update the address we can safely insert the history
            mongoDb.AddressHistoryComputedTable.InsertMany(history.Values, new InsertManyOptions { IsOrdered = false });
         }
         catch (MongoBulkWriteException mbwex)
         {
            // in case of reorgs trx are not deleted from the store,
            // so if a trx is already written and we attempt to write it again
            // the write will fail and throw; we ignore such errors.
            // (IsOrdered = false will attempt all entries and only throw when done)
            if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
            {
               throw;
            }
         }
      });

      Task.WaitAll(historyTask);
   }

   return addressComputedTable;
}
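// A minimal sketch (illustrative, not part of the original source) isolating the OCC
// pattern the method above applies to ComputedBlockIndex: the filter pins the version
// that was read, so a competing writer that advanced the row first makes the upsert
// collide on _id and surface as a duplicate-key write error.
private void ReplaceWithOcc(IMongoCollection<AddressComputedTable> collection, AddressComputedTable row, long readBlockIndex)
{
   FilterDefinition<AddressComputedTable> occFilter = Builders<AddressComputedTable>.Filter
      .Where(f => f.Address == row.Address && f.ComputedBlockIndex == readBlockIndex);

   // Matches nothing if another process already advanced ComputedBlockIndex; the upsert
   // then attempts an insert with an existing _id and throws MongoWriteException.
   collection.ReplaceOne(occFilter, row, new ReplaceOptions { IsUpsert = true });
}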
public async Task<Statistics> Statistics()
{
   SyncConnection connection = syncConnection;
   var client = clientFactory.Create(connection);

   var stats = new Statistics { Symbol = syncConnection.Symbol };

   try
   {
      stats.Blockchain = await client.GetBlockchainInfo();
      stats.Network = await client.GetNetworkInfo();
   }
   catch (Exception ex)
   {
      stats.Error = ex.Message;
      return stats;
   }

   stats.TransactionsInPool = storage.GetMemoryTransactionsCount();

   try
   {
      SyncBlockInfo latestBlock = storage.GetLatestBlock();

      if (latestBlock != null)
      {
         stats.SyncBlockIndex = latestBlock.BlockIndex;
         stats.Progress = $"{stats.SyncBlockIndex}/{stats.Blockchain.Blocks} - {stats.Blockchain.Blocks - stats.SyncBlockIndex}";
         stats.BlocksLeftToSync = stats.Blockchain.Blocks - stats.SyncBlockIndex;

         if (globalState.IndexMode)
         {
            stats.Progress = $"{stats.Progress} (indexing)";
         }

         double totalSeconds = syncConnection.RecentItems.Sum(s => s.Duration.TotalSeconds);
         stats.AvgBlockPersistInSeconds = Math.Round(totalSeconds / syncConnection.RecentItems.Count, 2);

         long totalSize = syncConnection.RecentItems.Sum(s => s.Size);
         stats.AvgBlockSizeKb = Math.Round((double)totalSize / syncConnection.RecentItems.Count, 0);

         //var groupedByMin = syncConnection.RecentItems
         //   //.GroupBy(g => g.Inserted.Hour + g.Inserted.Minute)
         //   .OrderByDescending(o => o.Inserted)
         //   .GroupBy(g => g.Inserted.Minute)
         //   .Take(10)
         //   .ToDictionary(s => s.Key, s => s.ToList().Count);

         //int totalBlocks = groupedByMin.Skip(1).Take(5).Sum(s => s.Value);
         //int totalSecondsPerBlok = groupedByMin.Skip(1).Take(5).Count();
         //stats.BlocksPerMinute = (int)Math.Round((double)totalBlocks / totalSecondsPerBlok);

         stats.BlocksPerMinute = syncConnection.RecentItems.Count(w => w.Inserted > DateTime.UtcNow.AddMinutes(-1));
      }
   }
   catch (Exception ex)
   {
      stats.Progress = ex.Message;
   }

   return stats;
}
private IEnumerable<SyncTransactionAddressItem> SelectAddressWithPool(SyncBlockInfo current, string address, bool availableOnly)
{
   // this code will not work as we need to have the actual transactions to deduct their value in case a memory pool transaction is found.
   // there is no way to know if a mem pool transaction belongs to an address without heavily querying mongo, so the best solution (for now)
   // is to just fetch the entire history; this could be limited to the available balance only.

   ////var confirmations = 3;
   ////var res = this.MapTransactionAddress.Aggregate()
   ////   .Match(new BsonDocument
   ////   {
   ////      new BsonElement("Addresses", new BsonArray(new[] { address })),
   ////      // in case we only want unspent values uncomment this row
   ////      //new BsonElement("SpendingTransactionId", new BsonDocument("$eq", BsonNull.Value))
   ////   })
   ////   .Project(new BsonDocument
   ////   {
   ////      new BsonElement("confirmed", new BsonDocument(new BsonElement("$lte", new BsonArray(new[] {"$BlockIndex", (object)(current.BlockIndex - confirmations + 1)})))),
   ////      new BsonElement("val", new BsonString("$Value")),
   ////      new BsonElement("spent", new BsonDocument(new BsonElement("$ne", new BsonArray(new[] {"$SpendingTransactionId", (object)BsonNull.Value})))),
   ////   })
   ////   .Group(new BsonDocument
   ////   {
   ////      new BsonElement("_id", new BsonDocument{ {"Confirmed", new BsonString("$confirmed") }, { new BsonDocument("Spent", new BsonString("$spent")) }}),
   ////      new BsonElement("TotalAmount", new BsonDocument("$sum", new BsonString("$val"))),
   ////      new BsonElement("Count", new BsonDocument("$sum", 1)),
   ////   });

   ////var results = res.ToList().Select(s => BsonSerializer.Deserialize<SelectBalanceResult>(s)).ToList();
   ////var enumerated = results.ToList();
   ////var received = enumerated.Where(w => w.id.Confirmed).Sum(s => s.TotalAmount);
   ////var sent = enumerated.Where(w => w.id.Spent).Sum(s => s.TotalAmount);
   ////var unconfirmed = enumerated.Where(w => !w.id.Confirmed).Sum(s => s.TotalAmount);

   ////// the mongo aggregator may change the precision so we correct it here, limiting to 8 digits.
   ////var balanceNew = new SyncTransactionAddressBalance
   ////{
   ////   Received = System.Convert.ToDecimal(received.ToString("#0.########")),
   ////   Sent = System.Convert.ToDecimal(sent.ToString("#0.########")),
   ////   Unconfirmed = System.Convert.ToDecimal(unconfirmed.ToString("#0.########")),
   ////};

   var builder = Builders<MapTransactionAddress>.Filter;
   var filter = builder.Eq(info => info.Addresses, new List<string> { address });

   if (availableOnly)
   {
      // we only want spendable transactions
      filter = filter & builder.Eq(info => info.SpendingTransactionId, null);
   }

   var stopwatch = Stopwatch.StartNew();

   var addrs = this.MapTransactionAddress.Find(filter).ToList();

   stopwatch.Stop();

   this.tracer.Trace("Select", string.Format("Seconds = {0} - UnspentOnly = {1} - Addr = {2} - Items = {3}", stopwatch.Elapsed.TotalSeconds, availableOnly, address, addrs.Count()), ConsoleColor.Yellow);

   // this creates a copy of the collection (to avoid thread issues)
   var pool = this.MemoryTransactions.Values;

   if (pool.Any())
   {
      // mark trx outputs as spent if they exist in the pool
      var addrsupdate = addrs;
      this.GetPoolOutputs(pool).ForEach(f =>
      {
         var adr = addrsupdate.FirstOrDefault(a => a.TransactionId == f.Item1.TxId && a.Index == f.Item1.VOut);
         if (adr != null)
         {
            adr.SpendingTransactionId = f.Item2;
         }
      });

      // if only spendable transactions are to be returned we need to remove
      // any that have been marked as spent by a transaction in the pool
      if (availableOnly)
      {
         addrs = addrs.Where(d => d.SpendingTransactionId == null).ToList();
      }

      // add all pool transactions to the main output
      var paddr = this.PoolToMapTransactionAddress(pool, address).ToList();
      addrs = addrs.Concat(paddr).ToList();
   }

   // map to the return type and calculate confirmations
   return addrs.Select(s => new SyncTransactionAddressItem
   {
      Address = address,
      Index = s.Index,
      TransactionHash = !s.CoinBase ? s.TransactionId : string.Empty,
      BlockIndex = s.BlockIndex == -1 ? default(long?) : s.BlockIndex,
      Value = System.Convert.ToDecimal(s.Value),
      Confirmations = s.BlockIndex == -1 ? 0 : current.BlockIndex - s.BlockIndex + 1,
      SpendingTransactionHash = s.SpendingTransactionId,
      CoinBase = s.CoinBase ? s.TransactionId : string.Empty,
      ScriptHex = s.ScriptHex
   });
}
public SyncBlockInfo PushStorageBatch(StorageBatch storageBatch)
{
   if (globalState.IndexModeCompleted)
   {
      if (globalState.IbdMode() == false)
      {
         if (globalState.LocalMempoolView.Any())
         {
            var toRemoveFromMempool = storageBatch.TransactionBlockTable.Select(s => s.TransactionId).ToList();

            FilterDefinitionBuilder<MempoolTable> builder = Builders<MempoolTable>.Filter;
            FilterDefinition<MempoolTable> filter = builder.In(mempoolItem => mempoolItem.TransactionId, toRemoveFromMempool);

            db.Mempool.DeleteMany(filter);

            foreach (string mempooltrx in toRemoveFromMempool)
            {
               globalState.LocalMempoolView.Remove(mempooltrx, out _);
            }
         }
      }
   }

   var blockTableTask = storageBatch.BlockTable.Values.Any()
      ? db.BlockTable.InsertManyAsync(storageBatch.BlockTable.Values, new InsertManyOptions { IsOrdered = false })
      : Task.CompletedTask;

   var transactionBlockTableTask = storageBatch.TransactionBlockTable.Any()
      ? db.TransactionBlockTable.InsertManyAsync(storageBatch.TransactionBlockTable, new InsertManyOptions { IsOrdered = false })
      : Task.CompletedTask;

   var outputTableTask = storageBatch.OutputTable.Any()
      ? db.OutputTable.InsertManyAsync(storageBatch.OutputTable.Values, new InsertManyOptions { IsOrdered = false })
      : Task.CompletedTask;

   Task transactionTableTask = Task.Run(() =>
   {
      try
      {
         if (storageBatch.TransactionTable.Any())
         {
            db.TransactionTable.InsertMany(storageBatch.TransactionTable, new InsertManyOptions { IsOrdered = false });
         }
      }
      catch (MongoBulkWriteException mbwex)
      {
         // transactions are a special case, they are not deleted from the store in case of reorgs
         // because they will just be included in other blocks, so we ignore if the key is already present
         if (mbwex.WriteErrors.Any(e => e.Category != ServerErrorCategory.DuplicateKey))
         {
            throw;
         }
      }
   });

   var utxos = new List<UnspentOutputTable>(storageBatch.OutputTable.Values.Count);

   foreach (OutputTable outputTable in storageBatch.OutputTable.Values)
   {
      if (outputTable.Address.Equals(OpReturnAddress))
      {
         continue;
      }

      // TODO: filter out outputs that are already spent in the storageBatch.InputTable table
      // such inputs will get deleted anyway in the next operation of UnspentOutputTable.DeleteMany
      // this means we should probably make the storageBatch.InputTable a dictionary as well.
      utxos.Add(new UnspentOutputTable
      {
         Address = outputTable.Address,
         Outpoint = outputTable.Outpoint,
         Value = outputTable.Value,
         BlockIndex = outputTable.BlockIndex
      });
   }

   var unspentOutputTableTask = utxos.Any()
      ? db.UnspentOutputTable.InsertManyAsync(utxos, new InsertManyOptions { IsOrdered = false })
      : Task.CompletedTask;

   var inputTableTask = Task.CompletedTask;

   if (storageBatch.InputTable.Any())
   {
      var utxosLookups = FetchUtxos(
         storageBatch.InputTable
            .Where(_ => _.Address == null)
            .Select(_ => _.Outpoint));

      foreach (InputTable input in storageBatch.InputTable)
      {
         if (input.Address != null)
         {
            continue;
         }

         string key = input.Outpoint.ToString();
         input.Address = utxosLookups[key].Address;
         input.Value = utxosLookups[key].Value;
      }

      inputTableTask = db.InputTable.InsertManyAsync(storageBatch.InputTable, new InsertManyOptions { IsOrdered = false });
   }

   Task.WaitAll(blockTableTask, transactionBlockTableTask, outputTableTask, inputTableTask, transactionTableTask, unspentOutputTableTask);

   if (storageBatch.InputTable.Any())
   {
      // TODO: if earlier we filtered out outputs that are already spent and not pushed to the utxo table,
      // we would not need to try and delete such outputs here because they were never pushed to the store.
      var outpointsFromNewInput = storageBatch.InputTable
         .Select(_ => _.Outpoint)
         .ToList();

      var filterToDelete = Builders<UnspentOutputTable>.Filter
         .Where(_ => outpointsFromNewInput.Contains(_.Outpoint));

      var deleteResult = db.UnspentOutputTable.DeleteMany(filterToDelete);

      if (deleteResult.DeletedCount != outpointsFromNewInput.Count)
      {
         throw new ApplicationException($"Delete of unspent outputs did not complete successfully : {deleteResult.DeletedCount} deleted but {outpointsFromNewInput.Count} expected");
      }
   }

   // allow any extensions to push to the repo before we complete the block.
   OnPushStorageBatch(storageBatch);

   string lastBlockHash = null;
   long blockIndex = 0;
   var markBlocksAsComplete = new List<UpdateOneModel<BlockTable>>();

   foreach (BlockTable mapBlock in storageBatch.BlockTable.Values.OrderBy(b => b.BlockIndex))
   {
      FilterDefinition<BlockTable> filter = Builders<BlockTable>.Filter.Eq(block => block.BlockIndex, mapBlock.BlockIndex);
      UpdateDefinition<BlockTable> update = Builders<BlockTable>.Update.Set(blockInfo => blockInfo.SyncComplete, true);

      markBlocksAsComplete.Add(new UpdateOneModel<BlockTable>(filter, update));
      lastBlockHash = mapBlock.BlockHash;
      blockIndex = mapBlock.BlockIndex;
   }

   // mark each block as complete
   db.BlockTable.BulkWrite(markBlocksAsComplete, new BulkWriteOptions() { IsOrdered = true });

   SyncBlockInfo block = storage.BlockByIndex(blockIndex);

   if (block.BlockHash != lastBlockHash)
   {
      throw new ArgumentException($"Expected hash {lastBlockHash} for block {blockIndex} but was {block.BlockHash}");
   }

   return block;
}
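// A hedged sketch (illustrative helper, not the original implementation) of the
// optimisation the two TODO comments above describe: outputs that are spent by inputs
// within the same batch never need to reach the UnspentOutputTable at all.
private static List<UnspentOutputTable> UtxosNotSpentInBatch(StorageBatch storageBatch)
{
   // Outpoints consumed by inputs in this very batch.
   var spentInBatch = new HashSet<string>(storageBatch.InputTable.Select(i => i.Outpoint.ToString()));

   return storageBatch.OutputTable.Values
      .Where(o => !spentInBatch.Contains(o.Outpoint.ToString()))
      .Select(o => new UnspentOutputTable
      {
         Address = o.Address,
         Outpoint = o.Outpoint,
         Value = o.Value,
         BlockIndex = o.BlockIndex
      })
      .ToList();

   // Note: with this filter in place, the DeletedCount check above would also need to
   // exclude outpoints that never reached the store.
}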
private void InvalidBlockFound(SyncBlockInfo lastBlock, SyncBlockTransactionsOperation item)
{
   // A re-org happened.
   throw new SyncRestartException();
}
private void UpdateLastBlockNextHash(SyncBlockInfo block)
{
   data.UpdateLastBlockNextHash(block.BlockHash, block.NextBlockHash);
}