/// <summary>
/// Replays every header of <paramref name="chain"/> (enumerated with
/// <c>ToEnumerable(false)</c>) into the slim chain via <c>TrySetTip</c>,
/// then persists the slim chain with <c>SaveChainInCache</c>.
/// </summary>
/// <param name="chain">Source chain whose headers are copied.</param>
private void LoadSlimAndSaveToSlimFormat(ConcurrentChain chain)
{
    // Re-apply each header on top of the slim chain; Previous may be null at genesis.
    foreach (var header in chain.ToEnumerable(false))
    {
        _Chain.TrySetTip(header.HashBlock, header.Previous?.HashBlock);
    }

    SaveChainInCache();
}
/// <summary>
/// Builds a chain from the serialized main-chain test data and asserts that
/// every header passes <c>Validate</c> against mainnet consensus rules.
/// </summary>
public void CanValidateChain()
{
    var mainChain = new ConcurrentChain(LoadMainChain(), Network.Main);

    foreach (var header in mainChain.ToEnumerable(false))
    {
        Assert.True(header.Validate(Network.Main));
    }
}
/// <summary>
/// Builds a chain from the serialized main-chain test data and asserts that
/// every header passes <c>Validate</c> against the configured network.
/// </summary>
public void CanValidateChain()
{
    var mainChain = new ConcurrentChain(this.network, this.LoadMainChain());

    foreach (var header in mainChain.ToEnumerable(false))
    {
        Assert.True(header.Validate(this.network));
    }
}
/// <summary>
/// Re-indexes the on-disk Stratis mainnet block store, asserts the expected
/// number of indexed blocks (103952 in the test data set), and verifies that
/// every block reachable through the chain passes <c>BlockValidator.CheckBlock</c>.
/// </summary>
public void IndexTheFullChain()
{
    var diskStore = new BlockStore(TestDataLocations.BlockFolderLocation, Network.StratisMain);
    var indexed = new IndexedBlockStore(new InMemoryNoSqlRepository(), diskStore);

    // The re-index count pins the size of the test data set.
    Assert.Equal(103952, indexed.ReIndex());

    var chain = diskStore.GetChain();
    foreach (var chained in chain.ToEnumerable(false))
    {
        // Fetch each block through the index and validate it.
        Assert.True(BlockValidator.CheckBlock(indexed.Get(chained.HashBlock)));
    }
}
/// <summary>
/// Grows a mainnet chain by three blocks, then enumerates it two ways
/// (<c>EnumerateAfter(Genesis)</c> and <c>ToEnumerable(false)</c>) while calling
/// <c>GetBlock(0)</c> inside each enumeration, mixing lookup with iteration.
/// </summary>
public void CanIterateConcurrentChain()
{
    var chain = new ConcurrentChain(Network.Main);

    // Append three blocks, as the original sequence of calls did.
    for (int i = 0; i < 3; i++)
    {
        AppendBlock(chain);
    }

    foreach (var ignored in chain.EnumerateAfter(chain.Genesis))
    {
        chain.GetBlock(0);
    }

    foreach (var ignored in chain.ToEnumerable(false))
    {
        chain.GetBlock(0);
    }
}
/// <summary>
/// Grows a chain on the configured network by three blocks, then enumerates it
/// two ways (<c>EnumerateAfter(Genesis)</c> and <c>ToEnumerable(false)</c>) while
/// calling <c>GetBlock(0)</c> inside each enumeration, mixing lookup with iteration.
/// </summary>
public void CanIterateConcurrentChain()
{
    var chain = new ConcurrentChain(this.network);

    // Append three blocks, as the original sequence of calls did.
    for (int i = 0; i < 3; i++)
    {
        this.AppendBlock(chain);
    }

    foreach (var ignored in chain.EnumerateAfter(chain.Genesis))
    {
        chain.GetBlock(0);
    }

    foreach (var ignored in chain.ToEnumerable(false))
    {
        chain.GetBlock(0);
    }
}
/// <summary>
/// Enumerates the chain with <c>ToEnumerable(true)</c> and returns the first
/// header whose block time (as UTC) is at or before <paramref name="utcTime"/>;
/// returns null when no header qualifies.
/// </summary>
/// <param name="chain">Chain to search.</param>
/// <param name="utcTime">Upper bound (inclusive) on the block time, in UTC.</param>
public static ChainedBlock GetClosestToTimeBlock(this ConcurrentChain chain, DateTime utcTime)
{
    foreach (var header in chain.ToEnumerable(true))
    {
        if (header.Header.BlockTime.UtcDateTime <= utcTime)
        {
            return header;
        }
    }

    return null;
}
/// <summary>
/// Loads the first 5000 blocks of the Stratis main chain from the test data folder,
/// fills block/transaction/map stores from them, and asserts that
/// <c>BlockValidator.CheckAndComputeStake</c> accepts every block. When an RPC client
/// is available, the computed stake modifier v2 and proof hash are also compared
/// against the values the node reports for the same block.
/// </summary>
public void CheckBlockProofOfStake()
{
    var totalblocks = 5000; // fill only a small portion so the test won't be too long

    var mainStore = new BlockStore(TestDataLocations.BlockFolderLocation, Network.StratisMain);

    // create the stores
    BlockStore store = CreateBlockStore();
    var index = 0;
    var blockStore = new NoSqlBlockRepository();
    foreach (StoredBlock storedBlock in mainStore.Enumerate(false).Take(totalblocks))
    {
        store.Append(storedBlock.Item);
        // NOTE(review): returned Task is not awaited — presumably the in-memory
        // repository completes synchronously; confirm, otherwise later reads may race.
        blockStore.PutAsync(storedBlock.Item);
        index++;
    }

    // build the chain
    ConcurrentChain chain = store.GetChain();

    // fill the transaction store (and the tx-hash -> block-hash map)
    var trxStore = new NoSqlTransactionRepository();
    var mapStore = new BlockTransactionMapStore();
    foreach (ChainedBlock chainedBlock in chain.ToEnumerable(false).Take(totalblocks))
    {
        Block block = blockStore.GetBlock(chainedBlock.HashBlock);
        foreach (Transaction blockTransaction in block.Transactions)
        {
            trxStore.Put(blockTransaction);
            // NOTE(review): Task not awaited here either — same caveat as above.
            mapStore.PutAsync(blockTransaction.GetHash(), block.GetHash());
        }
    }

    // Optional RPC cross-check: skipped entirely when no local node is configured.
    RPCClient client = null;
    if (!pos_RPCClientTests.noClient)
    {
        client = new RPCClient(new NetworkCredential("rpcuser", "rpcpassword"), new Uri("http://127.0.0.1:" + Network.StratisMain.RPCPort), Network.StratisMain);
    }

    var stakeChain = new MemoryStakeChain(Network.StratisMain);

    // validate the stake transaction of every block
    foreach (ChainedBlock item in chain.ToEnumerable(false).Take(totalblocks).ToList())
    {
        Block block = blockStore.GetBlock(item.HashBlock);
        BlockStake blockStake;
        Assert.True(BlockValidator.CheckAndComputeStake(blockStore, trxStore, mapStore, stakeChain, chain, item, block, out blockStake));
        // Each accepted block's stake is recorded so later blocks can build on it.
        stakeChain.Set(item.HashBlock, blockStake);
        if (item.Height == 1125)
        {
            // NOTE(review): unused local — looks like a debugging leftover for block 1125.
            var g = block.ToHex();
        }
        if (client != null)
        {
            // Compare the locally computed stake values with the node's view of the block.
            RPCBlock fetched = client.GetRPCBlockAsync(item.HashBlock).Result;
            Assert.Equal(uint256.Parse(fetched.modifierv2), blockStake.StakeModifierV2);
            Assert.Equal(uint256.Parse(fetched.proofhash), blockStake.HashProof);
        }
    }
}
/// <summary>
/// Once the blockchain headers have been synchronised this method will attempt to find all
/// transactions relevant to a single address. To find the transactions there are two options:
/// first option the full blocks can be completely downloaded and searched which is what a full
/// node would do; second option is to set a bloom filter and then request the desired blocks
/// from a connected full node. This implementation uses the bloom-filter option.
/// </summary>
/// <param name="chain">Synchronised header chain used to select blocks in the time window.</param>
/// <param name="node">Connected node that serves the filtered (merkle) blocks.</param>
/// <param name="addr">Address whose hash is loaded into the bloom filter.</param>
/// <param name="start">Exclusive lower bound on block time.</param>
/// <param name="end">Exclusive upper bound on block time.</param>
/// <param name="ct">Cancels the wait for the search to complete.</param>
/// <returns>IDs of transactions matched by the remote node's filter.</returns>
private static async Task<List<uint256>> GetTransactions(ConcurrentChain chain, Node node, BitcoinPubKeyAddress addr, DateTimeOffset start, DateTimeOffset end, CancellationToken ct)
{
    logger.DebugFormat("Transaction search task commencing...");
    ct.ThrowIfCancellationRequested();
    ManualResetEventSlim searchCompleteSignal = new ManualResetEventSlim();
    List<uint256> txs = new List<uint256>();

    var searchBlocks = chain.ToEnumerable(true).Where(x => x.Header.BlockTime > start && x.Header.BlockTime < end).ToList();

    // Only search if there are some blocks in the period of interest.
    if (searchBlocks.Count > 0) // FIX: List<T>.Count property instead of LINQ Count()
    {
        int searchBlocksIndex = 0;
        BloomFilter filter = new BloomFilter(_nElements, _falsePositiveRate, _nTweakIn, BloomFlags.UPDATE_NONE);
        logger.DebugFormat("Setting bloom for address " + addr.Hash + ".");
        filter.Insert(addr.Hash.ToBytes());

        // FIX: keep a reference to the handler so it can be unsubscribed afterwards.
        // Previously the anonymous handler stayed attached to the node for its whole
        // lifetime, continuing to fire (and mutate txs) after this method returned.
        NodeEventMessageIncoming onMessageReceived = (node1, message) =>
        {
            switch (message.Message.Payload)
            {
                case MerkleBlockPayload merkleBlk:
                    // Collect every transaction the remote node matched in this block.
                    foreach (var tx in merkleBlk.Object.PartialMerkleTree.GetMatchedTransactions())
                    {
                        logger.DebugFormat("Matched merkle block TX ID {0}.", tx);
                        txs.Add(tx);
                    }
                    // Request the next filtered block, or signal completion when done.
                    if (searchBlocksIndex < searchBlocks.Count)
                    {
                        var dp = new GetDataPayload(new InventoryVector(InventoryType.MSG_FILTERED_BLOCK, searchBlocks[searchBlocksIndex++].HashBlock));
                        node.SendMessage(dp);
                    }
                    else
                    {
                        searchCompleteSignal.Set();
                    }
                    break;
                case TxPayload tx:
                    logger.DebugFormat("TX ID {0}.", tx.Object.GetHash());
                    break;
            }
        };

        node.MessageReceived += onMessageReceived;
        try
        {
            node.SendMessage(new FilterLoadPayload(filter));
            // Kick off the pipeline with the first block; the handler requests the rest.
            var dataPayload = new GetDataPayload(new InventoryVector(InventoryType.MSG_FILTERED_BLOCK, searchBlocks[searchBlocksIndex++].HashBlock));
            node.SendMessage(dataPayload);
            await Task.Run(() =>
            {
                searchCompleteSignal.Wait(ct);
                logger.DebugFormat("Block search task completed.");
            });
        }
        finally
        {
            // Detach even on cancellation so the node stops invoking our callback.
            node.MessageReceived -= onMessageReceived;
        }
    }
    return txs;
}
/// <summary>
/// Connects to the configured peer, ensures a header chain is loaded (disk first,
/// then from the node), synchronises the chain, downloads any blocks the database
/// does not yet have in chunks of <c>ChunkSize</c>, saves them through
/// <c>_proofService</c>, persists the chain, and disconnects. Sets <c>_isRunning</c>
/// for the duration of the call; exceptions are logged, not rethrown.
/// </summary>
public void Sync()
{
    _isRunning = true;
    try
    {
        // NOTE(review): hardcoded port 16178 — confirm this matches the target network.
        var node = Node.Connect(Network.Main, new IPEndPoint(IPAddress.Parse(_settings.Nodes), 16178));
        // var node = Node.ConnectToLocal(Network.Main);
        //var connectedNodes = _group.ConnectedNodes.ToArray();
        //if (!connectedNodes.Any())
        //{
        //    _logger.Information("Waiting for nodes ... ");
        //    _isRunning = false;
        //    return;
        //}
        _logger.Information("************************************************");
        _logger.Information("Connecting to Node ");
        //_logger.Information($"Total connected nodes {connectedNodes.Length}");
        //var rand = new Random();
        //var node = connectedNodes[rand.Next(connectedNodes.Length)];
        _logger.Information($"Node selected {node.Peer.Endpoint.Address}");
        if (node.IsConnected)
        {
            node.VersionHandshake();
            _logger.Information("Connected...");
            _logger.Information($"Last Seen -->{node.LastSeen}");
            _logger.Information("Checking chain");
            // Chain bootstrap: prefer the on-disk copy, fall back to a full fetch from the node.
            if (_chain == null)
            {
                _logger.Information("Loading chain from disk...");
                LoadChain();
                if (_chain == null)
                {
                    _logger.Information("No chain found on disk, Syncing chain from node ");
                    _chain = node.GetChain();
                    _logger.Information("Chain sync completed");
                }
            }
            _logger.Information("Retrieving latest block...");
            var lastDbBlock = _proofService.GetLatestBlock();
            _logger.Information($"Latest Block {lastDbBlock}");
            _logger.Information("Synching latest chain...");
            node.SynchronizeChain(_chain);
            _lastHeight = _chain.Height;
            _logger.Information($"Lastest Chain Height: {_lastHeight}");
            // height is the height assigned to the first block we will save.
            var height = 0;
            IEnumerable<IEnumerable<uint256>> hashChunks = null;
            if (lastDbBlock == null)
            {
                // Empty database: save the whole chain from genesis.
                _logger.Information("Retrieving chained blocks from height 0");
                hashChunks = _chain.ToEnumerable(false).Select(x => x.HashBlock).Chunk(ChunkSize);
            }
            else if (lastDbBlock.Height < _chain.Height)
            {
                // Database is behind: save only the blocks after the last stored one.
                _logger.Information($"Retrieving chained blocks from height {lastDbBlock.Height}");
                hashChunks = _chain.EnumerateAfter(new uint256(lastDbBlock.Hash)).Select(x => x.HashBlock).Chunk(ChunkSize);
                height = lastDbBlock.Height + 1;
            }
            // hashChunks stays null when the database is already at the chain tip.
            if (hashChunks != null)
            {
                // NOTE(review): LongCount() enumerates the deferred chunk query once
                // here, before the foreach enumerates it again — confirm that is intended.
                _logger.Information($"Total blocks to save {_chain.Height - (lastDbBlock ?? new BlockData()).Height}, hashChunks {hashChunks.LongCount()}");
                var chunkSize = 0;
                foreach (var hashChunk in hashChunks)
                {
                    _logger.Information($"Retrieving chunks from {chunkSize} - {chunkSize + ChunkSize}");
                    var blocks = node.GetBlocks(hashChunk);
                    // NOTE(review): dataBlocks is a deferred query; the height++ side effect
                    // runs when SaveBlocks enumerates it — confirm it enumerates exactly once.
                    var dataBlocks = blocks.Select(x =>
                    {
                        var block = BlockData.Parse(x);
                        block.Height = height++;
                        return (block);
                    });
                    _proofService.SaveBlocks(dataBlocks);
                    chunkSize += ChunkSize;
                    _logger.Information($"Saved blocks");
                }
            }
            _logger.Information("Finished syncing...");
            SaveChain();
            _logger.Information("Disconnecting node....");
            node.Disconnect();
            CheckPending();
            //var indexStore = new IndexedBlockStore(new InMemoryNoSqlRepository(), _blockStore);
            // CheckAddressTransactions(newBlocks);
            _logger.Information("************************************************");
        }
        else
        {
            _logger.Information("Couldn't connect to node");
        }
    }
    catch (System.Exception ex)
    {
        // Best-effort sync: failures are logged and the next run retries.
        _logger.Information($"Exception while syncing {ex}");
    }
    finally
    {
        _isRunning = false;
    }
}