/// <summary>
/// Wraps <paramref name="message"/> in a <see cref="BroadcastableItem"/> and enqueues it
/// for gossip, serialized against other queue mutations via <c>_broadcastQueueLock</c>.
/// </summary>
/// <param name="message">The message to schedule for broadcast.</param>
private void AddBroadcastMessage(MessageBase message)
{
    lock (_broadcastQueueLock)
    {
        var wrapped = new BroadcastableItem(message);
        BroadcastQueue.Add(wrapped);
    }
}
// Consensus method for validating incoming transactions: reads fixed-size (1100-byte)
// serialized tx records from _filePath, validates each against the official UTXO set,
// appends the valid ones to the pending-tx file ("ptx"), and optionally queues the
// pending-tx file for broadcast. The temp file is always deleted before returning.
public static void ProccessTempTXforPending(string _filePath, bool needPropagate)
{
    // A well-formed temp file is a whole number of 1100-byte tx records, at least one.
    FileInfo f = new FileInfo(_filePath);
    int fl = (int)f.Length;
    if (fl % 1100 != 0 || fl < 1100)
    {
        File.Delete(_filePath);
        return;
    }
    // The file can be very large, so process it in chunks of at most 500 txs
    // to bound RAM usage.
    uint chunkcounter = 0;
    uint byteOffset = 0;
    List<Tx> txs = new List<Tx>();
    while (byteOffset < fl)
    {
        Tx Trans = BytesToTx(GetBytesFromFile(byteOffset, 1100, _filePath));
        if (Trans == null)
        {
            // One undecodable record rejects the whole file.
            File.Delete(_filePath);
            return;
        }
        txs.Add(Trans);
        byteOffset += 1100;
        chunkcounter++;
        // BUGFIX: was "chunkcounter > 500", which let a chunk grow to 501 txs,
        // contradicting the intended 500-tx cap.
        if (chunkcounter >= 500 || byteOffset == fl)
        {
            chunkcounter = 0;
            foreach (Tx TX in txs)
            {
                // TODO(original author): some virtualisation should happen here.
                if (isTxValidforPending(TX, GetOfficialUTXOAtPointer(TX.sUTXOP)))
                {
                    AppendBytesToFile(_folderPath + "ptx", TxToBytes(TX));
                }
            }
            txs = new List<Tx>();
        }
    }
    if (needPropagate)
    {
        // Queue the pending-tx file for propagation to peers.
        BroadcastQueue.Add(new BroadcastInfo(1, 2, _folderPath + "ptx"));
    }
    File.Delete(_filePath);
}
// Appends validated blocks from filePath to the official chain, updating the UTXO set and
// the pending-tx file per block, then clears all stored forks (now stale) and deletes the
// temp file. Optionally queues a chain-updated broadcast.
public static void AddBlocksToOfficialChain(string filePath, bool needPropagate)
{
    // Current blockchain chunk file that new blocks are appended to.
    string blockchainPath = GetLatestBlockChainFilePath();
    uint firstTempIndex = BitConverter.ToUInt32(GetBytesFromFile(4, 8, filePath), 0);
    uint latestTempIndex = RequestLatestBlockIndexInFile(filePath);
    // BUGFIX: "i < latestTempIndex + 1" wraps to 0 when latestTempIndex == uint.MaxValue.
    for (uint i = firstTempIndex; i <= latestTempIndex; i++)
    {
        Block b = GetBlockAtIndexInFile(i, filePath);
        if (b == null)
        {
            // A missing block means the temp file's header lied about its range — fatal.
            FatalErrorHandler(0, "no block found in data during updating official chain");
            return;
        }
        PrintBlockData(b);
        byte[] bytes = BlockToBytes(b);
        FileInfo f = new FileInfo(blockchainPath);
        if (f.Length + bytes.Length > BLOCKCHAIN_FILE_CHUNK)
        {
            // Current chunk is full: start a new chunk whose 4-byte header holds the
            // latest block index it contains.
            string name = GetNewBlockChainFilePath();
            File.WriteAllBytes(name, BitConverter.GetBytes(b.Index));
            AppendBytesToFile(name, bytes);
            // BUGFIX: keep appending subsequent blocks to the new chunk; the original
            // kept measuring the old (full) chunk and spawned a new file per block.
            blockchainPath = name;
        }
        else
        {
            // Refresh the chunk header (latest index) and append the block.
            OverWriteBytesInFile(0, blockchainPath, BitConverter.GetBytes(b.Index));
            AppendBytesToFile(blockchainPath, bytes);
        }
        UpgradeUTXOSet(b);
        UpdatePendingTXFile(b);
    }
    if (needPropagate)
    {
        BroadcastQueue.Add(new BroadcastInfo(2, 1));
    }
    // The official chain advanced, so every stored fork is now stale.
    string[] forkfiles = Directory.GetFiles(_folderPath + "fork");
    foreach (string s in forkfiles)
    {
        File.Delete(s);
    }
    Print("will delete " + filePath);
    File.Delete(filePath);
    Print("Blockchain updated!");
}
/// <summary>
/// Tries to insert <paramref name="item"/> into the broadcast queue, applying message
/// override rules: queued items the new message overrides (weight 1) are dropped; if any
/// queued item overrides the new message (weight -1) the insert is abandoned.
/// </summary>
/// <param name="item">The candidate item to enqueue.</param>
/// <returns><c>true</c> if the item was added; <c>false</c> if an existing item overrode it.</returns>
private bool ApplyMessageOverrides(BroadcastableItem item)
{
    lock (_broadcastQueueLock)
    {
        var snapshot = BroadcastQueue.ToList();
        var overridden = new List<BroadcastableItem>();
        foreach (var existing in snapshot)
        {
            var weight = item.SwimMessage.Message.GetMessageOverrideWeight(existing.SwimMessage.Message);
            switch (weight)
            {
                case 1:
                    // The new message supersedes this queued one.
                    overridden.Add(existing);
                    break;
                case -1:
                    // A queued message supersedes the new one: reject the insert.
                    return false;
            }
        }
        foreach (var stale in overridden)
        {
            snapshot.Remove(stale);
        }
        snapshot.Add(item);
        // Rebuild the bag ordered by broadcast count, matching the original behavior.
        BroadcastQueue = new ConcurrentBag<BroadcastableItem>(
            snapshot.OrderByDescending(x => x.BroadcastCount));
    }
    return true;
}
/// <summary>
/// Returns up to <paramref name="num"/> queued messages to gossip, prioritizing the
/// least-broadcast items and dropping items whose broadcast count has reached the
/// lambda * log(node count + 1) threshold. Increments each returned item's count.
/// </summary>
/// <param name="num">Maximum number of messages to return.</param>
private IEnumerable<MessageBase> GetBroadcastMessages(int num = 10)
{
    lock (_broadcastQueueLock)
    {
        // Prioritize new items and remove items that have hit the broadcast threshold.
        lock (_nodesLock)
        {
            BroadcastQueue = new ConcurrentBag<BroadcastableItem>(
                BroadcastQueue
                    .OrderBy(x => x.BroadcastCount)
                    .Where(x => x.BroadcastCount < Lambda * Math.Log(Nodes.Count + 1)));
        }
        // Take() already clamps to the collection size, so the original
        // "Count < num" ternary was redundant.
        var bis = BroadcastQueue.Take(num).ToList();
        foreach (var bi in bis)
        {
            bi.BroadcastCount++;
        }
        // BUGFIX: materialize before returning — the original returned a deferred LINQ
        // query that the caller would enumerate outside the lock.
        return bis.Select(x => x.Message).ToList();
    }
}
/// <summary>
/// Returns up to <paramref name="num"/> signed messages to gossip. Items that have hit the
/// broadcast threshold (at least 1, so lone nodes still broadcast once) or whose message is
/// no longer valid are dropped. Increments each returned item's broadcast count.
/// </summary>
/// <param name="num">Maximum number of messages to return.</param>
private IEnumerable<SignedSwimMessage> GetBroadcastMessages(int num = 10)
{
    lock (_broadcastQueueLock)
    {
        // Prioritize items and remove those that hit the broadcast threshold or expired.
        lock (_nodesLock)
        {
            BroadcastQueue = new ConcurrentBag<BroadcastableItem>(
                BroadcastQueue
                    .OrderByDescending(x => x.BroadcastCount)
                    .Where(x => x.BroadcastCount < Math.Max(Lambda * Math.Log(_nodes.Count + 1), 1)
                                && x.SwimMessage.Message.IsValid));
        }
        // Take() already clamps to the collection size, so the original
        // "Count < num" ternary was redundant.
        var bis = BroadcastQueue.Take(num).ToList();
        foreach (var bi in bis)
        {
            bi.BroadcastCount++;
        }
        // BUGFIX: materialize before returning — the original returned a deferred LINQ
        // query that the caller would enumerate outside the lock (and re-enumerated the
        // bag a second time after the counts were bumped).
        return bis.Select(x => x.SwimMessage).ToList();
    }
}
// Main method for block validation. Consumes a temp block file (_filePath), decides whether
// the incoming run of blocks triggers a hard fork (deep reorg), extends the official tip as
// a new fork, or attaches to an existing fork, and validates/dispatches accordingly.
// The temp file is deleted on (almost) every rejection path.
//
// Temp block file layout (from the header reads below):
//   bytes 0-3 = latest block index contained in the file,
//   bytes 4-7 = first block index contained in the file,
//   followed by the serialized blocks.
public static void ProccessTempBlocks(string _filePath, bool needPropagate) // MAIN FUNCTION TO VALID BLOCK
{
    Print("Proccess File block : " + _filePath);
    FileInfo f = new FileInfo(_filePath);
    // Need at least the 8-byte header (two uint32 indices).
    if (f.Length < 8)
    {
        Console.WriteLine("[BLOCKS REFUSED] bad file length!");
        File.Delete(_filePath);
        return;
    }
    if (!isHeaderCorrectInBlockFile(_filePath))
    {
        Console.WriteLine("[BLOCKS REFUSED] header incorrect!");
        File.Delete(_filePath);
        return;
    }
    uint firstTempIndex = BitConverter.ToUInt32(GetBytesFromFile(4, 8, _filePath), 0);
    uint latestTempIndex = BitConverter.ToUInt32(GetBytesFromFile(0, 4, _filePath), 0);
    Print(firstTempIndex + " " + latestTempIndex);
    uint latestOfficialIndex = RequestLatestBlockIndex(true);
    bool HardFork = false;
    Print(((int)(latestOfficialIndex - MAX_RETROGRADE)).ToString());
    // Cast to int so latestOfficialIndex - MAX_RETROGRADE doesn't wrap as an unsigned
    // value when the chain is shorter than MAX_RETROGRADE (original note: "create a
    // uint max value error !").
    if ((int)firstTempIndex <= (int)(latestOfficialIndex - MAX_RETROGRADE))
    {
        // Run starts deeper than the allowed retrograde window: hard-fork territory.
        if (firstTempIndex == 0)
        {
            Console.WriteLine("[BLOCKS REFUSED] no genesis allowed!");
            File.Delete(_filePath);
            return;
        }
        // A deep reorg is only accepted if the new run ends far enough past the
        // official tip (WINNING_RUN_DISTANCE).
        if (latestTempIndex < latestOfficialIndex + WINNING_RUN_DISTANCE)
        {
            Console.WriteLine("[BLOCKS REFUSED] Not Winning dist!");
            File.Delete(_filePath);
            return;
        }
        HardFork = true;
    }
    else
    {
        uint latestIndex = RequestLatestBlockIndex(false);
        // The run must attach to something we already have (official chain or a fork).
        if (firstTempIndex > latestIndex + 1)
        {
            Console.WriteLine("[BLOCKS REFUSED] Can't proccess blocks. ");
            File.Delete(_filePath);
            return;
        }
        // And it must reach past the official tip to be worth processing.
        if (latestTempIndex < latestOfficialIndex + 1)
        {
            Console.WriteLine("[BLOCKS REFUSED] Can't proccess blocks. ");
            File.Delete(_filePath);
            return;
        }
    }
    // NOTE(review): original comment says auxiliary data should be loaded here during
    // validation, e.g. every ~30 blocks depending on their time distance — not implemented.
    Block bfirst = GetBlockAtIndexInFile(firstTempIndex, _filePath);
    Block blast = GetBlockAtIndexInFile(latestTempIndex, _filePath);
    if (bfirst == null || blast == null)
    {
        Console.WriteLine("header incorrect!");
        File.Delete(_filePath);
        return;
    }
    if (HardFork)
    {
        Print("hard forking");
        Block currentBlockReading = GetBlockAtIndexInFile(firstTempIndex, _filePath);
        Block previousBlock = GetBlockAtIndex(firstTempIndex - 1);
        if (currentBlockReading == null || previousBlock == null)
        {
            Console.WriteLine("[BLOCKS REFUSED] block null ");
            File.Delete(_filePath);
            return;
        }
        // Collect up to TIMESTAMP_TARGET timestamps walking backwards from the attach
        // point; they feed the minimum-timestamp requirement.
        List<uint> timestamps = new List<uint>();
        uint TC = 0;
        for (uint i = firstTempIndex - 1; i >= 0; i--)
        {
            // uint can't go negative: decrementing past 0 wraps to uint.MaxValue.
            if (i == uint.MaxValue)
            {
                break;
            }
            Block b = GetBlockAtIndex(i);
            if (b == null)
            {
                Console.WriteLine("[BLOCKS REFUSED] block null ");
                File.Delete(_filePath);
                return;
            }
            timestamps.Add(b.TimeStamp);
            TC++;
            if (TC >= TIMESTAMP_TARGET)
            {
                break;
            }
        }
        uint MINTIMESTAMP = GetTimeStampRequirementB(timestamps);
        byte[] tempTarget = new byte[32];
        if (isNewTargetRequired(firstTempIndex))
        {
            // Difficulty retarget boundary: recompute the hash target from the block
            // TARGET_CLOCK blocks back (or genesis when the chain isn't that deep yet).
            Block earlierBlock;
            if (previousBlock.Index + 1 <= TARGET_CLOCK)
            {
                earlierBlock = GetBlockAtIndex(0); // get genesis
                if (earlierBlock == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block null ");
                    File.Delete(_filePath);
                    return;
                }
            }
            else
            {
                earlierBlock = GetBlockAtIndex(previousBlock.Index + 1 - TARGET_CLOCK);
                if (earlierBlock == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block null ");
                    File.Delete(_filePath);
                    return;
                }
            }
            tempTarget = ComputeHashTargetB(previousBlock, earlierBlock);
        }
        else
        {
            tempTarget = previousBlock.HashTarget;
        }
        // Virtual UTXO set accumulated while validating the run.
        // NOTE(review): original comment warns this should be spooled to a temp file
        // rather than held in RAM for very long runs.
        List<UTXO> vUTXO = new List<UTXO>();
        while (true)
        {
            byte[] reqtarget = ApplyTheSeaToTheCryptoPuzzle(tempTarget, Tidal.GetTidalAtSpecificTime(currentBlockReading.TimeStamp));
            Tuple<bool, List<UTXO>> bV = IsBlockValid(currentBlockReading, previousBlock, MINTIMESTAMP, tempTarget, reqtarget, vUTXO);
            if (!bV.Item1)
            {
                File.Delete(_filePath);
                Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                return;
            }
            vUTXO = bV.Item2;
            if (currentBlockReading.Index == latestTempIndex)
            {
                // Whole run validated: rewind chain + UTXO set to the attach point and
                // promote the new blocks to the official chain.
                DownGradeUTXOSet(firstTempIndex - 1);
                DowngradeOfficialChain(firstTempIndex - 1);
                AddBlocksToOfficialChain(_filePath, needPropagate);
                return;
            }
            previousBlock = currentBlockReading;
            currentBlockReading = GetBlockAtIndexInFile(currentBlockReading.Index + 1, _filePath);
            if (currentBlockReading == null)
            {
                Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                File.Delete(_filePath);
                return;
            }
            // Slide the timestamp window forward by one block.
            timestamps.RemoveAt(0);
            timestamps.Add(previousBlock.TimeStamp);
            MINTIMESTAMP = GetTimeStampRequirementB(timestamps);
            if (isNewTargetRequired(currentBlockReading.Index))
            {
                Block earlierBlock;
                if (previousBlock.Index + 1 <= TARGET_CLOCK)
                {
                    earlierBlock = GetBlockAtIndex(0);
                    if (earlierBlock == null)
                    {
                        Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                        File.Delete(_filePath);
                        return;
                    }
                }
                else
                {
                    // The retarget reference block may live in the incoming file (past the
                    // official tip) or in the official chain.
                    if (previousBlock.Index + 1 - TARGET_CLOCK > latestOfficialIndex)
                    {
                        earlierBlock = GetBlockAtIndexInFile(previousBlock.Index + 1 - TARGET_CLOCK, _filePath);
                        if (earlierBlock == null)
                        {
                            Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                            File.Delete(_filePath);
                            return;
                        }
                    }
                    else
                    {
                        earlierBlock = GetBlockAtIndex(previousBlock.Index + 1 - TARGET_CLOCK);
                        if (earlierBlock == null)
                        {
                            Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                            File.Delete(_filePath);
                            return;
                        }
                    }
                }
                tempTarget = ComputeHashTargetB(previousBlock, earlierBlock);
            }
            else
            {
                tempTarget = previousBlock.HashTarget;
            }
        }
    }
    else
    {
        // Non-hard-fork path. Original notes: [1] check whether firstTempIndex is below
        // the highest fork index OR equals latestOfficialIndex + 1; the run then either
        // extends the official tip directly or attaches to / extends a stored fork.
        Block currentBlockReading = GetBlockAtIndexInFile(firstTempIndex, _filePath);
        if (currentBlockReading == null)
        {
            File.Delete(_filePath);
            Console.WriteLine("[BLOCKS REFUSED] block not valid ");
            return;
        }
        string[] forkfiles = Directory.GetFiles(_folderPath + "fork"); // NOTE(review): unused
        uint latestIndex = RequestLatestBlockIndex(false);
        string _pathToGetPreviousBlock = "";
        if (firstTempIndex != latestOfficialIndex + 1)
        {
            // Run does not attach directly to the official tip: locate the file (chain
            // chunk or fork) holding block firstTempIndex - 1.
            Print("called");
            latestIndex = RequestLatestBlockIndex(false);
            if (latestIndex > firstTempIndex - 1)
            {
                _pathToGetPreviousBlock = GetIndexBlockChainFilePath(firstTempIndex - 1);
                if (_pathToGetPreviousBlock == "")
                {
                    File.Delete(_filePath);
                    Console.WriteLine("[BLOCK REFUSED ]Can't find a fork to process those blocks. temp index : " + firstTempIndex);
                    return;
                }
            }
            else
            {
                // Try to find a stored fork this run attaches to (light-fork case).
                string forkpath = FindMatchingFork(currentBlockReading);
                if (forkpath.Length == 0)
                {
                    // NOTE(review): this rejection path does NOT delete _filePath,
                    // unlike every other one — confirm whether that is intentional.
                    Console.WriteLine("[BLOCK REFUSED ]Can't find a fork to process those blocks. temp index : " + firstTempIndex);
                    return;
                }
                else
                {
                    _pathToGetPreviousBlock = forkpath;
                }
                Block bb = GetBlockAtIndexInFile(latestTempIndex, _filePath);
                if (bb == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block not valid ");
                    File.Delete(_filePath);
                    return;
                }
                if (isForkAlreadyExisting(bb))
                {
                    Console.WriteLine("[BLOCKS REFUSED] block already exist ");
                    File.Delete(_filePath);
                    return;
                }
            }
        }
        else
        {
            // Run attaches directly to the official tip.
            //_pathToGetPreviousBlock = GetLatestBlockChainFilePath(); // (discarded earlier approach)
            _pathToGetPreviousBlock = GetIndexBlockChainFilePath(firstTempIndex - 1);
        }
        Print(_pathToGetPreviousBlock);
        Block previousBlock = GetBlockAtIndexInFile(firstTempIndex - 1, _pathToGetPreviousBlock);
        if (previousBlock == null)
        {
            File.Delete(_filePath);
            Console.WriteLine("[BLOCKS REFUSED] block null ");
            return;
        }
        // Timestamp window: indices past the official tip come from the fork file,
        // the rest from the official chain.
        List<uint> timestamps = new List<uint>();
        uint TC = 0;
        for (uint i = firstTempIndex - 1; i >= 0; i--)
        {
            // Detect uint wrap after decrementing past 0.
            if (i == uint.MaxValue)
            {
                break;
            }
            if (i > latestOfficialIndex)
            {
                Block b = GetBlockAtIndexInFile(i, _pathToGetPreviousBlock);
                if (b == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block null ");
                    File.Delete(_filePath);
                    return;
                }
                timestamps.Add(b.TimeStamp);
            }
            else
            {
                Block b = GetBlockAtIndex(i);
                if (b == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block null ");
                    File.Delete(_filePath);
                    return;
                }
                timestamps.Add(GetBlockAtIndex(i).TimeStamp);
            }
            TC++;
            if (TC >= TIMESTAMP_TARGET)
            {
                break;
            }
        }
        uint MINTIMESTAMP = GetTimeStampRequirementB(timestamps);
        Print(MINTIMESTAMP.ToString());
        byte[] tempTarget = new byte[32];
        if (isNewTargetRequired(firstTempIndex))
        {
            // Retarget: the reference block may be in the incoming file, the fork file,
            // or the official chain depending on how far back TARGET_CLOCK reaches.
            Block earlierBlock;
            if (latestOfficialIndex < previousBlock.Index + 1 - TARGET_CLOCK)
            {
                uint lastindexfile = RequestLatestBlockIndexInFile(_pathToGetPreviousBlock);
                if (lastindexfile < previousBlock.Index + 1 - TARGET_CLOCK)
                {
                    earlierBlock = GetBlockAtIndexInFile(previousBlock.Index + 1 - TARGET_CLOCK, _filePath);
                    if (earlierBlock == null)
                    {
                        Console.WriteLine("[BLOCKS REFUSED] block null ");
                        File.Delete(_filePath);
                        return;
                    }
                }
                else
                {
                    earlierBlock = GetBlockAtIndexInFile(previousBlock.Index + 1 - TARGET_CLOCK, _pathToGetPreviousBlock);
                    if (earlierBlock == null)
                    {
                        Console.WriteLine("[BLOCKS REFUSED] block null ");
                        File.Delete(_filePath);
                        return;
                    }
                }
            }
            else
            {
                earlierBlock = GetBlockAtIndex(previousBlock.Index + 1 - TARGET_CLOCK);
                if (earlierBlock == null)
                {
                    Console.WriteLine("[BLOCKS REFUSED] block null ");
                    File.Delete(_filePath);
                    return;
                }
            }
            tempTarget = ComputeHashTargetB(previousBlock, earlierBlock);
            Print((previousBlock.Index + 1).ToString());
        }
        else
        {
            tempTarget = previousBlock.HashTarget;
        }
        // Virtual UTXO set (original note: can reach ~1.2 MB in RAM including retrograde).
        List<UTXO> vUTXO = new List<UTXO>();
        // If attaching to a fork (not the official tip), replay the fork's blocks into the
        // virtual UTXO set first — no retrograde computation is needed in that case.
        if (_pathToGetPreviousBlock != GetLatestBlockChainFilePath())
        {
            uint firstforkIndex = BitConverter.ToUInt32(GetBytesFromFile(4, 8, _pathToGetPreviousBlock), 0);
            for (uint i = firstforkIndex; i < previousBlock.Index + 1; i++)
            {
                Block b = GetBlockAtIndexInFile(i, _pathToGetPreviousBlock);
                if (b == null)
                {
                    File.Delete(_filePath);
                    return;
                }
                foreach (Tx TXS in b.Data)
                {
                    // Locate the sender/receiver UTXOs already present in the virtual set.
                    bool _sFound = false;
                    int sIndex = 0;
                    bool _rFound = false;
                    int rIndex = 0;
                    UTXO rutxo = null;
                    for (int a = 0; a < vUTXO.Count; a++)
                    {
                        if (vUTXO[a].HashKey.SequenceEqual(ComputeSHA256(TXS.sPKey)))
                        {
                            _sFound = true;
                            sIndex = a;
                        }
                        if (vUTXO[a].HashKey.SequenceEqual(TXS.rHashKey))
                        {
                            _rFound = true;
                            rIndex = a;
                            rutxo = vUTXO[a];
                        }
                    }
                    // Sender: seed from the official set on first sight, else update in place.
                    if (!_sFound)
                    {
                        vUTXO.Add(UpdateVirtualUTXOWithFullBlock(b, GetOfficialUTXOAtPointer(TXS.sUTXOP), false));
                    }
                    else
                    {
                        vUTXO[sIndex] = UpdateVirtualUTXOWithFullBlock(b, vUTXO[sIndex], false);
                    }
                    // Receiver: may not exist in the official set yet, hence the null check.
                    if (!_rFound)
                    {
                        rutxo = GetOfficialUTXOAtPointer(TXS.rUTXOP);
                        if (rutxo != null)
                        {
                            vUTXO.Add(UpdateVirtualUTXOWithFullBlock(b, GetOfficialUTXOAtPointer(TXS.rUTXOP), false));
                        }
                    }
                    else
                    {
                        vUTXO[rIndex] = UpdateVirtualUTXOWithFullBlock(b, rutxo, false);
                    }
                }
                // Miner reward UTXO for this block.
                bool _mFound = false;
                int mIndex = 0;
                for (int a = 0; a < vUTXO.Count; a++)
                {
                    if (b.minerToken.MinerPKEY.SequenceEqual(vUTXO[a].HashKey))
                    {
                        _mFound = true;
                        mIndex = a;
                    }
                }
                if (!_mFound)
                {
                    UTXO mutxo = GetOfficialUTXOAtPointer(b.minerToken.mUTXOP);
                    if (mutxo != null)
                    {
                        vUTXO.Add(UpdateVirtualUTXOWithFullBlock(b, mutxo, false));
                    }
                }
                else
                {
                    vUTXO[mIndex] = UpdateVirtualUTXOWithFullBlock(b, vUTXO[mIndex], false);
                }
            }
        }
        // Validate the incoming run block by block.
        while (true)
        {
            byte[] reqtarget = ApplyTheSeaToTheCryptoPuzzle(tempTarget, Tidal.GetTidalAtSpecificTime(currentBlockReading.TimeStamp));
            Tuple<bool, List<UTXO>> bV = IsBlockValid(currentBlockReading, previousBlock, MINTIMESTAMP, tempTarget, reqtarget, vUTXO);
            if (!bV.Item1)
            {
                File.Delete(_filePath);
                return;
            }
            vUTXO = bV.Item2; // virtual UTXOs are updated by the validation step
            if (currentBlockReading.Index == latestTempIndex)
            {
                if (_pathToGetPreviousBlock == GetLatestBlockChainFilePath())
                {
                    // Run attached to the official tip: store it as a brand-new fork.
                    string newPath = GetNewForkFilePath();
                    File.Move(_filePath, newPath);
                    Print("new fork added");
                    UpdatePendingTXFileB(newPath);
                    VerifyRunState(needPropagate);
                    // VerifyRunState may consume/promote the fork; only broadcast the
                    // fork file if it still exists.
                    if (File.Exists(newPath) && needPropagate)
                    {
                        BroadcastQueue.Add(new BroadcastInfo(1, 1, newPath));
                    }
                    return;
                }
                else
                {
                    // Run extends an existing fork: concatenate the two fork files.
                    string newForkPath = ConcatenateForks(_pathToGetPreviousBlock, _filePath, firstTempIndex);
                    VerifyRunState(needPropagate);
                    // Same existence check as above before broadcasting.
                    if (File.Exists(newForkPath) && needPropagate)
                    {
                        BroadcastQueue.Add(new BroadcastInfo(1, 1, newForkPath));
                    }
                    Print("fork has been append.");
                    return;
                }
            }
            previousBlock = currentBlockReading;
            currentBlockReading = GetBlockAtIndexInFile(currentBlockReading.Index + 1, _filePath);
            if (currentBlockReading == null)
            {
                File.Delete(_filePath);
                Print("wrong index specified 3");
                return;
            }
            // Slide the timestamp window forward by one block.
            timestamps.RemoveAt(0);
            timestamps.Add(previousBlock.TimeStamp);
            MINTIMESTAMP = GetTimeStampRequirementB(timestamps);
            if (isNewTargetRequired(currentBlockReading.Index))
            {
                // Same retarget source resolution as above (incoming file / fork / chain).
                Block earlierBlock;
                if (latestOfficialIndex < previousBlock.Index + 1 - TARGET_CLOCK)
                {
                    uint lastindexfile = RequestLatestBlockIndexInFile(_pathToGetPreviousBlock);
                    if (lastindexfile < previousBlock.Index + 1 - TARGET_CLOCK)
                    {
                        earlierBlock = GetBlockAtIndexInFile(previousBlock.Index + 1 - TARGET_CLOCK, _filePath);
                        if (earlierBlock == null)
                        {
                            Print("[missing block]");
                            File.Delete(_filePath);
                            return;
                        }
                    }
                    else
                    {
                        earlierBlock = GetBlockAtIndexInFile(previousBlock.Index + 1 - TARGET_CLOCK, _pathToGetPreviousBlock);
                        if (earlierBlock == null)
                        {
                            Print("[missing block]");
                            File.Delete(_filePath);
                            return;
                        }
                    }
                }
                else
                {
                    earlierBlock = GetBlockAtIndex(previousBlock.Index + 1 - TARGET_CLOCK);
                    if (earlierBlock == null)
                    {
                        Print("[missing block]");
                        File.Delete(_filePath);
                        return;
                    }
                }
                tempTarget = ComputeHashTargetB(previousBlock, earlierBlock);
            }
            else
            {
                tempTarget = previousBlock.HashTarget;
            }
        }
    }
}