private async Task ReorgOneAsync()
{
	// 1. Rollback index
	using (await IndexLock.LockAsync())
	{
		Logger.LogInfo<IndexBuilderService>($"REORG invalid block: {Index.Last().BlockHash}");
		Index.RemoveLast();
	}

	// 2. Serialize Index. (Remove last line.)
	var lines = await File.ReadAllLinesAsync(IndexFilePath);
	await File.WriteAllLinesAsync(IndexFilePath, lines.Take(lines.Length - 1).ToArray());

	// 3. Rollback Bech32UtxoSet
	if (Bech32UtxoSetHistory.Count != 0)
	{
		Bech32UtxoSetHistory.Last().Rollback(Bech32UtxoSet); // The Bech32UtxoSet MUST be recovered to its previous state.
		Bech32UtxoSetHistory.RemoveLast();

		// 4. Serialize Bech32UtxoSet.
		await File.WriteAllLinesAsync(Bech32UtxoSetFilePath, Bech32UtxoSet
			.Select(entry => entry.Key.Hash + ":" + entry.Key.N + ":" + ByteHelpers.ToHex(entry.Value.ToCompressedBytes())));
	}
}
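The rollback above depends on ActionHistoryHelper having recorded every mutation applied to Bech32UtxoSet while the reorged block was processed. The helper itself is not shown in these snippets; the following is a minimal illustrative sketch only, assuming Bech32UtxoSet is a Dictionary<OutPoint, Script> as in the earlier versions below, not the actual implementation.

// Sketch: a possible ActionHistoryHelper that records Add/Remove actions and undoes
// them in reverse order, so the UTXO set returns to its pre-block state on reorg.
// Requires: using System.Collections.Generic; using NBitcoin;
public class ActionHistoryHelper
{
	public enum Operation
	{
		Add,
		Remove
	}

	private readonly List<(Operation Action, OutPoint OutPoint, Script Script)> _actions = new();

	public void StoreAction(Operation action, OutPoint outPoint, Script script)
	{
		_actions.Add((action, outPoint, script));
	}

	public void Rollback(Dictionary<OutPoint, Script> bech32UtxoSet)
	{
		// Undo in reverse order of recording.
		for (int i = _actions.Count - 1; i >= 0; i--)
		{
			var (action, outPoint, script) = _actions[i];
			if (action == Operation.Add)
			{
				bech32UtxoSet.Remove(outPoint); // An output added by the reorged block is removed again.
			}
			else
			{
				bech32UtxoSet.Add(outPoint, script); // An output spent by the reorged block is restored.
			}
		}
		_actions.Clear();
	}
}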
public void Synchronize()
{
	Interlocked.Exchange(ref _running, 1);

	Task.Run(async () =>
	{
		try
		{
			var blockCount = await RpcClient.GetBlockCountAsync();
			var isIIB = true; // Initial Index Building phase

			while (IsRunning)
			{
				try
				{
					// If stop was requested return.
					if (IsRunning == false)
					{
						return;
					}

					Height height = StartingHeight;
					uint256 prevHash = null;
					using (await IndexLock.LockAsync())
					{
						if (Index.Count != 0)
						{
							var lastIndex = Index.Last();
							height = lastIndex.BlockHeight + 1;
							prevHash = lastIndex.BlockHash;
						}
					}

					if (blockCount - height <= 100)
					{
						isIIB = false;
					}

					Block block = null;
					try
					{
						block = await RpcClient.GetBlockAsync(height);
					}
					catch (RPCException) // If the block has not arrived yet.
					{
						await Task.Delay(1000);
						continue;
					}

					if (blockCount - height <= 2)
					{
						NewBlock?.Invoke(this, block);
					}

					if (!(prevHash is null))
					{
						// In case of reorg:
						if (prevHash != block.Header.HashPrevBlock && !isIIB) // There is no reorg in IIB.
						{
							Logger.LogInfo<IndexBuilderService>($"REORG Invalid Block: {prevHash}");

							// 1. Rollback index
							using (await IndexLock.LockAsync())
							{
								Index.RemoveLast();
							}

							// 2. Serialize Index. (Remove last line.)
							var lines = File.ReadAllLines(IndexFilePath);
							File.WriteAllLines(IndexFilePath, lines.Take(lines.Length - 1).ToArray());

							// 3. Rollback Bech32UtxoSet
							if (Bech32UtxoSetHistory.Count != 0)
							{
								Bech32UtxoSetHistory.Last().Rollback(Bech32UtxoSet); // The Bech32UtxoSet MUST be recovered to its previous state.
								Bech32UtxoSetHistory.RemoveLast();

								// 4. Serialize Bech32UtxoSet.
								await File.WriteAllLinesAsync(Bech32UtxoSetFilePath, Bech32UtxoSet
									.Select(entry => entry.Key.Hash + ":" + entry.Key.N + ":" + ByteHelpers.ToHex(entry.Value.ToCompressedBytes())));
							}

							// 5. Skip the current block.
							continue;
						}
					}

					if (!isIIB)
					{
						if (Bech32UtxoSetHistory.Count >= 100)
						{
							Bech32UtxoSetHistory.RemoveFirst();
						}
						Bech32UtxoSetHistory.Add(new ActionHistoryHelper());
					}

					var scripts = new HashSet<Script>();

					foreach (var tx in block.Transactions)
					{
						// If stop was requested return.
						// Because this tx iteration can take even minutes,
						// _running is read directly instead of through IsRunning (Interlocked); this may have some performance benefit.
						if (_running != 1)
						{
							return;
						}

						for (int i = 0; i < tx.Outputs.Count; i++)
						{
							var output = tx.Outputs[i];
							if (!output.ScriptPubKey.IsPayToScriptHash && output.ScriptPubKey.IsWitness)
							{
								var outpoint = new OutPoint(tx.GetHash(), i);
								Bech32UtxoSet.Add(outpoint, output.ScriptPubKey);
								if (!isIIB)
								{
									Bech32UtxoSetHistory.Last().StoreAction(ActionHistoryHelper.Operation.Add, outpoint, output.ScriptPubKey);
								}
								scripts.Add(output.ScriptPubKey);
							}
						}

						foreach (var input in tx.Inputs)
						{
							OutPoint prevOut = input.PrevOut;
							if (Bech32UtxoSet.TryGetValue(prevOut, out Script foundScript))
							{
								Bech32UtxoSet.Remove(prevOut);
								if (!isIIB)
								{
									Bech32UtxoSetHistory.Last().StoreAction(ActionHistoryHelper.Operation.Remove, prevOut, foundScript);
								}
								scripts.Add(foundScript);
							}
						}
					}

					GolombRiceFilter filter = null;
					if (scripts.Count != 0)
					{
						filter = new GolombRiceFilterBuilder()
							.SetKey(block.GetHash())
							.SetP(20)
							.SetM(1 << 20)
							.AddEntries(scripts.Select(x => x.ToCompressedBytes()))
							.Build();
					}

					var filterModel = new FilterModel
					{
						BlockHash = block.GetHash(),
						BlockHeight = height,
						Filter = filter
					};

					await File.AppendAllLinesAsync(IndexFilePath, new[] { filterModel.ToLine() });

					using (await IndexLock.LockAsync())
					{
						Index.Add(filterModel);
					}

					if (File.Exists(Bech32UtxoSetFilePath))
					{
						File.Delete(Bech32UtxoSetFilePath);
					}
					await File.WriteAllLinesAsync(Bech32UtxoSetFilePath, Bech32UtxoSet
						.Select(entry => entry.Key.Hash + ":" + entry.Key.N + ":" + ByteHelpers.ToHex(entry.Value.ToCompressedBytes())));

					// If not close to the tip, just log debug.
					// Use height.Value instead of simply height, because it cannot be a negative height.
					if (blockCount - height.Value <= 3 || height % 100 == 0)
					{
						Logger.LogInfo<IndexBuilderService>($"Created filter for block: {height}.");
					}
					else
					{
						Logger.LogDebug<IndexBuilderService>($"Created filter for block: {height}.");
					}
				}
				catch (Exception ex)
				{
					Logger.LogDebug<IndexBuilderService>(ex);
				}
			}
		}
		finally
		{
			if (IsStopping)
			{
				Interlocked.Exchange(ref _running, 3);
			}
		}
	});
}
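The filters written above use BIP-158 style parameters (P = 20, M = 2^20) keyed to the block hash. As a hedged sketch of the client side, the following shows how a watched script could be tested against such a filter, assuming NBitcoin's GolombRiceFilter exposes a Match(data, key) overload; the key derivation mirrors the later snippet that takes the first 16 bytes of the block hash.

// Sketch: client-side membership test against a filter built like the one above.
// Requires: using System.Linq; using NBitcoin;
public static bool FilterMatchesScript(GolombRiceFilter filter, uint256 blockHash, Script watchedScript)
{
	// Per BIP-158 the key is the first 16 bytes of the hash of the block the filter was built for.
	byte[] key = blockHash.ToBytes().Take(16).ToArray();

	// Entries were added as compressed script bytes, so query with the same encoding.
	return filter.Match(watchedScript.ToCompressedBytes(), key);
}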
public void Synchronize()
{
	// Check permissions.
	using var _ = File.Open(IndexFilePath, FileMode.OpenOrCreate, FileAccess.ReadWrite);

	Task.Run(async () =>
	{
		try
		{
			if (Interlocked.Read(ref _workerCount) >= 2)
			{
				return;
			}

			Interlocked.Increment(ref _workerCount);
			while (Interlocked.Read(ref _workerCount) != 1)
			{
				await Task.Delay(100).ConfigureAwait(false);
			}

			if (IsStopping)
			{
				return;
			}

			try
			{
				Interlocked.Exchange(ref _serviceStatus, Running);

				while (IsRunning)
				{
					try
					{
						SyncInfo syncInfo = await GetSyncInfoAsync().ConfigureAwait(false);

						uint currentHeight;
						uint256? currentHash = null;
						using (await IndexLock.LockAsync())
						{
							if (Index.Count != 0)
							{
								var lastIndex = Index[^1];
								currentHeight = lastIndex.Header.Height;
								currentHash = lastIndex.Header.BlockHash;
							}
							else
							{
								currentHash = StartingHeight == 0
									? uint256.Zero
									: await RpcClient.GetBlockHashAsync((int)StartingHeight - 1).ConfigureAwait(false);
								currentHeight = StartingHeight - 1;
							}
						}

						var coreNotSynced = !syncInfo.IsCoreSynchronized;
						var tipReached = syncInfo.BlockCount == currentHeight;
						var isTimeToRefresh = DateTimeOffset.UtcNow - syncInfo.BlockchainInfoUpdated > TimeSpan.FromMinutes(5);
						if (coreNotSynced || tipReached || isTimeToRefresh)
						{
							syncInfo = await GetSyncInfoAsync().ConfigureAwait(false);
						}

						// If the Wasabi filter height is the same as Core's, we may be done.
						if (syncInfo.BlockCount == currentHeight)
						{
							// Check that Core is fully synced.
							if (syncInfo.IsCoreSynchronized && !syncInfo.InitialBlockDownload)
							{
								// Mark the process NotStarted, so it can be started again
								// and the finally block can mark it as stopped.
								Interlocked.Exchange(ref _serviceStatus, NotStarted);
								return;
							}
							else
							{
								// Knots is catching up; give it 10 seconds.
								await Task.Delay(10000).ConfigureAwait(false);
								continue;
							}
						}

						uint nextHeight = currentHeight + 1;
						uint256 blockHash = await RpcClient.GetBlockHashAsync((int)nextHeight).ConfigureAwait(false);
						VerboseBlockInfo block = await RpcClient.GetVerboseBlockAsync(blockHash).ConfigureAwait(false);

						// Check if we are still on the best chain;
						// if not, rewind filters until we find the fork.
						if (currentHash != block.PrevBlockHash)
						{
							Logger.LogWarning("Reorg observed on the network.");

							await ReorgOneAsync().ConfigureAwait(false);

							// Skip the current block.
							continue;
						}

						var filter = BuildFilterForBlock(block);

						var smartHeader = new SmartHeader(block.Hash, block.PrevBlockHash, nextHeight, block.BlockTime);
						var filterModel = new FilterModel(smartHeader, filter);

						await File.AppendAllLinesAsync(IndexFilePath, new[] { filterModel.ToLine() }).ConfigureAwait(false);

						using (await IndexLock.LockAsync())
						{
							Index.Add(filterModel);
						}

						// If not close to the tip, just log debug.
						if (syncInfo.BlockCount - nextHeight <= 3 || nextHeight % 100 == 0)
						{
							Logger.LogInfo($"Created filter for block: {nextHeight}.");
						}
						else
						{
							Logger.LogDebug($"Created filter for block: {nextHeight}.");
						}

						LastFilterBuildTime = DateTimeOffset.UtcNow;
					}
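BuildFilterForBlock is called above but not shown in these snippets. Based on the inline filter construction in the other versions, it could look roughly like the sketch below; FetchScripts and CreateDummyEmptyFilter are referenced elsewhere in these listings, and their exact signatures here are assumptions.

// Sketch only: a plausible shape for BuildFilterForBlock, mirroring the inline
// builder usage in the other versions of Synchronize. Not the authoritative implementation.
private GolombRiceFilter BuildFilterForBlock(VerboseBlockInfo block)
{
	var scripts = FetchScripts(block); // Scripts the filter should commit to (assumed helper).

	if (scripts.Any())
	{
		return new GolombRiceFilterBuilder()
			.SetKey(block.Hash)
			.SetP(20)
			.SetM(1 << 20)
			.AddEntries(scripts.Select(x => x.ToCompressedBytes()))
			.Build();
	}

	// Empty filters are avoided (see the comment in the later snippets),
	// so fall back to a dummy filter keyed to this block.
	return CreateDummyEmptyFilter(block.Hash);
}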
public void Synchronize()
{
	Task.Run(async () =>
	{
		try
		{
			if (Interlocked.Read(ref _runner) >= 2)
			{
				return;
			}

			Interlocked.Increment(ref _runner);
			while (Interlocked.Read(ref _runner) != 1)
			{
				await Task.Delay(100);
			}

			if (Interlocked.Read(ref _running) >= 2)
			{
				return;
			}

			try
			{
				Interlocked.Exchange(ref _running, 1);

				var isImmature = false; // The last 100 blocks are reorgable. (Assume it is mature at first.)
				SyncInfo syncInfo = null;
				while (IsRunning)
				{
					try
					{
						// If we have not yet initialized syncInfo, do so.
						if (syncInfo is null)
						{
							syncInfo = await GetSyncInfoAsync();
						}

						uint heightToRequest = StartingHeight;
						uint256 currentHash = null;
						using (await IndexLock.LockAsync())
						{
							if (Index.Count != 0)
							{
								var lastIndex = Index.Last();
								heightToRequest = lastIndex.Header.Height + 1;
								currentHash = lastIndex.Header.BlockHash;
							}
						}

						// If not synchronized, or 5 minutes have already passed since the last update, get the latest blockchain info.
						if (!syncInfo.IsCoreSynchornized || DateTimeOffset.UtcNow - syncInfo.BlockchainInfoUpdated > TimeSpan.FromMinutes(5))
						{
							syncInfo = await GetSyncInfoAsync();
						}

						if (syncInfo.BlockCount - heightToRequest <= 100)
						{
							// Both Wasabi and our Core node are in sync. Start doing stuff through P2P from now on.
							if (syncInfo.IsCoreSynchornized && syncInfo.BlockCount == heightToRequest - 1)
							{
								syncInfo = await GetSyncInfoAsync();
								// Check again to make sure not to accidentally miss any notification.
								if (syncInfo.IsCoreSynchornized && syncInfo.BlockCount == heightToRequest - 1)
								{
									// Mark the process NotStarted, so it can be started again and the finally block can mark it as stopped.
									Interlocked.Exchange(ref _running, 0);
									return;
								}
							}

							// Mark that the synchronizing process works with immature blocks from now on.
							isImmature = true;
						}

						Block block = await RpcClient.GetBlockAsync(heightToRequest);

						// Reorg check, except when requesting the starting height, because then "currentHash" would not exist.
						if (heightToRequest != StartingHeight && currentHash != block.Header.HashPrevBlock)
						{
							// Reorg can happen only when immature. (If it were not immature, that would be a huge issue.)
							if (isImmature)
							{
								await ReorgOneAsync();
							}
							else
							{
								Logger.LogCritical("This is something serious! Over 100 block reorg is noticed! We cannot handle that!");
							}

							// Skip the current block.
							continue;
						}

						if (isImmature)
						{
							PrepareBech32UtxoSetHistory();
						}

						var scripts = new HashSet<Script>();

						foreach (var tx in block.Transactions)
						{
							// If stop was requested return.
							// Because this tx iteration can take even minutes,
							// _running is read directly instead of through IsRunning (Interlocked); this may have some performance benefit.
							if (_running != 1)
							{
								return;
							}

							for (int i = 0; i < tx.Outputs.Count; i++)
							{
								var output = tx.Outputs[i];
								if (output.ScriptPubKey.IsScriptType(ScriptType.P2WPKH))
								{
									var outpoint = new OutPoint(tx.GetHash(), i);
									var utxoEntry = new UtxoEntry(outpoint, output.ScriptPubKey);
									Bech32UtxoSet.Add(outpoint, utxoEntry);
									if (isImmature)
									{
										Bech32UtxoSetHistory.Last().StoreAction(Operation.Add, outpoint, output.ScriptPubKey);
									}
									scripts.Add(output.ScriptPubKey);
								}
							}

							foreach (var input in tx.Inputs)
							{
								OutPoint prevOut = input.PrevOut;
								if (Bech32UtxoSet.TryGetValue(prevOut, out UtxoEntry foundUtxoEntry))
								{
									var foundScript = foundUtxoEntry.Script;
									Bech32UtxoSet.Remove(prevOut);
									if (isImmature)
									{
										Bech32UtxoSetHistory.Last().StoreAction(Operation.Remove, prevOut, foundScript);
									}
									scripts.Add(foundScript);
								}
							}
						}

						GolombRiceFilter filter;
						if (scripts.Any())
						{
							filter = new GolombRiceFilterBuilder()
								.SetKey(block.GetHash())
								.SetP(20)
								.SetM(1 << 20)
								.AddEntries(scripts.Select(x => x.ToCompressedBytes()))
								.Build();
						}
						else
						{
							// We cannot have empty filters, because there was a bug in GolombRiceFilterBuilder that evaluates empty filters to true.
							// This must be fixed in a backwards compatible way, so we create a fake filter with a random script instead.
							filter = CreateDummyEmptyFilter(block.GetHash());
						}

						var smartHeader = new SmartHeader(block.GetHash(), block.Header.HashPrevBlock, heightToRequest, block.Header.BlockTime);
						var filterModel = new FilterModel(smartHeader, filter);

						await File.AppendAllLinesAsync(IndexFilePath, new[] { filterModel.ToLine() });

						using (await IndexLock.LockAsync())
						{
							Index.Add(filterModel);
						}

						if (File.Exists(Bech32UtxoSetFilePath))
						{
							File.Delete(Bech32UtxoSetFilePath);
						}
						var bech32UtxoSetLines = Bech32UtxoSet.Select(entry => entry.Value.Line);

						// Keep this synchronous unless the performance issue with async is fixed.
						File.WriteAllLines(Bech32UtxoSetFilePath, bech32UtxoSetLines);

						// If not close to the tip, just log debug.
						if (syncInfo.BlockCount - heightToRequest <= 3 || heightToRequest % 100 == 0)
						{
							Logger.LogInfo($"Created filter for block: {heightToRequest}.");
						}
						else
						{
							Logger.LogDebug($"Created filter for block: {heightToRequest}.");
						}
					}
					catch (Exception ex)
					{
						Logger.LogDebug(ex);
					}
				}
			}
			finally
			{
				Interlocked.CompareExchange(ref _running, 3, 2); // If IsStopping, make it stopped.
				Interlocked.Decrement(ref _runner);
			}
		}
		catch (Exception ex)
		{
			Logger.LogError($"Synchronization attempt failed to start: {ex}");
		}
	});
}
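CreateDummyEmptyFilter is referenced above but not shown. A hedged sketch consistent with the comment (a fake filter containing a single random entry, keyed to the block hash like the real filters) could look as follows; the actual implementation in the repository may differ.

// Sketch only: one plausible shape for CreateDummyEmptyFilter, not the authoritative code.
// Requires: using NBitcoin; using System.Security.Cryptography;
private static GolombRiceFilter CreateDummyEmptyFilter(uint256 blockHash)
{
	// Random data stands in for a script that no client will ever match against.
	var randomEntry = new byte[20];
	using var rng = RandomNumberGenerator.Create();
	rng.GetBytes(randomEntry);

	return new GolombRiceFilterBuilder()
		.SetKey(blockHash)
		.SetP(20)
		.SetM(1 << 20)
		.AddEntries(new[] { randomEntry })
		.Build();
}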
public void Synchronize()
{
	Task.Run(async () =>
	{
		try
		{
			if (Interlocked.Read(ref _workerCount) >= 2)
			{
				return;
			}

			Interlocked.Increment(ref _workerCount);
			while (Interlocked.Read(ref _workerCount) != 1)
			{
				await Task.Delay(100);
			}

			if (IsStopping)
			{
				return;
			}

			try
			{
				Interlocked.Exchange(ref _serviceStatus, Running);

				SyncInfo syncInfo = null;
				while (IsRunning)
				{
					try
					{
						// If we have not yet initialized syncInfo, do so.
						if (syncInfo is null)
						{
							syncInfo = await GetSyncInfoAsync();
						}

						uint currentHeight = 0;
						uint256 currentHash = null;
						using (await IndexLock.LockAsync())
						{
							if (Index.Count != 0)
							{
								var lastIndex = Index[^1];
								currentHeight = lastIndex.Header.Height;
								currentHash = lastIndex.Header.BlockHash;
							}
							else
							{
								currentHash = StartingHeight == 0
									? uint256.Zero
									: await RpcClient.GetBlockHashAsync((int)StartingHeight - 1);
								currentHeight = StartingHeight - 1;
							}
						}

						var coreNotSynced = !syncInfo.IsCoreSynchornized;
						var tipReached = syncInfo.BlockCount == currentHeight;
						var isTimeToRefresh = DateTimeOffset.UtcNow - syncInfo.BlockchainInfoUpdated > TimeSpan.FromMinutes(5);
						if (coreNotSynced || tipReached || isTimeToRefresh)
						{
							syncInfo = await GetSyncInfoAsync();
						}

						// If the Wasabi filter height is the same as Core's, we may be done.
						if (syncInfo.BlockCount == currentHeight)
						{
							// Check that Core is fully synced.
							if (syncInfo.IsCoreSynchornized && !syncInfo.InitialBlockDownload)
							{
								// Mark the process NotStarted, so it can be started again
								// and the finally block can mark it as stopped.
								Interlocked.Exchange(ref _serviceStatus, NotStarted);
								return;
							}
							else
							{
								// Knots is catching up; give it 10 seconds.
								await Task.Delay(10000);
								continue;
							}
						}

						uint nextHeight = currentHeight + 1;
						uint256 blockHash = await RpcClient.GetBlockHashAsync((int)nextHeight);
						VerboseBlockInfo block = await RpcClient.GetVerboseBlockAsync(blockHash);

						// Check if we are still on the best chain;
						// if not, rewind filters until we find the fork.
						if (currentHash != block.PrevBlockHash)
						{
							Logger.LogWarning("Reorg observed on the network.");

							await ReorgOneAsync();

							// Skip the current block.
							continue;
						}

						var scripts = FetchScripts(block);

						GolombRiceFilter filter;
						if (scripts.Any())
						{
							filter = new GolombRiceFilterBuilder()
								.SetKey(block.Hash)
								.SetP(20)
								.SetM(1 << 20)
								.AddEntries(scripts.Select(x => x.ToCompressedBytes()))
								.Build();
						}
						else
						{
							// We cannot have empty filters, because there was a bug in GolombRiceFilterBuilder that evaluates empty filters to true.
							// This must be fixed in a backwards compatible way, so we create a fake filter with a random script instead.
							filter = CreateDummyEmptyFilter(block.Hash);
						}

						var smartHeader = new SmartHeader(block.Hash, block.PrevBlockHash, nextHeight, block.BlockTime);
						var filterModel = new FilterModel(smartHeader, filter);

						await File.AppendAllLinesAsync(IndexFilePath, new[] { filterModel.ToLine() });

						using (await IndexLock.LockAsync())
						{
							Index.Add(filterModel);
						}

						// If not close to the tip, just log debug.
						if (syncInfo.BlockCount - nextHeight <= 3 || nextHeight % 100 == 0)
						{
							Logger.LogInfo($"Created filter for block: {nextHeight}.");
						}
						else
						{
							Logger.LogDebug($"Created filter for block: {nextHeight}.");
						}
					}
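SyncInfo and GetSyncInfoAsync are not part of these listings. Based only on the members the loops above read (BlockCount, IsCoreSynchornized/IsCoreSynchronized, InitialBlockDownload, BlockchainInfoUpdated), it appears to be a timestamped snapshot of the node's blockchain info; the sketch below is an assumption about its shape, not the real type.

// Sketch only: a possible SyncInfo snapshot type inferred from how the loops use it.
// Property names and types are assumptions; the spelling varies between snippets.
public class SyncInfo
{
	public int BlockCount { get; init; }                       // Height of the node's best block.
	public bool IsCoreSynchornized { get; init; }              // Whether headers and blocks are at the same height.
	public bool InitialBlockDownload { get; init; }            // Whether the node is still in IBD.
	public DateTimeOffset BlockchainInfoUpdated { get; init; } // When this snapshot was taken.
}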
public void Synchronize()
{
	Interlocked.Exchange(ref _running, 1);

	Task.Run(async () =>
	{
		try
		{
			var blockCount = await RpcClient.GetBlockCountAsync();
			var isIIB = true; // Initial Index Building phase

			while (IsRunning)
			{
				try
				{
					// If stop was requested return.
					if (IsRunning == false)
					{
						return;
					}

					var height = StartingHeight;
					uint256 prevHash = null;
					using (await IndexLock.LockAsync())
					{
						if (Index.Count != 0)
						{
							var lastIndex = Index.Last();
							height = lastIndex.BlockHeight + 1;
							prevHash = lastIndex.BlockHash;
						}
					}

					if (blockCount - (int)height <= 100)
					{
						isIIB = false;
					}

					Block block = null;
					try
					{
						block = await RpcClient.GetBlockAsync(height);
					}
					catch (RPCException) // If the block has not arrived yet.
					{
						await Task.Delay(1000);
						continue;
					}

					if (prevHash != null)
					{
						// In case of reorg:
						if (prevHash != block.Header.HashPrevBlock && !isIIB) // There is no reorg in IIB.
						{
							Logger.LogInfo<IndexBuilderService>($"REORG Invalid Block: {prevHash}");

							// 1. Rollback index
							using (await IndexLock.LockAsync())
							{
								Index.RemoveLast();
							}

							// 2. Serialize Index. (Remove last line.)
							var lines = File.ReadAllLines(IndexFilePath);
							File.WriteAllLines(IndexFilePath, lines.Take(lines.Length - 1).ToArray());

							// 3. Rollback Bech32UtxoSet
							if (Bech32UtxoSetHistory.Count != 0)
							{
								Bech32UtxoSetHistory.Last().Rollback(Bech32UtxoSet); // The Bech32UtxoSet MUST be recovered to its previous state.
								Bech32UtxoSetHistory.RemoveLast();

								// 4. Serialize Bech32UtxoSet.
								await File.WriteAllLinesAsync(Bech32UtxoSetFilePath, Bech32UtxoSet
									.Select(entry => entry.Key.Hash + ":" + entry.Key.N + ":" + ByteHelpers.ToHex(entry.Value.ToCompressedBytes())));
							}

							// 5. Skip the current block.
							continue;
						}
					}

					if (!isIIB)
					{
						if (Bech32UtxoSetHistory.Count >= 100)
						{
							Bech32UtxoSetHistory.RemoveFirst();
						}
						Bech32UtxoSetHistory.Add(new ActionHistoryHelper());
					}

					var scripts = new HashSet<Script>();

					foreach (var tx in block.Transactions)
					{
						for (int i = 0; i < tx.Outputs.Count; i++)
						{
							var output = tx.Outputs[i];
							if (!output.ScriptPubKey.IsPayToScriptHash && output.ScriptPubKey.IsWitness)
							{
								var outpoint = new OutPoint(tx.GetHash(), i);
								Bech32UtxoSet.Add(outpoint, output.ScriptPubKey);
								if (!isIIB)
								{
									Bech32UtxoSetHistory.Last().StoreAction(ActionHistoryHelper.Operation.Add, outpoint, output.ScriptPubKey);
								}
								scripts.Add(output.ScriptPubKey);
							}
						}

						foreach (var input in tx.Inputs)
						{
							var found = Bech32UtxoSet.SingleOrDefault(x => x.Key == input.PrevOut);
							if (found.Key != default)
							{
								Script val = Bech32UtxoSet[input.PrevOut];
								Bech32UtxoSet.Remove(input.PrevOut);
								if (!isIIB)
								{
									Bech32UtxoSetHistory.Last().StoreAction(ActionHistoryHelper.Operation.Remove, input.PrevOut, val);
								}
								scripts.Add(found.Value);
							}
						}
					}

					// https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
					// The parameter k MUST be set to the first 16 bytes of the hash of the block for which the filter
					// is constructed. This ensures the key is deterministic while still varying from block to block.
					var key = block.GetHash().ToBytes().Take(16).ToArray();

					GolombRiceFilter filter = null;
					if (scripts.Count != 0)
					{
						filter = GolombRiceFilter.Build(key, scripts.Select(x => x.ToCompressedBytes()));
					}

					var filterModel = new FilterModel
					{
						BlockHash = block.GetHash(),
						BlockHeight = height,
						Filter = filter
					};

					await File.AppendAllLinesAsync(IndexFilePath, new[] { filterModel.ToLine() });

					using (await IndexLock.LockAsync())
					{
						Index.Add(filterModel);
					}

					if (File.Exists(Bech32UtxoSetFilePath))
					{
						File.Delete(Bech32UtxoSetFilePath);
					}
					await File.WriteAllLinesAsync(Bech32UtxoSetFilePath, Bech32UtxoSet
						.Select(entry => entry.Key.Hash + ":" + entry.Key.N + ":" + ByteHelpers.ToHex(entry.Value.ToCompressedBytes())));

					if (blockCount - height <= 3 || height % 100 == 0) // If not close to the tip, just log debug.
					{
						Logger.LogInfo<IndexBuilderService>($"Created filter for block: {height}.");
					}
					else
					{
						Logger.LogDebug<IndexBuilderService>($"Created filter for block: {height}.");
					}
				}
				catch (Exception ex)
				{
					Logger.LogDebug<IndexBuilderService>(ex);
				}
			}
		}
		finally
		{
			if (IsStopping)
			{
				Interlocked.Exchange(ref _running, 3);
			}
		}
	});
}
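The Bech32UtxoSet is persisted above as "txid:vout:compressedScriptHex" lines. As a hedged sketch, one line could be split back into an outpoint plus the raw compressed-script bytes as below; how those compressed bytes become a Script again is left to the real deserialization code, which is not part of these snippets, and the helper name here is hypothetical.

// Sketch only: parsing one persisted Bech32UtxoSet line back into its parts.
// Requires: using NBitcoin; using NBitcoin.DataEncoders;
private static (OutPoint OutPoint, byte[] CompressedScriptBytes) ParseBech32UtxoSetLine(string line)
{
	var parts = line.Split(':');
	var txId = uint256.Parse(parts[0]);                     // Transaction id (entry.Key.Hash above).
	var index = uint.Parse(parts[1]);                       // Output index (entry.Key.N above).
	var scriptBytes = Encoders.Hex.DecodeData(parts[2]);    // Hex produced by ByteHelpers.ToHex.
	return (new OutPoint(txId, index), scriptBytes);
}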