/// <summary>
/// Adds descendants of the given transactions to mapModifiedTx with ancestor
/// state updated assuming the given transactions are inBlock.
/// </summary>
/// <param name="alreadyAdded">Entries already selected for the block.</param>
/// <param name="mapModifiedTx">Modified mempool entries keyed by transaction hash.</param>
/// <returns>The number of updated descendants.</returns>
private int UpdatePackagesForAdded(TxMempool.SetEntries alreadyAdded, Dictionary<uint256, TxMemPoolModifiedEntry> mapModifiedTx)
{
    int descendantsUpdated = 0;

    foreach (TxMempoolEntry addedEntry in alreadyAdded)
    {
        var descendants = new TxMempool.SetEntries();

        // Walking the descendant set must happen under the mempool read lock.
        this.mempoolLock.ReadAsync(() => this.mempool.CalculateDescendants(addedEntry, descendants)).GetAwaiter().GetResult();

        foreach (TxMempoolEntry descendant in descendants)
        {
            // Entries already selected for the block need no modified state.
            if (alreadyAdded.Contains(descendant))
                continue;

            descendantsUpdated++;

            TxMemPoolModifiedEntry modEntry;
            if (!mapModifiedTx.TryGetValue(descendant.TransactionHash, out modEntry))
            {
                modEntry = new TxMemPoolModifiedEntry(descendant);
                mapModifiedTx.Add(descendant.TransactionHash, modEntry);
            }

            // The included ancestor no longer contributes to this descendant's package totals.
            modEntry.SizeWithAncestors -= addedEntry.GetTxSize();
            modEntry.ModFeesWithAncestors -= addedEntry.ModifiedFee;
            modEntry.SigOpCostWithAncestors -= addedEntry.SigOpCost;
        }
    }

    return descendantsUpdated;
}
/// <summary>
/// Adds descendants of the given transactions to mapModifiedTx, updating their
/// ancestor state under the assumption that the given transactions are inBlock.
/// </summary>
/// <param name="alreadyAdded">Entries already selected for the block.</param>
/// <param name="mapModifiedTx">Modified mempool entries keyed by transaction hash.</param>
/// <returns>The number of descendants that were updated.</returns>
private int UpdatePackagesForAdded(TxMempool.SetEntries alreadyAdded, Dictionary<uint256, TxMemPoolModifiedEntry> mapModifiedTx)
{
    int updatedCount = 0;

    foreach (TxMempoolEntry includedEntry in alreadyAdded)
    {
        var descendants = new TxMempool.SetEntries();

        // CalculateDescendants must run under the mempool read lock.
        this.MempoolLock.ReadAsync(() => this.Mempool.CalculateDescendants(includedEntry, descendants)).GetAwaiter().GetResult();

        foreach (TxMempoolEntry descendant in descendants)
        {
            if (alreadyAdded.Contains(descendant))
                continue;

            updatedCount++;

            TxMemPoolModifiedEntry modEntry;
            if (!mapModifiedTx.TryGetValue(descendant.TransactionHash, out modEntry))
            {
                modEntry = new TxMemPoolModifiedEntry(descendant);
                mapModifiedTx.Add(descendant.TransactionHash, modEntry);
                this.logger.LogDebug("Added transaction '{0}' to the block template because it's a required ancestor for '{1}'.", descendant.TransactionHash, includedEntry.TransactionHash);
            }

            // Subtract the included ancestor's contribution from the descendant's package totals.
            modEntry.SizeWithAncestors -= includedEntry.GetTxSize();
            modEntry.ModFeesWithAncestors -= includedEntry.ModifiedFee;
            modEntry.SigOpCostWithAncestors -= includedEntry.SigOpCost;
        }
    }

    return updatedCount;
}
/// <summary>
/// Performs transaction-level checks before adding a package to the block:
/// transaction finality (locktime), premature witness (in case segwit
/// transactions were added to the mempool before segwit activation), and
/// serialized size (in case -blockmaxsize is in use).
/// </summary>
/// <param name="package">The package of mempool entries to test.</param>
/// <returns><c>true</c> when every transaction in the package passes; otherwise <c>false</c>.</returns>
private bool TestPackageTransactions(TxMempool.SetEntries package)
{
    // Running size total is only meaningful when needSizeAccounting is set.
    long potentialBlockSize = this.blockSize;

    foreach (TxMempoolEntry entry in package)
    {
        // Non-final transactions cannot be included yet.
        if (!entry.Transaction.IsFinal(Utils.UnixTimeToDateTime(this.lockTimeCutoff), this.height))
            return false;

        // Reject witness-carrying transactions while witness data is excluded.
        if (entry.Transaction.HasWitness && !this.fIncludeWitness)
            return false;

        if (this.needSizeAccounting)
        {
            int serializedSize = entry.Transaction.GetSerializedSize();
            if (potentialBlockSize + serializedSize >= this.blockMaxSize)
                return false;

            potentialBlockSize += serializedSize;
        }
    }

    return true;
}
/// <summary>Removes confirmed (inBlock) entries from the given set.</summary>
/// <param name="testSet">Set of mempool entries to filter in place.</param>
private void OnlyUnconfirmed(TxMempool.SetEntries testSet)
{
    // Materialize the confirmed subset first so the set can be mutated safely.
    List<TxMempoolEntry> confirmed = testSet.Where(setEntry => this.inBlock.Contains(setEntry)).ToList();

    foreach (TxMempoolEntry entry in confirmed)
        testSet.Remove(entry);
}
/// <summary>
/// Resets the builder to its default state before a new block is constructed.
/// </summary>
private void Configure()
{
    this.BlockTemplate = new BlockTemplate(this.Network);
    this.inBlock = new TxMempool.SetEntries();

    // Reserve space for the coinbase transaction.
    this.BlockSize = 1000;
    this.BlockWeight = 1000 * this.Network.Consensus.Options.WitnessScaleFactor;
    this.BlockSigOpsCost = 400;
    this.IncludeWitness = false;

    // Counters exclude the coinbase transaction.
    this.BlockTx = 0;
    this.fees = 0;
}
/// <summary>
/// Initializes the assembler with sanity-clamped block weight/size limits and
/// space reserved for the coinbase transaction.
/// </summary>
public PowBlockAssembler(
    IConsensusLoop consensusLoop,
    Network network,
    MempoolSchedulerLock mempoolLock,
    ITxMempool mempool,
    IDateTimeProvider dateTimeProvider,
    ChainedBlock chainTip,
    ILoggerFactory loggerFactory,
    AssemblerOptions options = null)
{
    this.logger = loggerFactory.CreateLogger(this.GetType().FullName);

    options = options ?? new AssemblerOptions();
    this.blockMinFeeRate = options.BlockMinFeeRate;

    PowConsensusOptions consensusOptions = network.Consensus.Option<PowConsensusOptions>();

    // Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity.
    this.blockMaxWeight = (uint)Math.Max(4000, Math.Min(PowMining.DefaultBlockMaxWeight - 4000, options.BlockMaxWeight));

    // Limit size to between 1K and MAX_BLOCK_SERIALIZED_SIZE-1K for sanity.
    this.blockMaxSize = (uint)Math.Max(1000, Math.Min(consensusOptions.MaxBlockSerializedSize - 1000, options.BlockMaxSize));

    // Whether we need to account for byte usage (in addition to weight usage).
    this.needSizeAccounting = this.blockMaxSize < consensusOptions.MaxBlockSerializedSize - 1000;

    this.consensusLoop = consensusLoop;
    this.mempoolLock = mempoolLock;
    this.mempool = mempool;
    this.dateTimeProvider = dateTimeProvider;
    this.options = options;
    this.network = network;
    this.ChainTip = chainTip;

    this.inBlock = new TxMempool.SetEntries();

    // Reserve space for the coinbase transaction.
    this.blockSize = 1000;
    this.blockWeight = 4000;
    this.blockSigOpsCost = 400;
    this.fIncludeWitness = false;

    // These counters do not include the coinbase transaction.
    this.blockTx = 0;
    this.fees = 0;

    this.pblocktemplate = new BlockTemplate { Block = new Block(), VTxFees = new List<Money>() };
}
/// <summary>
/// Resets the builder to its default state before a new block is constructed.
/// </summary>
private void Configure()
{
    this.inBlock = new TxMempool.SetEntries();
    this.BlockTemplate = new BlockTemplate(this.Network);

    // Reserve space for the coinbase transaction
    // (see bitcoind miner.cpp, void BlockAssembler::resetBlock()).
    this.BlockSize = 1000;
    this.BlockWeight = 1000 * this.Network.Consensus.Options.WitnessScaleFactor;
    this.BlockSigOpsCost = 400;
    this.IncludeWitness = false;

    // These counters do not include the coinbase transaction.
    this.BlockTx = 0;
    this.fees = 0;
}
/// <summary>
/// Adds descendants of the given transactions to mapModifiedTx with ancestor
/// state updated assuming the given transactions are inBlock.
/// </summary>
/// <param name="alreadyAdded">Entries already selected for the block.</param>
/// <param name="mapModifiedTx">Modified mempool entries keyed by transaction hash.</param>
/// <returns>The number of updated descendants.</returns>
int UpdatePackagesForAdded(TxMempool.SetEntries alreadyAdded, Dictionary<uint256, TxMemPoolModifiedEntry> mapModifiedTx)
{
    int updated = 0;

    foreach (TxMempoolEntry included in alreadyAdded)
    {
        var descendants = new TxMempool.SetEntries();

        this.MempoolLock.ReadAsync(() =>
        {
            // The entry may have been evicted since selection; skip it in that case.
            if (!this.Mempool.MapTx.ContainsKey(included.TransactionHash))
            {
                this.logger.LogWarning("{0} is not present in {1} any longer, skipping.", included.TransactionHash, nameof(this.Mempool.MapTx));
                return;
            }

            this.Mempool.CalculateDescendants(included, descendants);
        }).GetAwaiter().GetResult();

        foreach (TxMempoolEntry descendant in descendants)
        {
            if (alreadyAdded.Contains(descendant))
                continue;

            updated++;

            TxMemPoolModifiedEntry modEntry;
            if (!mapModifiedTx.TryGetValue(descendant.TransactionHash, out modEntry))
            {
                modEntry = new TxMemPoolModifiedEntry(descendant);
                mapModifiedTx.Add(descendant.TransactionHash, modEntry);
                this.logger.LogDebug(
                    "Added transaction '{0}' to the block template because it's a required ancestor for '{1}'.",
                    descendant.TransactionHash, included.TransactionHash);
            }

            // Remove the included ancestor's contribution from the descendant's package state.
            modEntry.SizeWithAncestors -= included.GetTxSize();
            modEntry.ModFeesWithAncestors -= included.ModifiedFee;
            modEntry.SigOpCostWithAncestors -= included.SigOpCost;
        }
    }

    return updated;
}
/// <summary>
/// Initializes the assembler with sanity-clamped block weight/size limits and
/// space reserved for the coinbase transaction.
/// </summary>
public PowBlockAssembler(ConsensusLoop consensusLoop, Network network, ConcurrentChain chain, MempoolScheduler mempoolScheduler, TxMempool mempool, IDateTimeProvider dateTimeProvider, AssemblerOptions options = null)
{
    options = options ?? new AssemblerOptions();
    this.blockMinFeeRate = options.BlockMinFeeRate;

    PowConsensusOptions powOptions = network.Consensus.Option<PowConsensusOptions>();

    // Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity.
    this.blockMaxWeight = (uint)Math.Max(4000, Math.Min(PowMining.DefaultBlockMaxWeight - 4000, options.BlockMaxWeight));

    // Limit size to between 1K and MAX_BLOCK_SERIALIZED_SIZE-1K for sanity.
    this.blockMaxSize = (uint)Math.Max(1000, Math.Min(powOptions.MAX_BLOCK_SERIALIZED_SIZE - 1000, options.BlockMaxSize));

    // Whether byte usage must be accounted for in addition to weight usage.
    this.needSizeAccounting = this.blockMaxSize < powOptions.MAX_BLOCK_SERIALIZED_SIZE - 1000;

    this.consensusLoop = consensusLoop;
    this.chain = chain;
    this.mempoolScheduler = mempoolScheduler;
    this.mempool = mempool;
    this.dateTimeProvider = dateTimeProvider;
    this.options = options;
    this.network = network;

    this.inBlock = new TxMempool.SetEntries();

    // Reserve space for the coinbase transaction.
    this.blockSize = 1000;
    this.blockWeight = 4000;
    this.blockSigOpsCost = 400;
    this.fIncludeWitness = false;

    // These counters do not include the coinbase transaction.
    this.blockTx = 0;
    this.fees = 0;

    this.pblocktemplate = new BlockTemplate { Block = new Block(), VTxFees = new List<Money>() };
}
// Methods for how to add transactions to a block.
// Add transactions based on feerate including unconfirmed ancestors.
// Increments nPackagesSelected / nDescendantsUpdated with corresponding
// statistics from the package selection (for logging statistics).
//
// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
protected virtual void AddTransactions(out int nPackagesSelected, out int nDescendantsUpdated)
{
    nPackagesSelected = 0;
    nDescendantsUpdated = 0;
    this.logger.LogTrace("({0}:{1},{2}:{3})", nameof(nPackagesSelected), nPackagesSelected, nameof(nDescendantsUpdated), nDescendantsUpdated);

    // mapModifiedTx will store sorted packages after they are modified
    // because some of their txs are already in the block.
    var mapModifiedTx = new Dictionary<uint256, TxMemPoolModifiedEntry>();

    // Keep track of entries that failed inclusion, to avoid duplicate work.
    var failedTx = new TxMempool.SetEntries();

    // Start by adding all descendants of previously added txs to mapModifiedTx
    // and modifying them for their already included ancestors.
    this.UpdatePackagesForAdded(this.inBlock, mapModifiedTx);

    List<TxMempoolEntry> ancestorScoreList = this.mempoolLock.ReadAsync(() => this.mempool.MapTx.AncestorScore).GetAwaiter().GetResult().ToList();

    TxMempoolEntry iter;

    int nConsecutiveFailed = 0;
    while (ancestorScoreList.Any() || mapModifiedTx.Any())
    {
        TxMempoolEntry mi = ancestorScoreList.FirstOrDefault();
        if (mi != null)
        {
            // Skip entries in mapTx that are already in a block or are present
            // in mapModifiedTx (which implies that the mapTx ancestor state is
            // stale due to ancestor inclusion in the block).
            // Also skip transactions that we've already failed to add. This can happen if
            // we consider a transaction in mapModifiedTx and it fails: we can then
            // potentially consider it again while walking mapTx. It's currently
            // guaranteed to fail again, but as a belt-and-suspenders check we put it in
            // failedTx and avoid re-evaluation, since the re-evaluation would be using
            // cached size/sigops/fee values that are not actually correct.
            if (mapModifiedTx.ContainsKey(mi.TransactionHash) || this.inBlock.Contains(mi) || failedTx.Contains(mi))
            {
                ancestorScoreList.Remove(mi);
                continue;
            }
        }

        // Now that mi is not stale, determine which transaction to evaluate:
        // the next entry from mapTx, or the best from mapModifiedTx?
        bool fUsingModified = false;
        TxMemPoolModifiedEntry modit;
        var compare = new CompareModifiedEntry();
        if (mi == null)
        {
            modit = mapModifiedTx.Values.OrderByDescending(o => o, compare).First();
            iter = modit.iter;
            fUsingModified = true;
        }
        else
        {
            // Try to compare the mapTx entry to the mapModifiedTx entry.
            iter = mi;
            modit = mapModifiedTx.Values.OrderByDescending(o => o, compare).FirstOrDefault();
            if ((modit != null) && (compare.Compare(modit, new TxMemPoolModifiedEntry(iter)) > 0))
            {
                // The best entry in mapModifiedTx has higher score than
                // the one from mapTx. Switch which transaction (package) to consider.
                iter = modit.iter;
                fUsingModified = true;
            }
            else
            {
                // Either no entry in mapModifiedTx, or it's worse than mapTx.
                // Increment mi for the next loop iteration.
                ancestorScoreList.Remove(iter);
            }
        }

        // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
        // contain anything that is inBlock.
        Guard.Assert(!this.inBlock.Contains(iter));

        long packageSize = iter.SizeWithAncestors;
        Money packageFees = iter.ModFeesWithAncestors;

        // BUGFIX: this previously read iter.SizeWithAncestors, making the package
        // sigops check below compare block sigop budget against package *size*
        // for the non-modified path (bitcoind uses GetSigOpCostWithAncestors()).
        long packageSigOpsCost = iter.SigOpCostWithAncestors;

        if (fUsingModified)
        {
            packageSize = modit.SizeWithAncestors;
            packageFees = modit.ModFeesWithAncestors;
            packageSigOpsCost = modit.SigOpCostWithAncestors;
        }

        // Everything else we might consider has a lower fee rate.
        if (packageFees < this.blockMinFeeRate.GetFee((int)packageSize))
            return;

        if (!this.TestPackage(packageSize, packageSigOpsCost))
        {
            if (fUsingModified)
            {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
                // next best entry on the next loop iteration.
                mapModifiedTx.Remove(modit.iter.TransactionHash);
                failedTx.Add(iter);
            }

            nConsecutiveFailed++;

            // Give up if we're close to full and haven't succeeded in a while.
            if ((nConsecutiveFailed > this.MaxConsecutiveAddTransactionFailures) && (this.blockWeight > this.blockMaxWeight - 4000))
                break;

            continue;
        }

        var ancestors = new TxMempool.SetEntries();
        long nNoLimit = long.MaxValue;
        string dummy;
        this.mempool.CalculateMemPoolAncestors(iter, ancestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, out dummy, false);

        this.OnlyUnconfirmed(ancestors);
        ancestors.Add(iter);

        // Test if all tx's are final.
        if (!this.TestPackageTransactions(ancestors))
        {
            if (fUsingModified)
            {
                mapModifiedTx.Remove(modit.iter.TransactionHash);
                failedTx.Add(iter);
            }

            continue;
        }

        // This transaction will make it in; reset the failed counter.
        nConsecutiveFailed = 0;

        // Package can be added. Sort the entries in a valid order.
        // Sort package by ancestor count: if a transaction A depends on
        // transaction B, then A's ancestor count must be greater than B's,
        // so this is sufficient to validly order the transactions for
        // block inclusion.
        List<TxMempoolEntry> sortedEntries = ancestors.ToList().OrderBy(o => o, new CompareTxIterByAncestorCount()).ToList();
        foreach (TxMempoolEntry sortedEntry in sortedEntries)
        {
            this.AddToBlock(sortedEntry);

            // Erase from the modified set, if present.
            mapModifiedTx.Remove(sortedEntry.TransactionHash);
        }

        nPackagesSelected++;

        // Update transactions that depend on each of these.
        nDescendantsUpdated += this.UpdatePackagesForAdded(ancestors, mapModifiedTx);
    }

    this.logger.LogTrace("(-)");
}
// Verifies the mempool's DescendantScore and MiningScore index ordering as
// transactions with varying fees, priorities, and parent/child relationships
// are added and removed. The expected orders below list the lowest score first.
public void MempoolIndexingTest()
{
    NodeSettings settings = NodeSettings.Default(KnownNetworks.TestNet);
    var pool = new TxMempool(DateTimeProvider.Default, new BlockPolicyEstimator(new MempoolSettings(settings), settings.LoggerFactory, settings), settings.LoggerFactory, settings);
    var entry = new TestMemPoolEntryHelper();

    /* 3rd highest fee */
    var tx1 = new Transaction();
    tx1.AddOutput(new TxOut(new Money(10 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx1.GetHash(), entry.Fee(new Money(10000L)).Priority(10.0).FromTx(tx1));

    /* highest fee */
    var tx2 = new Transaction();
    tx2.AddOutput(new TxOut(new Money(2 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx2.GetHash(), entry.Fee(new Money(20000L)).Priority(9.0).FromTx(tx2));

    /* lowest fee */
    var tx3 = new Transaction();
    tx3.AddOutput(new TxOut(new Money(5 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx3.GetHash(), entry.Fee(new Money(0L)).Priority(100.0).FromTx(tx3));

    /* 2nd highest fee */
    var tx4 = new Transaction();
    tx4.AddOutput(new TxOut(new Money(6 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx4.GetHash(), entry.Fee(new Money(15000L)).Priority(1.0).FromTx(tx4));

    /* equal fee rate to tx1, but newer */
    var tx5 = new Transaction();
    tx5.AddOutput(new TxOut(new Money(11 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx5.GetHash(), entry.Fee(new Money(10000L)).Priority(10.0).Time(1).FromTx(tx5));

    // assert size
    Assert.Equal(5, pool.Size);

    // Expected DescendantScore order (lowest score first).
    var sortedOrder = new List<string>(5);
    sortedOrder.Insert(0, tx3.GetHash().ToString()); // 0
    sortedOrder.Insert(1, tx5.GetHash().ToString()); // 10000
    sortedOrder.Insert(2, tx1.GetHash().ToString()); // 10000
    sortedOrder.Insert(3, tx4.GetHash().ToString()); // 15000
    sortedOrder.Insert(4, tx2.GetHash().ToString()); // 20000
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    /* low fee but with high fee child */
    /* tx6 -> tx7 -> tx8, tx9 -> tx10 */
    var tx6 = new Transaction();
    tx6.AddOutput(new TxOut(new Money(20 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx6.GetHash(), entry.Fee(new Money(0L)).FromTx(tx6));

    // assert size
    Assert.Equal(6, pool.Size);

    // Check that at this point, tx6 is sorted low
    sortedOrder.Insert(0, tx6.GetHash().ToString());
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    var setAncestors = new TxMempool.SetEntries();
    setAncestors.Add(pool.MapTx.TryGet(tx6.GetHash()));

    var tx7 = new Transaction();
    tx7.AddInput(new TxIn(new OutPoint(tx6.GetHash(), 0), new Script(OpcodeType.OP_11)));
    tx7.AddOutput(new TxOut(new Money(10 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    tx7.AddOutput(new TxOut(new Money(1 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));

    // CalculateMemPoolAncestors should find exactly tx6 as tx7's in-mempool ancestor.
    var setAncestorsCalculated = new TxMempool.SetEntries();
    string dummy;
    Assert.True(pool.CalculateMemPoolAncestors(entry.Fee(2000000L).FromTx(tx7), setAncestorsCalculated, 100, 1000000, 1000, 1000000, out dummy));
    Assert.True(setAncestorsCalculated.Equals(setAncestors));

    pool.AddUnchecked(tx7.GetHash(), entry.FromTx(tx7), setAncestors);
    Assert.Equal(7, pool.Size);

    // Now tx6 should be sorted higher (high fee child): tx7, tx6, tx2, ...
    sortedOrder.RemoveAt(0);
    sortedOrder.Add(tx6.GetHash().ToString());
    sortedOrder.Add(tx7.GetHash().ToString());
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    /* low fee child of tx7 */
    var tx8 = new Transaction();
    tx8.AddInput(new TxIn(new OutPoint(tx7.GetHash(), 0), new Script(OpcodeType.OP_11)));
    tx8.AddOutput(new TxOut(new Money(10 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    setAncestors.Add(pool.MapTx.TryGet(tx7.GetHash()));
    pool.AddUnchecked(tx8.GetHash(), entry.Fee(0L).Time(2).FromTx(tx8), setAncestors);

    // Now tx8 should be sorted low, but tx6/tx both high
    sortedOrder.Insert(0, tx8.GetHash().ToString());
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    /* low fee child of tx7 */
    var tx9 = new Transaction();
    tx9.AddInput(new TxIn(new OutPoint(tx7.GetHash(), 1), new Script(OpcodeType.OP_11)));
    tx9.AddOutput(new TxOut(new Money(1 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));
    pool.AddUnchecked(tx9.GetHash(), entry.Fee(0L).Time(3).FromTx(tx9), setAncestors);

    // tx9 should be sorted low
    Assert.Equal(9, pool.Size);
    sortedOrder.Insert(0, tx9.GetHash().ToString());
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    // Snapshot the order so it can be checked again after tx10 is removed below.
    List<string> snapshotOrder = sortedOrder.ToList();

    setAncestors.Add(pool.MapTx.TryGet(tx8.GetHash()));
    setAncestors.Add(pool.MapTx.TryGet(tx9.GetHash()));

    /* tx10 depends on tx8 and tx9 and has a high fee*/
    var tx10 = new Transaction();
    tx10.AddInput(new TxIn(new OutPoint(tx8.GetHash(), 0), new Script(OpcodeType.OP_11)));
    tx10.AddInput(new TxIn(new OutPoint(tx9.GetHash(), 0), new Script(OpcodeType.OP_11)));
    tx10.AddOutput(new TxOut(new Money(10 * Money.COIN), new Script(OpcodeType.OP_11, OpcodeType.OP_EQUAL)));

    setAncestorsCalculated.Clear();
    Assert.True(pool.CalculateMemPoolAncestors(entry.Fee(200000L).Time(4).FromTx(tx10), setAncestorsCalculated, 100, 1000000, 1000, 1000000, out dummy));
    Assert.True(setAncestorsCalculated.Equals(setAncestors));

    pool.AddUnchecked(tx10.GetHash(), entry.FromTx(tx10), setAncestors);

    /**
     * tx8 and tx9 should both now be sorted higher
     * Final order after tx10 is added:
     *
     * tx3 = 0 (1)
     * tx5 = 10000 (1)
     * tx1 = 10000 (1)
     * tx4 = 15000 (1)
     * tx2 = 20000 (1)
     * tx9 = 200k (2 txs)
     * tx8 = 200k (2 txs)
     * tx10 = 200k (1 tx)
     * tx6 = 2.2M (5 txs)
     * tx7 = 2.2M (4 txs)
     */
    sortedOrder.RemoveRange(0, 2); // take out tx9, tx8 from the beginning
    sortedOrder.Insert(5, tx9.GetHash().ToString());
    sortedOrder.Insert(6, tx8.GetHash().ToString());
    sortedOrder.Insert(7, tx10.GetHash().ToString()); // tx10 is just before tx6
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), sortedOrder);

    // there should be 10 transactions in the mempool
    Assert.Equal(10, pool.Size);

    // Now try removing tx10 and verify the sort order returns to normal
    pool.RemoveRecursive(pool.MapTx.TryGet(tx10.GetHash()).Transaction);
    this.CheckSort(pool, pool.MapTx.DescendantScore.ToList(), snapshotOrder);

    pool.RemoveRecursive(pool.MapTx.TryGet(tx9.GetHash()).Transaction);
    pool.RemoveRecursive(pool.MapTx.TryGet(tx8.GetHash()).Transaction);

    /* Now check the sort on the mining score index.
     * Final order should be:
     *
     * tx7 (2M)
     * tx2 (20k)
     * tx4 (15000)
     * tx1/tx5 (10000)
     * tx3/6 (0)
     * (Ties resolved by hash)
     */
    sortedOrder.Clear();
    sortedOrder.Add(tx7.GetHash().ToString());
    sortedOrder.Add(tx2.GetHash().ToString());
    sortedOrder.Add(tx4.GetHash().ToString());
    if (tx1.GetHash() < tx5.GetHash())
    {
        sortedOrder.Add(tx5.GetHash().ToString());
        sortedOrder.Add(tx1.GetHash().ToString());
    }
    else
    {
        sortedOrder.Add(tx1.GetHash().ToString());
        sortedOrder.Add(tx5.GetHash().ToString());
    }

    if (tx3.GetHash() < tx6.GetHash())
    {
        sortedOrder.Add(tx6.GetHash().ToString());
        sortedOrder.Add(tx3.GetHash().ToString());
    }
    else
    {
        sortedOrder.Add(tx3.GetHash().ToString());
        sortedOrder.Add(tx6.GetHash().ToString());
    }

    this.CheckSort(pool, pool.MapTx.MiningScore.ToList(), sortedOrder);
}
/// <summary>
/// Replace-by-fee (RBF) policy check: verifies that it is economically
/// rational to mine this transaction rather than the mempool transactions it
/// conflicts with. Populates <c>context.AllConflicting</c>,
/// <c>ConflictingFees</c>, <c>ConflictingSize</c> and <c>ConflictingCount</c>,
/// and fails the validation state (throwing) when any replacement rule is violated.
/// </summary>
/// <param name="context">The current mempool validation context.</param>
public override void CheckTransaction(MempoolValidationContext context)
{
    // Check if it's economically rational to mine this transaction rather
    // than the ones it replaces.
    context.ConflictingFees = 0;
    context.ConflictingSize = 0;
    context.ConflictingCount = 0;
    context.AllConflicting = new TxMempool.SetEntries();

    // If we don't hold the lock allConflicting might be incomplete; the
    // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
    // mempool consistency for us.
    //LOCK(pool.cs);
    if (context.SetConflicts.Any())
    {
        var newFeeRate = new FeeRate(context.ModifiedFees, context.EntrySize);
        var setConflictsParents = new List<uint256>();
        const int MaxDescendantsToVisit = 100;
        var setIterConflicting = new TxMempool.SetEntries();
        foreach (uint256 hashConflicting in context.SetConflicts)
        {
            // A conflict may already have been evicted from the mempool.
            TxMempoolEntry mi = this.mempool.MapTx.TryGet(hashConflicting);
            if (mi == null)
            {
                continue;
            }

            // Save these to avoid repeated lookups
            setIterConflicting.Add(mi);

            // Don't allow the replacement to reduce the feerate of the
            // mempool.
            //
            // We usually don't want to accept replacements with lower
            // feerates than what they replaced as that would lower the
            // feerate of the next block. Requiring that the feerate always
            // be increased is also an easy-to-reason about way to prevent
            // DoS attacks via replacements.
            //
            // The mining code doesn't (currently) take children into
            // account (CPFP) so we only consider the feerates of
            // transactions being directly replaced, not their indirect
            // descendants. While that does mean high feerate children are
            // ignored when deciding whether or not to replace, we do
            // require the replacement to pay more overall fees too,
            // mitigating most cases.
            var oldFeeRate = new FeeRate(mi.ModifiedFee, (int)mi.GetTxSize());
            if (newFeeRate <= oldFeeRate)
            {
                this.logger.LogTrace("(-)[FAIL_INSUFFICIENT_FEE]");
                context.State.Fail(MempoolErrors.InsufficientFee, $"rejecting replacement {context.TransactionHash}; new feerate {newFeeRate} <= old feerate {oldFeeRate}").Throw();
            }

            // Collect the parents of the conflict for the unconfirmed-input check below.
            foreach (TxIn txin in mi.Transaction.Inputs)
            {
                setConflictsParents.Add(txin.PrevOut.Hash);
            }

            context.ConflictingCount += mi.CountWithDescendants;
        }

        // This potentially overestimates the number of actual descendants
        // but we just want to be conservative to avoid doing too much
        // work.
        if (context.ConflictingCount <= MaxDescendantsToVisit)
        {
            // If not too many to replace, then calculate the set of
            // transactions that would have to be evicted
            foreach (TxMempoolEntry it in setIterConflicting)
            {
                this.mempool.CalculateDescendants(it, context.AllConflicting);
            }

            // Sum the fees and sizes of everything that would be evicted.
            foreach (TxMempoolEntry it in context.AllConflicting)
            {
                context.ConflictingFees += it.ModifiedFee;
                context.ConflictingSize += it.GetTxSize();
            }
        }
        else
        {
            this.logger.LogTrace("(-)[FAIL_TOO_MANY_POTENTIAL_REPLACEMENTS]");
            context.State.Fail(MempoolErrors.TooManyPotentialReplacements, $"rejecting replacement {context.TransactionHash}; too many potential replacements ({context.ConflictingCount} > {MaxDescendantsToVisit})").Throw();
        }

        for (int j = 0; j < context.Transaction.Inputs.Count; j++)
        {
            // We don't want to accept replacements that require low
            // feerate junk to be mined first. Ideally we'd keep track of
            // the ancestor feerates and make the decision based on that,
            // but for now requiring all new inputs to be confirmed works.
            if (!setConflictsParents.Contains(context.Transaction.Inputs[j].PrevOut.Hash))
            {
                // Rather than check the UTXO set - potentially expensive -
                // it's cheaper to just check if the new input refers to a
                // tx that's in the mempool.
                if (this.mempool.MapTx.ContainsKey(context.Transaction.Inputs[j].PrevOut.Hash))
                {
                    this.logger.LogTrace("(-)[FAIL_REPLACEMENT_ADDS_UNCONFIRMED]");
                    context.State.Fail(MempoolErrors.ReplacementAddsUnconfirmed, $"replacement {context.TransactionHash} adds unconfirmed input, idx {j}").Throw();
                }
            }
        }

        // The replacement must pay greater fees than the transactions it
        // replaces - if we did the bandwidth used by those conflicting
        // transactions would not be paid for.
        if (context.ModifiedFees < context.ConflictingFees)
        {
            this.logger.LogTrace("(-)[FAIL_INSUFFICIENT_FEE]");
            context.State.Fail(MempoolErrors.Insufficientfee, $"rejecting replacement {context.TransactionHash}, less fees than conflicting txs; {context.ModifiedFees} < {context.ConflictingFees}").Throw();
        }

        // Finally in addition to paying more fees than the conflicts the
        // new transaction must pay for its own bandwidth.
        Money nDeltaFees = context.ModifiedFees - context.ConflictingFees;
        if (nDeltaFees < context.MinRelayTxFee.GetFee(context.EntrySize))
        {
            this.logger.LogTrace("(-)[FAIL_INSUFFICIENT_DELTA_FEE]");
            context.State.Fail(MempoolErrors.Insufficientfee, $"rejecting replacement {context.TransactionHash}, not enough additional fees to relay; {nDeltaFees} < {context.MinRelayTxFee.GetFee(context.EntrySize)}").Throw();
        }
    }
}