// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <node/miner.h>

#include <chain.h>
#include <chainparams.h>
#include <coins.h>
#include <common/args.h>
#include <consensus/amount.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <deploymentstatus.h>
#include <logging.h>
#include <policy/feerate.h>
#include <policy/policy.h>
#include <pow.h>
#include <primitives/transaction.h>
#include <util/check.h>
#include <util/moneystr.h>
#include <util/time.h>
#include <validation.h>

#include <algorithm>
#include <utility>

namespace node {

int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
    int64_t nOldTime = pblock->nTime;
    int64_t nNewTime{std::max<int64_t>(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch<std::chrono::seconds>(NodeClock::now()))};

    if (consensusParams.enforce_BIP94) {
        // Height of block to be mined.
        const int height{pindexPrev->nHeight + 1};
        if (height % consensusParams.DifficultyAdjustmentInterval() == 0) {
            nNewTime = std::max(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP);
        }
    }

    if (nOldTime < nNewTime) {
        pblock->nTime = nNewTime;
    }

    // Updating time can change work required on testnet:
    if (consensusParams.fPowAllowMinDifficultyBlocks) {
        pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, consensusParams);
    }

    return nNewTime - nOldTime;
}

void RegenerateCommitments(CBlock& block, ChainstateManager& chainman)
{
    CMutableTransaction tx{*block.vtx.at(0)};
    tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block));
    block.vtx.at(0) = MakeTransactionRef(tx);

    const CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock));
    chainman.GenerateCoinbaseCommitment(block, prev_block);

    block.hashMerkleRoot = BlockMerkleRoot(block);
}

static BlockAssembler::Options ClampOptions(BlockAssembler::Options options)
{
    Assert(options.coinbase_max_additional_weight <= DEFAULT_BLOCK_MAX_WEIGHT);
    Assert(options.coinbase_output_max_additional_sigops <= MAX_BLOCK_SIGOPS_COST);
    // Limit weight to between coinbase_max_additional_weight and DEFAULT_BLOCK_MAX_WEIGHT for sanity:
    // Coinbase (reserved) outputs can safely exceed -blockmaxweight, but the rest of the block template will be empty.
    options.nBlockMaxWeight = std::clamp<size_t>(options.nBlockMaxWeight, options.coinbase_max_additional_weight, DEFAULT_BLOCK_MAX_WEIGHT);
    return options;
}

BlockAssembler::BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool, const Options& options)
    : chainparams{chainstate.m_chainman.GetParams()},
      m_mempool{options.use_mempool ? mempool : nullptr},
      m_chainstate{chainstate},
      m_options{ClampOptions(options)}
{
}
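// Read the block-construction options that can be set on the command line
// (-blockmaxweight, -blockmintxfee, -printpriority) into `options`. The values
// are not range-checked here; the weight limit is clamped to sane bounds by
// ClampOptions() when the BlockAssembler is constructed.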
void ApplyArgsManOptions(const ArgsManager& args, BlockAssembler::Options& options)
{
    // Block resource limits
    options.nBlockMaxWeight = args.GetIntArg("-blockmaxweight", options.nBlockMaxWeight);
    if (const auto blockmintxfee{args.GetArg("-blockmintxfee")}) {
        if (const auto parsed{ParseMoney(*blockmintxfee)}) options.blockMinFeeRate = CFeeRate{*parsed};
    }
    options.print_modified_fee = args.GetBoolArg("-printpriority", options.print_modified_fee);
}

void BlockAssembler::resetBlock()
{
    inBlock.clear();

    // Reserve space for coinbase tx
    nBlockWeight = m_options.coinbase_max_additional_weight;
    nBlockSigOpsCost = m_options.coinbase_output_max_additional_sigops;

    // These counters do not include coinbase tx
    nBlockTx = 0;
    nFees = 0;
}

std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& scriptPubKeyIn)
{
    const auto time_start{SteadyClock::now()};

    resetBlock();

    pblocktemplate.reset(new CBlockTemplate());

    if (!pblocktemplate.get()) {
        return nullptr;
    }
    CBlock* const pblock = &pblocktemplate->block; // pointer for convenience

    // Add dummy coinbase tx as first transaction
    pblock->vtx.emplace_back();
    pblocktemplate->vTxFees.push_back(-1); // updated at end
    pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end

    LOCK(::cs_main);
    CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip();
    assert(pindexPrev != nullptr);
    nHeight = pindexPrev->nHeight + 1;

    pblock->nVersion = m_chainstate.m_chainman.m_versionbitscache.ComputeBlockVersion(pindexPrev, chainparams.GetConsensus());
    // -regtest only: allow overriding block.nVersion with
    // -blockversion=N to test forking scenarios
    if (chainparams.MineBlocksOnDemand()) {
        pblock->nVersion = gArgs.GetIntArg("-blockversion", pblock->nVersion);
    }

    pblock->nTime = TicksSinceEpoch<std::chrono::seconds>(NodeClock::now());
    m_lock_time_cutoff = pindexPrev->GetMedianTimePast();

    int nPackagesSelected = 0;
    int nDescendantsUpdated = 0;
    if (m_mempool) {
        LOCK(m_mempool->cs);
        addPackageTxs(*m_mempool, nPackagesSelected, nDescendantsUpdated);
    }

    const auto time_1{SteadyClock::now()};

    m_last_block_num_txs = nBlockTx;
    m_last_block_weight = nBlockWeight;

    // Create coinbase transaction.
    CMutableTransaction coinbaseTx;
    coinbaseTx.vin.resize(1);
    coinbaseTx.vin[0].prevout.SetNull();
    coinbaseTx.vout.resize(1);
    coinbaseTx.vout[0].scriptPubKey = scriptPubKeyIn;
    coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
    coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
    pblock->vtx[0] = MakeTransactionRef(std::move(coinbaseTx));
    pblocktemplate->vchCoinbaseCommitment = m_chainstate.m_chainman.GenerateCoinbaseCommitment(*pblock, pindexPrev);
    pblocktemplate->vTxFees[0] = -nFees;

    LogPrintf("CreateNewBlock(): block weight: %u txs: %u fees: %ld sigops %d\n", GetBlockWeight(*pblock), nBlockTx, nFees, nBlockSigOpsCost);

    // Fill in header
    pblock->hashPrevBlock = pindexPrev->GetBlockHash();
    UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
    pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
    pblock->nNonce = 0;
    pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]);

    BlockValidationState state;
    if (m_options.test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev,
                                                            /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) {
        throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString()));
    }
    const auto time_2{SteadyClock::now()};

    LogPrint(BCLog::BENCH, "CreateNewBlock() packages: %.2fms (%d packages, %d updated descendants), validity: %.2fms (total %.2fms)\n",
             Ticks<MillisecondsDouble>(time_1 - time_start), nPackagesSelected, nDescendantsUpdated,
             Ticks<MillisecondsDouble>(time_2 - time_1),
             Ticks<MillisecondsDouble>(time_2 - time_start));

    return std::move(pblocktemplate);
}
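// Remove from testSet any entries whose transactions have already been added to
// the block (tracked by txid in inBlock), leaving only the unconfirmed
// ancestors that still need to be included alongside the candidate package.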
void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
{
    for (CTxMemPool::setEntries::iterator iit = testSet.begin(); iit != testSet.end(); ) {
        // Only test txs not already in the block
        if (inBlock.count((*iit)->GetSharedTx()->GetHash())) {
            testSet.erase(iit++);
        } else {
            iit++;
        }
    }
}

bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost) const
{
    // TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
    if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= m_options.nBlockMaxWeight) {
        return false;
    }
    if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST) {
        return false;
    }
    return true;
}

// Perform transaction-level checks before adding to block:
// - transaction finality (locktime)
bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package) const
{
    for (CTxMemPool::txiter it : package) {
        if (!IsFinalTx(it->GetTx(), nHeight, m_lock_time_cutoff)) {
            return false;
        }
    }
    return true;
}

void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
{
    pblocktemplate->block.vtx.emplace_back(iter->GetSharedTx());
    pblocktemplate->vTxFees.push_back(iter->GetFee());
    pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
    nBlockWeight += iter->GetTxWeight();
    ++nBlockTx;
    nBlockSigOpsCost += iter->GetSigOpCost();
    nFees += iter->GetFee();
    inBlock.insert(iter->GetSharedTx()->GetHash());

    if (m_options.print_modified_fee) {
        LogPrintf("fee rate %s txid %s\n",
                  CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
                  iter->GetTx().GetHash().ToString());
    }
}
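// Illustrative example of the ancestor-score bookkeeping used below (numbers
// are hypothetical): suppose child C (fee 2000 sat, size 500 vB) spends an
// output of unconfirmed parent P (fee 1000 sat, size 500 vB). C's ancestor
// score is (2000 + 1000) / (500 + 500) = 3 sat/vB. Once P's package has been
// added to the block, UpdatePackagesForAdded() records a modified entry for C
// whose ancestor fee/size no longer count P, so C is subsequently scored at
// 2000 / 500 = 4 sat/vB.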
/** Add descendants of given transactions to mapModifiedTx with ancestor
 * state updated assuming given transactions are inBlock. Returns number
 * of updated descendants. */
static int UpdatePackagesForAdded(const CTxMemPool& mempool,
                                  const CTxMemPool::setEntries& alreadyAdded,
                                  indexed_modified_transaction_set& mapModifiedTx) EXCLUSIVE_LOCKS_REQUIRED(mempool.cs)
{
    AssertLockHeld(mempool.cs);

    int nDescendantsUpdated = 0;
    for (CTxMemPool::txiter it : alreadyAdded) {
        CTxMemPool::setEntries descendants;
        mempool.CalculateDescendants(it, descendants);
        // Insert all descendants (not yet in block) into the modified set
        for (CTxMemPool::txiter desc : descendants) {
            if (alreadyAdded.count(desc)) {
                continue;
            }
            ++nDescendantsUpdated;
            modtxiter mit = mapModifiedTx.find(desc);
            if (mit == mapModifiedTx.end()) {
                CTxMemPoolModifiedEntry modEntry(desc);
                mit = mapModifiedTx.insert(modEntry).first;
            }
            mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
        }
    }
    return nDescendantsUpdated;
}

void BlockAssembler::SortForBlock(const CTxMemPool::setEntries& package, std::vector<CTxMemPool::txiter>& sortedEntries)
{
    // Sort package by ancestor count
    // If a transaction A depends on transaction B, then A's ancestor count
    // must be greater than B's. So this is sufficient to validly order the
    // transactions for block inclusion.
    sortedEntries.clear();
    sortedEntries.insert(sortedEntries.begin(), package.begin(), package.end());
    std::sort(sortedEntries.begin(), sortedEntries.end(), CompareTxIterByAncestorCount());
}

// This transaction selection algorithm orders the mempool based
// on feerate of a transaction including all unconfirmed ancestors.
// Since we don't remove transactions from the mempool as we select them
// for block inclusion, we need an alternate method of updating the feerate
// of a transaction with its not-yet-selected ancestors as we go.
// This is accomplished by walking the in-mempool descendants of selected
// transactions and storing a temporary modified state in mapModifiedTxs.
// Each time through the loop, we compare the best transaction in
// mapModifiedTxs with the next transaction in the mempool to decide what
// transaction package to work on next.
void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSelected, int& nDescendantsUpdated)
{
    AssertLockHeld(mempool.cs);

    // mapModifiedTx will store sorted packages after they are modified
    // because some of their txs are already in the block
    indexed_modified_transaction_set mapModifiedTx;
    // Keep track of entries that failed inclusion, to avoid duplicate work
    std::set<Txid> failedTx;

    CTxMemPool::indexed_transaction_set::index<ancestor_score>::type::iterator mi = mempool.mapTx.get<ancestor_score>().begin();
    CTxMemPool::txiter iter;

    // Limit the number of attempts to add transactions to the block when it is
    // close to full; this is just a simple heuristic to finish quickly if the
    // mempool has a lot of entries.
    const int64_t MAX_CONSECUTIVE_FAILURES = 1000;
    int64_t nConsecutiveFailed = 0;

    while (mi != mempool.mapTx.get<ancestor_score>().end() || !mapModifiedTx.empty()) {
        // First try to find a new transaction in mapTx to evaluate.
        //
        // Skip entries in mapTx that are already in a block or are present
        // in mapModifiedTx (which implies that the mapTx ancestor state is
        // stale due to ancestor inclusion in the block)
        // Also skip transactions that we've already failed to add. This can happen if
        // we consider a transaction in mapModifiedTx and it fails: we can then
        // potentially consider it again while walking mapTx. It's currently
        // guaranteed to fail again, but as a belt-and-suspenders check we put it in
        // failedTx and avoid re-evaluation, since the re-evaluation would be using
        // cached size/sigops/fee values that are not actually correct.
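        // Note: `mi` walks the ancestor_score index of mapTx, so it is projected
        // onto the primary index (project<0>) below before being compared against
        // the txiter-keyed mapModifiedTx and the txid-keyed inBlock/failedTx sets.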
        /** Return true if given transaction from mapTx has already been evaluated,
         * or if the transaction's cached data in mapTx is incorrect. */
        if (mi != mempool.mapTx.get<ancestor_score>().end()) {
            auto it = mempool.mapTx.project<0>(mi);
            assert(it != mempool.mapTx.end());
            if (mapModifiedTx.count(it) || inBlock.count(it->GetSharedTx()->GetHash()) || failedTx.count(it->GetSharedTx()->GetHash())) {
                ++mi;
                continue;
            }
        }

        // Now that mi is not stale, determine which transaction to evaluate:
        // the next entry from mapTx, or the best from mapModifiedTx?
        bool fUsingModified = false;

        modtxscoreiter modit = mapModifiedTx.get<ancestor_score>().begin();
        if (mi == mempool.mapTx.get<ancestor_score>().end()) {
            // We're out of entries in mapTx; use the entry from mapModifiedTx
            iter = modit->iter;
            fUsingModified = true;
        } else {
            // Try to compare the mapTx entry to the mapModifiedTx entry
            iter = mempool.mapTx.project<0>(mi);
            if (modit != mapModifiedTx.get<ancestor_score>().end() &&
                CompareTxMemPoolEntryByAncestorFee()(*modit, CTxMemPoolModifiedEntry(iter))) {
                // The best entry in mapModifiedTx has higher score
                // than the one from mapTx.
                // Switch which transaction (package) to consider
                iter = modit->iter;
                fUsingModified = true;
            } else {
                // Either no entry in mapModifiedTx, or it's worse than mapTx.
                // Increment mi for the next loop iteration.
                ++mi;
            }
        }

        // We skip mapTx entries that are inBlock, and mapModifiedTx shouldn't
        // contain anything that is inBlock.
        assert(!inBlock.count(iter->GetSharedTx()->GetHash()));

        uint64_t packageSize = iter->GetSizeWithAncestors();
        CAmount packageFees = iter->GetModFeesWithAncestors();
        int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
        if (fUsingModified) {
            packageSize = modit->nSizeWithAncestors;
            packageFees = modit->nModFeesWithAncestors;
            packageSigOpsCost = modit->nSigOpCostWithAncestors;
        }

        if (packageFees < m_options.blockMinFeeRate.GetFee(packageSize)) {
            // Everything else we might consider has a lower fee rate
            return;
        }

        if (!TestPackage(packageSize, packageSigOpsCost)) {
            if (fUsingModified) {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
                // next best entry on the next loop iteration
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter->GetSharedTx()->GetHash());
            }

            ++nConsecutiveFailed;

            if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
                    m_options.nBlockMaxWeight - m_options.coinbase_max_additional_weight) {
                // Give up if we're close to full and haven't succeeded in a while
                break;
            }
            continue;
        }

        auto ancestors{mempool.AssumeCalculateMemPoolAncestors(__func__, *iter, CTxMemPool::Limits::NoLimits(),
                                                               /*fSearchForParents=*/false)};

        onlyUnconfirmed(ancestors);
        ancestors.insert(iter);

        // Test if all tx's are Final
        if (!TestPackageTransactions(ancestors)) {
            if (fUsingModified) {
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter->GetSharedTx()->GetHash());
            }
            continue;
        }

        // This transaction will make it in; reset the failed counter.
        nConsecutiveFailed = 0;

        // Package can be added. Sort the entries in a valid order.
        std::vector<CTxMemPool::txiter> sortedEntries;
        SortForBlock(ancestors, sortedEntries);

        for (size_t i = 0; i < sortedEntries.size(); ++i) {
            AddToBlock(sortedEntries[i]);
            // Erase from the modified set, if present
            mapModifiedTx.erase(sortedEntries[i]);
        }

        ++nPackagesSelected;

        // Update transactions that depend on each of these
        nDescendantsUpdated += UpdatePackagesForAdded(mempool, ancestors, mapModifiedTx);
    }
}
} // namespace node
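// A minimal usage sketch of the assembler, not taken from this file; the
// `chainstate`, `mempool` and `script_pub_key` objects are assumed to exist in
// the caller:
//
//     BlockAssembler::Options options;
//     ApplyArgsManOptions(gArgs, options);
//     auto block_template{BlockAssembler{chainstate, &mempool, options}.CreateNewBlock(script_pub_key)};
//     assert(block_template);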