Diffstat (limited to 'src/miner.cpp')
-rw-r--r-- | src/miner.cpp | 194
1 file changed, 103 insertions, 91 deletions
diff --git a/src/miner.cpp b/src/miner.cpp
index d38ccedf55..ebf2f21ffd 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -46,6 +46,7 @@ using namespace std;
 
 uint64_t nLastBlockTx = 0;
 uint64_t nLastBlockSize = 0;
+uint64_t nLastBlockWeight = 0;
 
 class ScoreCompare
 {
@@ -76,15 +77,32 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam
 BlockAssembler::BlockAssembler(const CChainParams& _chainparams)
     : chainparams(_chainparams)
 {
-    // Largest block you're willing to create:
-    nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
-    // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
-    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
-
-    // Minimum block size you want to create; block will be filled with free transactions
-    // until there are no more or the block reaches this size:
-    nBlockMinSize = GetArg("-blockminsize", DEFAULT_BLOCK_MIN_SIZE);
-    nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
+    // Block resource limits
+    // If neither -blockmaxsize or -blockmaxweight is given, limit to DEFAULT_BLOCK_MAX_*
+    // If only one is given, only restrict the specified resource.
+    // If both are given, restrict both.
+    nBlockMaxWeight = DEFAULT_BLOCK_MAX_WEIGHT;
+    nBlockMaxSize = DEFAULT_BLOCK_MAX_SIZE;
+    bool fWeightSet = false;
+    if (mapArgs.count("-blockmaxweight")) {
+        nBlockMaxWeight = GetArg("-blockmaxweight", DEFAULT_BLOCK_MAX_WEIGHT);
+        nBlockMaxSize = MAX_BLOCK_SERIALIZED_SIZE;
+        fWeightSet = true;
+    }
+    if (mapArgs.count("-blockmaxsize")) {
+        nBlockMaxSize = GetArg("-blockmaxsize", DEFAULT_BLOCK_MAX_SIZE);
+        if (!fWeightSet) {
+            nBlockMaxWeight = nBlockMaxSize * WITNESS_SCALE_FACTOR;
+        }
+    }
+
+    // Limit weight to between 4K and MAX_BLOCK_WEIGHT-4K for sanity:
+    nBlockMaxWeight = std::max((unsigned int)4000, std::min((unsigned int)(MAX_BLOCK_WEIGHT-4000), nBlockMaxWeight));
+    // Limit size to between 1K and MAX_BLOCK_SERIALIZED_SIZE-1K for sanity:
+    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SERIALIZED_SIZE-1000), nBlockMaxSize));
+
+    // Whether we need to account for byte usage (in addition to weight usage)
+    fNeedSizeAccounting = (nBlockMaxSize < MAX_BLOCK_SERIALIZED_SIZE-1000);
 }
 
 void BlockAssembler::resetBlock()
@@ -93,7 +111,9 @@ void BlockAssembler::resetBlock()
 
     // Reserve space for coinbase tx
    nBlockSize = 1000;
-    nBlockSigOps = 100;
+    nBlockWeight = 4000;
+    nBlockSigOpsCost = 400;
+    fIncludeWitness = false;
 
     // These counters do not include coinbase tx
     nBlockTx = 0;
@@ -116,7 +136,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
     // Add dummy coinbase tx as first transaction
     pblock->vtx.push_back(CTransaction());
     pblocktemplate->vTxFees.push_back(-1); // updated at end
-    pblocktemplate->vTxSigOps.push_back(-1); // updated at end
+    pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end
 
     LOCK2(cs_main, mempool.cs);
     CBlockIndex* pindexPrev = chainActive.Tip();
@@ -135,12 +155,21 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
                        ? nMedianTimePast
                        : pblock->GetBlockTime();
 
+    // Decide whether to include witness transactions
+    // This is only needed in case the witness softfork activation is reverted
+    // (which would require a very deep reorganization) or when
+    // -promiscuousmempoolflags is used.
+    // TODO: replace this with a call to main to assess validity of a mempool
+    // transaction (which in most cases can be a no-op).
+    fIncludeWitness = IsWitnessEnabled(pindexPrev, chainparams.GetConsensus());
+
     addPriorityTxs();
     addPackageTxs();
 
     nLastBlockTx = nBlockTx;
     nLastBlockSize = nBlockSize;
-    LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOps);
+    nLastBlockWeight = nBlockWeight;
+    LogPrintf("CreateNewBlock(): total size %u txs: %u fees: %ld sigops %d\n", nBlockSize, nBlockTx, nFees, nBlockSigOpsCost);
 
     // Create coinbase transaction.
     CMutableTransaction coinbaseTx;
@@ -151,6 +180,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc
     coinbaseTx.vout[0].nValue = nFees + GetBlockSubsidy(nHeight, chainparams.GetConsensus());
     coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;
     pblock->vtx[0] = coinbaseTx;
+    pblocktemplate->vchCoinbaseCommitment = GenerateCoinbaseCommitment(*pblock, pindexPrev, chainparams.GetConsensus());
     pblocktemplate->vTxFees[0] = -nFees;
 
     // Fill in header
@@ -158,7 +188,7 @@
     UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
     pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
     pblock->nNonce = 0;
-    pblocktemplate->vTxSigOps[0] = GetLegacySigOpCount(pblock->vtx[0]);
+    pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(pblock->vtx[0]);
 
     CValidationState state;
     if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) {
@@ -192,48 +222,75 @@ void BlockAssembler::onlyUnconfirmed(CTxMemPool::setEntries& testSet)
     }
 }
 
-bool BlockAssembler::TestPackage(uint64_t packageSize, unsigned int packageSigOps)
+bool BlockAssembler::TestPackage(uint64_t packageSize, int64_t packageSigOpsCost)
 {
-    if (nBlockSize + packageSize >= nBlockMaxSize)
+    // TODO: switch to weight-based accounting for packages instead of vsize-based accounting.
+    if (nBlockWeight + WITNESS_SCALE_FACTOR * packageSize >= nBlockMaxWeight)
         return false;
-    if (nBlockSigOps + packageSigOps >= MAX_BLOCK_SIGOPS)
+    if (nBlockSigOpsCost + packageSigOpsCost >= MAX_BLOCK_SIGOPS_COST)
         return false;
     return true;
 }
 
-// Block size and sigops have already been tested. Check that all transactions
-// are final.
-bool BlockAssembler::TestPackageFinality(const CTxMemPool::setEntries& package)
+// Perform transaction-level checks before adding to block:
+// - transaction finality (locktime)
+// - premature witness (in case segwit transactions are added to mempool before
+//   segwit activation)
+// - serialized size (in case -blockmaxsize is in use)
+bool BlockAssembler::TestPackageTransactions(const CTxMemPool::setEntries& package)
 {
+    uint64_t nPotentialBlockSize = nBlockSize; // only used with fNeedSizeAccounting
     BOOST_FOREACH (const CTxMemPool::txiter it, package) {
         if (!IsFinalTx(it->GetTx(), nHeight, nLockTimeCutoff))
             return false;
+        if (!fIncludeWitness && !it->GetTx().wit.IsNull())
+            return false;
+        if (fNeedSizeAccounting) {
+            uint64_t nTxSize = ::GetSerializeSize(it->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
+            if (nPotentialBlockSize + nTxSize >= nBlockMaxSize) {
+                return false;
+            }
+            nPotentialBlockSize += nTxSize;
+        }
     }
     return true;
 }
 
 bool BlockAssembler::TestForBlock(CTxMemPool::txiter iter)
 {
-    if (nBlockSize + iter->GetTxSize() >= nBlockMaxSize) {
+    if (nBlockWeight + iter->GetTxWeight() >= nBlockMaxWeight) {
         // If the block is so close to full that no more txs will fit
         // or if we've tried more than 50 times to fill remaining space
         // then flag that the block is finished
-        if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
+        if (nBlockWeight > nBlockMaxWeight - 400 || lastFewTxs > 50) {
             blockFinished = true;
             return false;
         }
-        // Once we're within 1000 bytes of a full block, only look at 50 more txs
+        // Once we're within 4000 weight of a full block, only look at 50 more txs
         // to try to fill the remaining space.
-        if (nBlockSize > nBlockMaxSize - 1000) {
+        if (nBlockWeight > nBlockMaxWeight - 4000) {
             lastFewTxs++;
         }
         return false;
     }
 
-    if (nBlockSigOps + iter->GetSigOpCount() >= MAX_BLOCK_SIGOPS) {
+    if (fNeedSizeAccounting) {
+        if (nBlockSize + ::GetSerializeSize(iter->GetTx(), SER_NETWORK, PROTOCOL_VERSION) >= nBlockMaxSize) {
+            if (nBlockSize > nBlockMaxSize - 100 || lastFewTxs > 50) {
+                blockFinished = true;
+                return false;
+            }
+            if (nBlockSize > nBlockMaxSize - 1000) {
+                lastFewTxs++;
+            }
+            return false;
+        }
+    }
+
+    if (nBlockSigOpsCost + iter->GetSigOpCost() >= MAX_BLOCK_SIGOPS_COST) {
         // If the block has room for no more sig ops then
         // flag that the block is finished
-        if (nBlockSigOps > MAX_BLOCK_SIGOPS - 2) {
+        if (nBlockSigOpsCost > MAX_BLOCK_SIGOPS_COST - 8) {
             blockFinished = true;
             return false;
         }
@@ -255,10 +312,13 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
 {
     pblock->vtx.push_back(iter->GetTx());
     pblocktemplate->vTxFees.push_back(iter->GetFee());
-    pblocktemplate->vTxSigOps.push_back(iter->GetSigOpCount());
-    nBlockSize += iter->GetTxSize();
+    pblocktemplate->vTxSigOpsCost.push_back(iter->GetSigOpCost());
+    if (fNeedSizeAccounting) {
+        nBlockSize += ::GetSerializeSize(iter->GetTx(), SER_NETWORK, PROTOCOL_VERSION);
+    }
+    nBlockWeight += iter->GetTxWeight();
     ++nBlockTx;
-    nBlockSigOps += iter->GetSigOpCount();
+    nBlockSigOpsCost += iter->GetSigOpCost();
     nFees += iter->GetFee();
     inBlock.insert(iter);
 
@@ -274,62 +334,6 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
     }
 }
 
-void BlockAssembler::addScoreTxs()
-{
-    std::priority_queue<CTxMemPool::txiter, std::vector<CTxMemPool::txiter>, ScoreCompare> clearedTxs;
-    CTxMemPool::setEntries waitSet;
-    CTxMemPool::indexed_transaction_set::index<mining_score>::type::iterator mi = mempool.mapTx.get<mining_score>().begin();
-    CTxMemPool::txiter iter;
-    while (!blockFinished && (mi != mempool.mapTx.get<mining_score>().end() || !clearedTxs.empty()))
-    {
-        // If no txs that were previously postponed are available to try
-        // again, then try the next highest score tx
-        if (clearedTxs.empty()) {
-            iter = mempool.mapTx.project<0>(mi);
-            mi++;
-        }
-        // If a previously postponed tx is available to try again, then it
-        // has higher score than all untried so far txs
-        else {
-            iter = clearedTxs.top();
-            clearedTxs.pop();
-        }
-
-        // If tx already in block, skip (added by addPriorityTxs)
-        if (inBlock.count(iter)) {
-            continue;
-        }
-
-        // If tx is dependent on other mempool txs which haven't yet been included
-        // then put it in the waitSet
-        if (isStillDependent(iter)) {
-            waitSet.insert(iter);
-            continue;
-        }
-
-        // If the fee rate is below the min fee rate for mining, then we're done
-        // adding txs based on score (fee rate)
-        if (iter->GetModifiedFee() < ::minRelayTxFee.GetFee(iter->GetTxSize()) && nBlockSize >= nBlockMinSize) {
-            return;
-        }
-
-        // If this tx fits in the block add it, otherwise keep looping
-        if (TestForBlock(iter)) {
-            AddToBlock(iter);
-
-            // This tx was successfully added, so
-            // add transactions that depend on this one to the priority queue to try again
-            BOOST_FOREACH(CTxMemPool::txiter child, mempool.GetMemPoolChildren(iter))
-            {
-                if (waitSet.count(child)) {
-                    clearedTxs.push(child);
-                    waitSet.erase(child);
-                }
-            }
-        }
-    }
-}
-
 void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alreadyAdded,
         indexed_modified_transaction_set &mapModifiedTx)
 {
@@ -345,7 +349,7 @@ void BlockAssembler::UpdatePackagesForAdded(const CTxMemPool::setEntries& alread
             CTxMemPoolModifiedEntry modEntry(desc);
             modEntry.nSizeWithAncestors -= it->GetTxSize();
             modEntry.nModFeesWithAncestors -= it->GetModifiedFee();
-            modEntry.nSigOpCountWithAncestors -= it->GetSigOpCount();
+            modEntry.nSigOpCostWithAncestors -= it->GetSigOpCost();
             mapModifiedTx.insert(modEntry);
         } else {
             mapModifiedTx.modify(mit, update_for_parent_inclusion(it));
@@ -447,19 +451,19 @@
 
         uint64_t packageSize = iter->GetSizeWithAncestors();
         CAmount packageFees = iter->GetModFeesWithAncestors();
-        unsigned int packageSigOps = iter->GetSigOpCountWithAncestors();
+        int64_t packageSigOpsCost = iter->GetSigOpCostWithAncestors();
         if (fUsingModified) {
            packageSize = modit->nSizeWithAncestors;
            packageFees = modit->nModFeesWithAncestors;
-            packageSigOps = modit->nSigOpCountWithAncestors;
+            packageSigOpsCost = modit->nSigOpCostWithAncestors;
         }
 
-        if (packageFees < ::minRelayTxFee.GetFee(packageSize) && nBlockSize >= nBlockMinSize) {
+        if (packageFees < ::minRelayTxFee.GetFee(packageSize)) {
            // Everything else we might consider has a lower fee rate
            return;
        }
 
-        if (!TestPackage(packageSize, packageSigOps)) {
+        if (!TestPackage(packageSize, packageSigOpsCost)) {
            if (fUsingModified) {
                // Since we always look at the best entry in mapModifiedTx,
                // we must erase failed entries so that we can consider the
@@ -479,7 +483,7 @@
         ancestors.insert(iter);
 
         // Test if all tx's are Final
-        if (!TestPackageFinality(ancestors)) {
+        if (!TestPackageTransactions(ancestors)) {
            if (fUsingModified) {
                mapModifiedTx.get<ancestor_score>().erase(modit);
                failedTx.insert(iter);
@@ -513,6 +517,9 @@ void BlockAssembler::addPriorityTxs()
         return;
     }
 
+    bool fSizeAccounting = fNeedSizeAccounting;
+    fNeedSizeAccounting = true;
+
     // This vector will be sorted into a priority queue:
     vector<TxCoinAgePriority> vecPriority;
     TxCoinAgePriorityCompare pricomparer;
@@ -544,6 +551,10 @@ void BlockAssembler::addPriorityTxs()
            continue;
        }
 
+        // cannot accept witness transactions into a non-witness block
+        if (!fIncludeWitness && !iter->GetTx().wit.IsNull())
+            continue;
+
        // If tx is dependent on other mempool txs which haven't yet been included
        // then put it in the waitSet
        if (isStillDependent(iter)) {
@@ -558,7 +569,7 @@ void BlockAssembler::addPriorityTxs()
            // If now that this txs is added we've surpassed our desired priority size
            // or have dropped below the AllowFreeThreshold, then we're done adding priority txs
            if (nBlockSize >= nBlockPrioritySize || !AllowFree(actualPriority)) {
-                return;
+                break;
            }
 
            // This tx was successfully added, so
@@ -574,6 +585,7 @@
            }
        }
    }
+    fNeedSizeAccounting = fSizeAccounting;
 }
 
 void IncrementExtraNonce(CBlock* pblock, const CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
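For readers skimming the constructor hunk above, here is a minimal standalone sketch (not taken from the patch) of how the two options interact once -blockminsize is gone. The constant values and the ComputeBlockLimits helper are illustrative stand-ins; the real constants live in the consensus and policy headers.

#include <algorithm>
#include <map>
#include <string>

// Illustrative stand-ins for the real constants; exact values are assumptions.
static const unsigned int WITNESS_SCALE_FACTOR = 4;
static const unsigned int MAX_BLOCK_WEIGHT = 4000000;
static const unsigned int MAX_BLOCK_SERIALIZED_SIZE = 4000000;
static const unsigned int DEFAULT_BLOCK_MAX_WEIGHT = 3000000;
static const unsigned int DEFAULT_BLOCK_MAX_SIZE = 750000;

struct BlockLimits {
    unsigned int nBlockMaxWeight;
    unsigned int nBlockMaxSize;
    bool fNeedSizeAccounting;
};

// Mirrors the constructor hunk: -blockmaxweight alone caps only weight,
// -blockmaxsize alone implies a weight cap of size * WITNESS_SCALE_FACTOR,
// and giving both restricts both resources.
BlockLimits ComputeBlockLimits(const std::map<std::string, unsigned int>& args)
{
    BlockLimits limits = {DEFAULT_BLOCK_MAX_WEIGHT, DEFAULT_BLOCK_MAX_SIZE, false};
    bool fWeightSet = false;
    std::map<std::string, unsigned int>::const_iterator w = args.find("-blockmaxweight");
    if (w != args.end()) {
        limits.nBlockMaxWeight = w->second;
        limits.nBlockMaxSize = MAX_BLOCK_SERIALIZED_SIZE; // size left effectively unrestricted
        fWeightSet = true;
    }
    std::map<std::string, unsigned int>::const_iterator s = args.find("-blockmaxsize");
    if (s != args.end()) {
        limits.nBlockMaxSize = s->second;
        if (!fWeightSet) {
            limits.nBlockMaxWeight = limits.nBlockMaxSize * WITNESS_SCALE_FACTOR;
        }
    }
    // Same sanity clamps as the patch applies.
    limits.nBlockMaxWeight = std::max(4000u, std::min(MAX_BLOCK_WEIGHT - 4000, limits.nBlockMaxWeight));
    limits.nBlockMaxSize = std::max(1000u, std::min(MAX_BLOCK_SERIALIZED_SIZE - 1000, limits.nBlockMaxSize));
    // Byte accounting is only needed when the size cap can actually bind.
    limits.fNeedSizeAccounting = (limits.nBlockMaxSize < MAX_BLOCK_SERIALIZED_SIZE - 1000);
    return limits;
}

With only -blockmaxsize=750000 set, for example, this sketch yields a weight cap of 3,000,000 and fNeedSizeAccounting = true; with only -blockmaxweight set, the serialized-size cap stays at its maximum and byte accounting remains off, which is why TestForBlock and AddToBlock guard the size bookkeeping behind fNeedSizeAccounting.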
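The TODO in TestPackage reflects that packages are still tracked by virtual size while the block itself is tracked by weight; the conversion it relies on is that weight never exceeds WITNESS_SCALE_FACTOR times virtual size. A rough sketch of that relationship follows; the helper names here are illustrative, not the exact functions used in the tree.

// Weight counts non-witness bytes WITNESS_SCALE_FACTOR times and witness bytes once;
// virtual size is weight scaled back down, rounding up.
static const unsigned int WITNESS_SCALE_FACTOR = 4;

unsigned int TxWeight(unsigned int base_size, unsigned int total_size)
{
    // base_size: serialized size without witness data; total_size: with witness data
    return base_size * (WITNESS_SCALE_FACTOR - 1) + total_size;
}

unsigned int TxVirtualSize(unsigned int weight)
{
    return (weight + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR; // round up
}

// Since weight <= WITNESS_SCALE_FACTOR * vsize, charging a package
// WITNESS_SCALE_FACTOR * packageSize against nBlockMaxWeight is a safe,
// slightly pessimistic over-estimate, which is what TestPackage does above.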