Diffstat (limited to 'src/validation.cpp')
-rw-r--r-- | src/validation.cpp | 940
1 file changed, 460 insertions, 480 deletions
diff --git a/src/validation.cpp b/src/validation.cpp index d0ce3f78fc..13e92ef4d4 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -20,7 +20,6 @@ #include <index/txindex.h> #include <policy/fees.h> #include <policy/policy.h> -#include <policy/rbf.h> #include <policy/settings.h> #include <pow.h> #include <primitives/block.h> @@ -42,6 +41,7 @@ #include <util/rbf.h> #include <util/strencodings.h> #include <util/system.h> +#include <util/translation.h> #include <util/validation.h> #include <validationinterface.h> #include <warnings.h> @@ -60,167 +60,39 @@ #define MICRO 0.000001 #define MILLI 0.001 -/** - * Global state - */ -namespace { - struct CBlockIndexWorkComparator - { - bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const { - // First sort by most total work, ... - if (pa->nChainWork > pb->nChainWork) return false; - if (pa->nChainWork < pb->nChainWork) return true; - - // ... then by earliest time received, ... - if (pa->nSequenceId < pb->nSequenceId) return false; - if (pa->nSequenceId > pb->nSequenceId) return true; - - // Use pointer address as tie breaker (should only happen with blocks - // loaded from disk, as those all have id 0). - if (pa < pb) return false; - if (pa > pb) return true; - - // Identical blocks. - return false; - } - }; -} // anon namespace - -enum DisconnectResult -{ - DISCONNECT_OK, // All good. - DISCONNECT_UNCLEAN, // Rolled back, but UTXO set was inconsistent with block. - DISCONNECT_FAILED // Something else went wrong. -}; - -class ConnectTrace; - -/** - * CChainState stores and provides an API to update our local knowledge of the - * current best chain and header tree. - * - * It generally provides access to the current block tree, as well as functions - * to provide new data, which it will appropriately validate and incorporate in - * its state as necessary. - * - * Eventually, the API here is targeted at being exposed externally as a - * consumable libconsensus library, so any functions added must only call - * other class member functions, pure functions in other parts of the consensus - * library, callbacks via the validation interface, or read/write-to-disk - * functions (eventually this will also be via callbacks). - */ -class CChainState { -private: - /** - * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and - * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be - * missing the data for the block. - */ - std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates; - - /** - * Every received block is assigned a unique and increasing identifier, so we - * know which one to give priority in case of a fork. - */ - CCriticalSection cs_nBlockSequenceId; - /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */ - int32_t nBlockSequenceId = 1; - /** Decreasing counter (used by subsequent preciousblock calls). */ - int32_t nBlockReverseSequenceId = -1; - /** chainwork for the last block that preciousblock has been applied to. */ - arith_uint256 nLastPreciousChainwork = 0; - - /** In order to efficiently track invalidity of headers, we keep the set of - * blocks which we tried to connect and found to be invalid here (ie which - * were set to BLOCK_FAILED_VALID since the last restart). We can then - * walk this set and check if a new header is a descendant of something in - * this set, preventing us from having to walk mapBlockIndex when we try - * to connect a bad block and fail. 
- * - * While this is more complicated than marking everything which descends - * from an invalid block as invalid at the time we discover it to be - * invalid, doing so would require walking all of mapBlockIndex to find all - * descendants. Since this case should be very rare, keeping track of all - * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as - * well. - * - * Because we already walk mapBlockIndex in height-order at startup, we go - * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time, - * instead of putting things in this set. - */ - std::set<CBlockIndex*> m_failed_blocks; - - /** - * the ChainState CriticalSection - * A lock that must be held when modifying this ChainState - held in ActivateBestChain() - */ - CCriticalSection m_cs_chainstate; - -public: - //! The current chain of blockheaders we consult and build on. - //! @see CChain, CBlockIndex. - CChain m_chain; - BlockMap mapBlockIndex GUARDED_BY(cs_main); - std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked; - CBlockIndex *pindexBestInvalid = nullptr; - - bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) LOCKS_EXCLUDED(cs_main); - - /** - * If a block header hasn't already been seen, call CheckBlockHeader on it, ensure - * that it doesn't descend from an invalid block, and then add it to mapBlockIndex. - */ - bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - // Block (dis)connection on a given view: - DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view); - bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, - CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - // Block disconnection on our pcoinsTip: - bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - // Manual block validity manipulation: - bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main); - bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main); - void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - bool ReplayBlocks(const CChainParams& params, CCoinsView* view); - bool RewindBlockIndex(const CChainParams& params) LOCKS_EXCLUDED(cs_main); - bool LoadGenesisBlock(const CChainParams& chainparams); +bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const { + // First sort by most total work, ... + if (pa->nChainWork > pb->nChainWork) return false; + if (pa->nChainWork < pb->nChainWork) return true; - void PruneBlockIndexCandidates(); + // ... then by earliest time received, ... 
+ if (pa->nSequenceId < pb->nSequenceId) return false; + if (pa->nSequenceId > pb->nSequenceId) return true; - void UnloadBlockIndex(); + // Use pointer address as tie breaker (should only happen with blocks + // loaded from disk, as those all have id 0). + if (pa < pb) return false; + if (pa > pb) return true; -private: - bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - - CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Create a new block index entry for a given block hash */ - CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** - * Make various assertions about the state of the block index. - * - * By default this only executes fully when using the Regtest chain; see: fCheckBlockIndex. - */ - void CheckBlockIndex(const Consensus::Params& consensusParams); + // Identical blocks. + return false; +} - void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main); - void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +namespace { +BlockManager g_blockman; +} // anon namespace - bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +std::unique_ptr<CChainState> g_chainstate; - //! Mark a block as not having block data - void EraseBlockData(CBlockIndex* index) EXCLUSIVE_LOCKS_REQUIRED(cs_main); -} g_chainstate; +CChainState& ChainstateActive() { + assert(g_chainstate); + return *g_chainstate; +} -CChain& ChainActive() { return g_chainstate.m_chain; } +CChain& ChainActive() { + assert(g_chainstate); + return g_chainstate->m_chain; +} /** * Mutex to guard access to validation specific variables, such as reading @@ -234,7 +106,6 @@ CChain& ChainActive() { return g_chainstate.m_chain; } */ RecursiveMutex cs_main; -BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex; CBlockIndex *pindexBestHeader = nullptr; Mutex g_best_block_mutex; std::condition_variable g_best_block_cv; @@ -250,7 +121,6 @@ bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED; size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; -bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT; uint256 hashAssumeValid; arith_uint256 nMinimumChainWork; @@ -265,12 +135,7 @@ CScript COINBASE_FLAGS; // Internal stuff namespace { - CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid; - - /** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions. - * Pruned nodes may have entries where B is missing data. 
- */ - std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = g_chainstate.mapBlocksUnlinked; + CBlockIndex* pindexBestInvalid = nullptr; CCriticalSection cs_LastBlockFile; std::vector<CBlockFileInfo> vinfoBlockFile; @@ -288,6 +153,13 @@ namespace { std::set<int> setDirtyFileInfo; } // anon namespace +CBlockIndex* LookupBlockIndex(const uint256& hash) +{ + AssertLockHeld(cs_main); + BlockMap::const_iterator it = g_blockman.m_block_index.find(hash); + return it == g_blockman.m_block_index.end() ? nullptr : it->second; +} + CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) { AssertLockHeld(cs_main); @@ -307,22 +179,12 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc return chain.Genesis(); } -std::unique_ptr<CCoinsViewDB> pcoinsdbview; -std::unique_ptr<CCoinsViewCache> pcoinsTip; std::unique_ptr<CBlockTreeDB> pblocktree; -enum class FlushStateMode { - NONE, - IF_NEEDED, - PERIODIC, - ALWAYS -}; - // See definition for documentation -static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0); static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight); static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight); -bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr); +bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr); static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false); static FlatFileSeq BlockFileSeq(); static FlatFileSeq UndoFileSeq(); @@ -402,8 +264,8 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag lockPair.second = lp->time; } else { - // pcoinsTip contains the UTXO set for ::ChainActive().Tip() - CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); + // CoinsTip() contains the UTXO set for ::ChainActive().Tip() + CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool); std::vector<int> prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { @@ -452,7 +314,9 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag // Returns the script flags which should be checked for a given block static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams); -static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) { +static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) + EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main) +{ int expired = pool.Expire(GetTime() - age); if (expired != 0) { LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired); @@ -461,13 +325,13 @@ static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) std::vector<COutPoint> vNoSpendsRemaining; pool.TrimToSize(limit, &vNoSpendsRemaining); for (const COutPoint& removed : vNoSpendsRemaining) - pcoinsTip->Uncache(removed); + ::ChainstateActive().CoinsTip().Uncache(removed); } static bool IsCurrentForFeeEstimation() 
EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); - if (IsInitialBlockDownload()) + if (::ChainstateActive().IsInitialBlockDownload()) return false; if (::ChainActive().Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) return false; @@ -489,7 +353,7 @@ static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main) * and instead just erase from the mempool as needed. */ -static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void UpdateMempoolForReorg(DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, ::mempool.cs) { AssertLockHeld(cs_main); std::vector<uint256> vHashUpdate; @@ -523,7 +387,7 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions - mempool.removeForReorg(pcoinsTip.get(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } @@ -555,13 +419,13 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt assert(txFrom->vout.size() > txin.prevout.n); assert(txFrom->vout[txin.prevout.n] == coin.out); } else { - const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout); + const Coin& coinFromDisk = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout); assert(!coinFromDisk.IsSpent()); assert(coinFromDisk.out == coin.out); } } - return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata); + return CheckInputs(tx, state, view, flags, cacheSigStore, true, txdata); } /** @@ -633,15 +497,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // unconfirmed ancestors anyway; doing otherwise is hopelessly // insecure. bool fReplacementOptOut = true; - if (fEnableReplacement) + for (const CTxIn &_txin : ptxConflicting->vin) { - for (const CTxIn &_txin : ptxConflicting->vin) + if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) { - if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) - { - fReplacementOptOut = false; - break; - } + fReplacementOptOut = false; + break; } } if (fReplacementOptOut) { @@ -658,23 +519,24 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool CCoinsViewCache view(&dummy); LockPoints lp; - CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); + CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip(); + CCoinsViewMemPool viewMemPool(&coins_cache, pool); view.SetBackend(viewMemPool); // do all inputs exist? for (const CTxIn& txin : tx.vin) { - if (!pcoinsTip->HaveCoinInCache(txin.prevout)) { + if (!coins_cache.HaveCoinInCache(txin.prevout)) { coins_to_uncache.push_back(txin.prevout); } // Note: this call may add txin.prevout to the coins cache - // (pcoinsTip.cacheCoins) by way of FetchCoin(). It should be removed + // (CoinsTip().cacheCoins) by way of FetchCoin(). It should be removed // later (via coins_to_uncache) if this tx turns out to be invalid. if (!view.HaveCoin(txin.prevout)) { // Are inputs missing because we already have the tx? 
for (size_t out = 0; out < tx.vout.size(); out++) { // Optimistically just do efficient check of cache for outputs - if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) { + if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) { return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known"); } } @@ -753,15 +615,69 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool REJECT_HIGHFEE, "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee)); + const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts); // Calculate in-mempool ancestors, up to a limit. CTxMemPool::setEntries setAncestors; size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000; size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000; + + if (setConflicts.size() == 1) { + // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we + // would meet the chain limits after the conflicts have been removed. However, there isn't a practical + // way to do this short of calculating the ancestor and descendant sets with an overlay cache of + // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't + // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool + // conflicts here. Importantly, we need to ensure that some transactions which were accepted using + // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides + // for off-chain contract systems (see link in the comment below). + // + // Specifically, the subset of RBF transactions which we allow despite chain limits are those which + // conflict directly with exactly one other transaction (but may evict children of said transaction), + // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies" + // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is + // amended, we may need to move that check to here instead of removing it wholesale. + // + // Such transactions are clearly not merging any existing packages, so we are only concerned with + // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are + // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed + // to. + // + // To check these we first check if we meet the RBF criteria, above, and increment the descendant + // limits by the direct conflict and its descendants (as these are recalculated in + // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no + // removals, of each parent's existing dependant set). The ancestor count limits are unmodified (as + // the ancestor limits should be the same for both our new transaction and any conflicts). + // We don't bother incrementing nLimitDescendants by the full removal count as that limit never comes + // into force here (as we're only adding a single transaction). 
+ assert(setIterConflicting.size() == 1); + CTxMemPool::txiter conflict = *setIterConflicting.begin(); + + nLimitDescendants += 1; + nLimitDescendantSize += conflict->GetSizeWithDescendants(); + } + std::string errString; if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { - return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString); + setAncestors.clear(); + // If CalculateMemPoolAncestors fails second time, we want the original error string. + std::string dummy_err_string; + // Contracting/payment channels CPFP carve-out: + // If the new transaction is relatively small (up to 40k weight) + // and has at most one ancestor (ie ancestor limit of 2, including + // the new transaction), allow it if its parent has exactly the + // descendant limit descendants. + // + // This allows protocols which rely on distrusting counterparties + // being able to broadcast descendants of an unconfirmed transaction + // to be secure by simply only having two immediately-spendable + // outputs - one for each counterparty. For more info on the uses for + // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html + if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT || + !pool.CalculateMemPoolAncestors(entry, setAncestors, 2, nLimitAncestorSize, nLimitDescendants + 1, nLimitDescendantSize + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) { + return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString); + } } // A transaction that spends outputs that would be replaced by it is invalid. Now @@ -796,7 +712,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool CFeeRate newFeeRate(nModifiedFees, nSize); std::set<uint256> setConflictsParents; const int maxDescendantsToVisit = 100; - const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts); for (const auto& mi : setIterConflicting) { // Don't allow the replacement to reduce the feerate of the // mempool. @@ -856,6 +771,11 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // feerate junk to be mined first. Ideally we'd keep track of // the ancestor feerates and make the decision based on that, // but for now requiring all new inputs to be confirmed works. + // + // Note that if you relax this to make RBF a little more useful, + // this may break the CalculateMempoolAncestors RBF relaxation, + // above. See the comment above the first CalculateMempoolAncestors + // call for more info. if (!setConflictsParents.count(tx.vin[j].prevout.hash)) { // Rather than check the UTXO set - potentially expensive - @@ -895,15 +815,17 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; // Check against previous transactions - // This is done last to help prevent CPU exhaustion denial-of-service attacks. + // The first loop above does all the inexpensive checks. + // Only if ALL inputs pass do we perform expensive ECDSA signature checks. + // Helps prevent CPU exhaustion denial-of-service attacks. 
PrecomputedTransactionData txdata(tx); - if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) { + if (!CheckInputs(tx, state, view, scriptVerifyFlags, true, false, txdata)) { // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we // need to turn both off, and compare against just turning off CLEANSTACK // to see if the failure is specifically due to witness validation. CValidationState stateDummy; // Want reported failures to be from first CheckInputs - if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) && - !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) { + if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) && + !CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) { // Only the witness is missing, so the transaction itself may be fine. state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage()); @@ -988,11 +910,11 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo // (`CCoinsViewCache::cacheCoins`). for (const COutPoint& hashTx : coins_to_uncache) - pcoinsTip->Uncache(hashTx); + ::ChainstateActive().CoinsTip().Uncache(hashTx); } // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits CValidationState stateDummy; - FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC); + ::ChainstateActive().FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC); return res; } @@ -1168,35 +1090,78 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) return nSubsidy; } -bool IsInitialBlockDownload() +CoinsViews::CoinsViews( + std::string ldb_name, + size_t cache_size_bytes, + bool in_memory, + bool should_wipe) : m_dbview( + GetDataDir() / ldb_name, cache_size_bytes, in_memory, should_wipe), + m_catcherview(&m_dbview) {} + +void CoinsViews::InitCache() +{ + m_cacheview = MakeUnique<CCoinsViewCache>(&m_catcherview); +} + +// NOTE: for now m_blockman is set to a global, but this will be changed +// in a future commit. +CChainState::CChainState() : m_blockman(g_blockman) {} + + +void CChainState::InitCoinsDB( + size_t cache_size_bytes, + bool in_memory, + bool should_wipe, + std::string leveldb_name) +{ + m_coins_views = MakeUnique<CoinsViews>( + leveldb_name, cache_size_bytes, in_memory, should_wipe); +} + +void CChainState::InitCoinsCache() +{ + assert(m_coins_views != nullptr); + m_coins_views->InitCache(); +} + +// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which +// is a performance-related implementation detail. This function must be marked +// `const` so that `CValidationInterface` clients (which are given a `const CChainState*`) +// can call it. +// +bool CChainState::IsInitialBlockDownload() const { - // Once this function has returned false, it must remain false. - static std::atomic<bool> latchToFalse{false}; // Optimization: pre-test latch before taking the lock. 
- if (latchToFalse.load(std::memory_order_relaxed)) + if (m_cached_finished_ibd.load(std::memory_order_relaxed)) return false; LOCK(cs_main); - if (latchToFalse.load(std::memory_order_relaxed)) + if (m_cached_finished_ibd.load(std::memory_order_relaxed)) return false; if (fImporting || fReindex) return true; - if (::ChainActive().Tip() == nullptr) + if (m_chain.Tip() == nullptr) return true; - if (::ChainActive().Tip()->nChainWork < nMinimumChainWork) + if (m_chain.Tip()->nChainWork < nMinimumChainWork) return true; - if (::ChainActive().Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) + if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) return true; LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); - latchToFalse.store(true, std::memory_order_relaxed); + m_cached_finished_ibd.store(true, std::memory_order_relaxed); return false; } -CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr; +static CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr; + +BlockMap& BlockIndex() +{ + return g_blockman.m_block_index; +} static void AlertNotify(const std::string& strMessage) { uiInterface.NotifyAlertChanged(); +#if HAVE_SYSTEM std::string strCmd = gArgs.GetArg("-alertnotify", ""); if (strCmd.empty()) return; @@ -1210,6 +1175,7 @@ static void AlertNotify(const std::string& strMessage) std::thread t(runCommand, strCmd); t.detach(); // thread runs free +#endif } static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -1217,7 +1183,7 @@ static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main) AssertLockHeld(cs_main); // Before we get past initial download, we cannot reliably alert about forks // (we assume we don't get stuck on a fork before finishing our initial sync) - if (IsInitialBlockDownload()) + if (::ChainstateActive().IsInitialBlockDownload()) return; // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it) @@ -1305,7 +1271,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) { pindex->nStatus |= BLOCK_FAILED_VALID; - m_failed_blocks.insert(pindex); + m_blockman.m_failed_blocks.insert(pindex); setDirtyBlockIndex.insert(pindex); setBlockIndexCandidates.erase(pindex); InvalidChainFound(pindex); @@ -1376,90 +1342,79 @@ void InitScriptExecutionCache() { * * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp */ -bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - if (!tx.IsCoinBase()) - { - if (pvChecks) - pvChecks->reserve(tx.vin.size()); - - // The first loop above does all the inexpensive checks. - // Only if ALL inputs pass do we perform expensive ECDSA signature checks. - // Helps prevent CPU exhaustion attacks. - - // Skip script verification when connecting blocks under the - // assumevalid block. 
Assuming the assumevalid block is valid this - // is safe because block merkle hashes are still computed and checked, - // Of course, if an assumed valid block is invalid due to false scriptSigs - // this optimization would allow an invalid chain to be accepted. - if (fScriptChecks) { - // First check if script executions have been cached with the same - // flags. Note that this assumes that the inputs provided are - // correct (ie that the transaction hash which is in tx's prevouts - // properly commits to the scriptPubKey in the inputs view of that - // transaction). - uint256 hashCacheEntry; - // We only use the first 19 bytes of nonce to avoid a second SHA - // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64) - static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache"); - CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin()); - AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks - if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) { - return true; - } - - for (unsigned int i = 0; i < tx.vin.size(); i++) { - const COutPoint &prevout = tx.vin[i].prevout; - const Coin& coin = inputs.AccessCoin(prevout); - assert(!coin.IsSpent()); - - // We very carefully only pass in things to CScriptCheck which - // are clearly committed to by tx' witness hash. This provides - // a sanity check that our caching is not introducing consensus - // failures through additional data in, eg, the coins being - // spent being checked as a part of CScriptCheck. - - // Verify signature - CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata); - if (pvChecks) { - pvChecks->push_back(CScriptCheck()); - check.swap(pvChecks->back()); - } else if (!check()) { - if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { - // Check whether the failure was caused by a - // non-mandatory script verification check, such as - // non-standard DER encodings or non-null dummy - // arguments; if so, ensure we return NOT_STANDARD - // instead of CONSENSUS to avoid downstream users - // splitting the network between upgraded and - // non-upgraded nodes by banning CONSENSUS-failing - // data providers. - CScriptCheck check2(coin.out, tx, i, - flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); - if (check2()) - return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); - } - // MANDATORY flag failures correspond to - // ValidationInvalidReason::CONSENSUS. Because CONSENSUS - // failures are the most serious case of validation - // failures, we may need to consider using - // RECENT_CONSENSUS_CHANGE for any script failure that - // could be due to non-upgraded nodes which we may want to - // support, to avoid splitting the network (but this - // depends on the details of how net_processing handles - // such errors). - return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); - } - } + if (tx.IsCoinBase()) return true; + + if (pvChecks) { + pvChecks->reserve(tx.vin.size()); + } + + // First check if script executions have been cached with the same + // flags. 
Note that this assumes that the inputs provided are + // correct (ie that the transaction hash which is in tx's prevouts + // properly commits to the scriptPubKey in the inputs view of that + // transaction). + uint256 hashCacheEntry; + // We only use the first 19 bytes of nonce to avoid a second SHA + // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64) + static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache"); + CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin()); + AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks + if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) { + return true; + } - if (cacheFullScriptStore && !pvChecks) { - // We executed all of the provided scripts, and were told to - // cache the result. Do so now. - scriptExecutionCache.insert(hashCacheEntry); + for (unsigned int i = 0; i < tx.vin.size(); i++) { + const COutPoint &prevout = tx.vin[i].prevout; + const Coin& coin = inputs.AccessCoin(prevout); + assert(!coin.IsSpent()); + + // We very carefully only pass in things to CScriptCheck which + // are clearly committed to by tx' witness hash. This provides + // a sanity check that our caching is not introducing consensus + // failures through additional data in, eg, the coins being + // spent being checked as a part of CScriptCheck. + + // Verify signature + CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata); + if (pvChecks) { + pvChecks->push_back(CScriptCheck()); + check.swap(pvChecks->back()); + } else if (!check()) { + if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { + // Check whether the failure was caused by a + // non-mandatory script verification check, such as + // non-standard DER encodings or non-null dummy + // arguments; if so, ensure we return NOT_STANDARD + // instead of CONSENSUS to avoid downstream users + // splitting the network between upgraded and + // non-upgraded nodes by banning CONSENSUS-failing + // data providers. + CScriptCheck check2(coin.out, tx, i, + flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); + if (check2()) + return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); } + // MANDATORY flag failures correspond to + // ValidationInvalidReason::CONSENSUS. Because CONSENSUS + // failures are the most serious case of validation + // failures, we may need to consider using + // RECENT_CONSENSUS_CHANGE for any script failure that + // could be due to non-upgraded nodes which we may want to + // support, to avoid splitting the network (but this + // depends on the details of how net_processing handles + // such errors). + return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); } } + if (cacheFullScriptStore && !pvChecks) { + // We executed all of the provided scripts, and were told to + // cache the result. Do so now. 
+ scriptExecutionCache.insert(hashCacheEntry); + } + return true; } @@ -1522,20 +1477,22 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) } /** Abort with a message */ -static bool AbortNode(const std::string& strMessage, const std::string& userMessage="") +static bool AbortNode(const std::string& strMessage, const std::string& userMessage = "", unsigned int prefix = 0) { SetMiscWarning(strMessage); LogPrintf("*** %s\n", strMessage); - uiInterface.ThreadSafeMessageBox( - userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage, - "", CClientUIInterface::MSG_ERROR); + if (!userMessage.empty()) { + uiInterface.ThreadSafeMessageBox(userMessage, "", CClientUIInterface::MSG_ERROR | prefix); + } else { + uiInterface.ThreadSafeMessageBox(_("Error: A fatal internal error occurred, see debug.log for details").translated, "", CClientUIInterface::MSG_ERROR | CClientUIInterface::MSG_NOPREFIX); + } StartShutdown(); return false; } -static bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="") +static bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage = "", unsigned int prefix = 0) { - AbortNode(strMessage, userMessage); + AbortNode(strMessage, userMessage, prefix); return state.Error(strMessage); } @@ -1724,7 +1681,7 @@ static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_ // environment. See test/functional/p2p-segwit.py. static bool IsScriptWitnessEnabled(const Consensus::Params& params) { - return params.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0; + return params.SegwitHeight != std::numeric_limits<int>::max(); } static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { @@ -1760,12 +1717,13 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; } - // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. - if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { + // Start enforcing BIP112 (CHECKSEQUENCEVERIFY) + if (pindex->nHeight >= consensusparams.CSVHeight) { flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY; } - if (IsNullDummyEnabled(pindex->pprev, consensusparams)) { + // Start enforcing BIP147 NULLDUMMY (activated simultaneously with segwit) + if (IsWitnessEnabled(pindex->pprev, consensusparams)) { flags |= SCRIPT_VERIFY_NULLDUMMY; } @@ -1838,12 +1796,17 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl // relative to a piece of software is an objective fact these defaults can be easily reviewed. // This setting doesn't force the selection of any particular chain but makes validating some faster by // effectively caching the result of part of the verification. - BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid); - if (it != mapBlockIndex.end()) { + BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid); + if (it != m_blockman.m_block_index.end()) { if (it->second->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->nChainWork >= nMinimumChainWork) { // This block is a member of the assumed verified chain and an ancestor of the best header. 
+ // Script verification is skipped when connecting blocks under the + // assumevalid block. Assuming the assumevalid block is valid this + // is safe because block merkle hashes are still computed and checked, + // Of course, if an assumed valid block is invalid due to false scriptSigs + // this optimization would allow an invalid chain to be accepted. // The equivalent time check discourages hash power from extorting the network via DOS attack // into accepting an invalid block through telling users they must manually set assumevalid. // Requiring a software change or burying the invalid block, regardless of the setting, makes @@ -1950,9 +1913,9 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl } } - // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic. + // Start enforcing BIP68 (sequence locks) int nLockTimeFlags = 0; - if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) { + if (pindex->nHeight >= chainparams.GetConsensus().CSVHeight) { nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE; } @@ -2027,7 +1990,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl { std::vector<CScriptCheck> vChecks; bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ - if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) { + if (fScriptChecks && !CheckInputs(tx, state, view, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) { if (state.GetReason() == ValidationInvalidReason::TX_NOT_STANDARD) { // CheckInputs may return NOT_STANDARD for extra flags we passed, // but we can't return that, as it's not defined for a block, so @@ -2089,18 +2052,15 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl return true; } -/** - * Update the on-disk chain state. - * The caches and indexes are flushed depending on the mode we're called with - * if they're too large, if it's been a while since the last write, - * or always and in all cases if we're in prune mode and are deleting files. - * - * If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do anything - * besides checking if we need to prune. - */ -bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { +bool CChainState::FlushStateToDisk( + const CChainParams& chainparams, + CValidationState &state, + FlushStateMode mode, + int nManualPruneHeight) +{ int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); LOCK(cs_main); + assert(this->CanFlushToDisk()); static int64_t nLastWrite = 0; static int64_t nLastFlush = 0; std::set<int> setFilesToPrune; @@ -2134,7 +2094,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & nLastFlush = nNow; } int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; - int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); + int64_t cacheSize = CoinsTip().DynamicMemoryUsage(); int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). 
bool fCacheLarge = mode == FlushStateMode::PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); @@ -2150,7 +2110,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & if (fDoFullFlush || fPeriodicWrite) { // Depend on nMinDiskSpace to ensure we can write block index if (!CheckDiskSpace(GetBlocksDir())) { - return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!")); + return AbortNode(state, "Disk space is too low!", _("Error: Disk space is too low!").translated, CClientUIInterface::MSG_NOPREFIX); } // First make sure all block and undo data is flushed to disk. FlushBlockFile(); @@ -2178,17 +2138,17 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & nLastWrite = nNow; } // Flush best chain related state. This can only be done if the blocks / block index write was also done. - if (fDoFullFlush && !pcoinsTip->GetBestBlock().IsNull()) { + if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) { // Typical Coin structures on disk are around 48 bytes in size. // Pushing a new one to the database can cause it to be written // twice (once in the log, and once in the tables). This is already // an overestimation, as most will delete an existing entry or // overwrite one. Still, use a conservative safety factor of 2. - if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * pcoinsTip->GetCacheSize())) { - return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!")); + if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * CoinsTip().GetCacheSize())) { + return AbortNode(state, "Disk space is too low!", _("Error: Disk space is too low!").translated, CClientUIInterface::MSG_NOPREFIX); } // Flush the chainstate (which may refer to block index entries). - if (!pcoinsTip->Flush()) + if (!CoinsTip().Flush()) return AbortNode(state, "Failed to write to coin database"); nLastFlush = nNow; full_flush_completed = true; @@ -2196,7 +2156,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & } if (full_flush_completed) { // Update best block in wallet (so we can detect restored wallets). - GetMainSignals().ChainStateFlushed(::ChainActive().GetLocator()); + GetMainSignals().ChainStateFlushed(m_chain.GetLocator()); } } catch (const std::runtime_error& e) { return AbortNode(state, std::string("System error while flushing: ") + e.what()); @@ -2204,19 +2164,20 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState & return true; } -void FlushStateToDisk() { +void CChainState::ForceFlushStateToDisk() { CValidationState state; const CChainParams& chainparams = Params(); - if (!FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) { + if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) { LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state)); } } -void PruneAndFlush() { +void CChainState::PruneAndFlush() { CValidationState state; fCheckForPruning = true; const CChainParams& chainparams = Params(); - if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) { + + if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) { LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state)); } } @@ -2239,7 +2200,9 @@ static void AppendWarning(std::string& res, const std::string& warn) } /** Check warning conditions and do some notifications on new chain tip set. 
*/ -void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainParams) { +void static UpdateTip(const CBlockIndex* pindexNew, const CChainParams& chainParams) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main) +{ // New best block mempool.AddTransactionsUpdated(1); @@ -2250,7 +2213,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar } std::string warningMessages; - if (!IsInitialBlockDownload()) + if (!::ChainstateActive().IsInitialBlockDownload()) { int nUpgraded = 0; const CBlockIndex* pindex = pindexNew; @@ -2258,7 +2221,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar WarningBitsConditionChecker checker(bit); ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { - const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); + const std::string strWarning = strprintf(_("Warning: unknown new rules activated (versionbit %i)").translated, bit); if (state == ThresholdState::ACTIVE) { DoWarning(strWarning); } else { @@ -2275,16 +2238,14 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar pindex = pindex->pprev; } if (nUpgraded > 0) - AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded)); + AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version").translated, nUpgraded)); } - LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, /* Continued */ + LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx, FormatISO8601DateTime(pindexNew->GetBlockTime()), - GuessVerificationProgress(chainParams.TxData(), pindexNew), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize()); - if (!warningMessages.empty()) - LogPrintf(" warning='%s'", warningMessages); /* Continued */ - LogPrintf("\n"); + GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(), + !warningMessages.empty() ? strprintf(" warning='%s'", warningMessages) : ""); } @@ -2306,11 +2267,11 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); CBlock& block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus())) - return AbortNode(state, "Failed to read block"); + return error("DisconnectTip(): Failed to read block"); // Apply the block atomically to the chain state. 
int64_t nStart = GetTimeMicros(); { - CCoinsViewCache view(pcoinsTip.get()); + CCoinsViewCache view(&CoinsTip()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); @@ -2438,7 +2399,7 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp int64_t nTime3; LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO); { - CCoinsViewCache view(pcoinsTip.get()); + CCoinsViewCache view(&CoinsTip()); bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { @@ -2512,10 +2473,11 @@ CBlockIndex* CChainState::FindMostWorkChain() { if (fFailedChain) { pindexFailed->nStatus |= BLOCK_FAILED_CHILD; } else if (fMissingData) { - // If we're missing data, then add back to mapBlocksUnlinked, + // If we're missing data, then add back to m_blocks_unlinked, // so that if the block arrives in the future we can try adding // to setBlockIndexCandidates again. - mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed)); + m_blockman.m_blocks_unlinked.insert( + std::make_pair(pindexFailed->pprev, pindexFailed)); } setBlockIndexCandidates.erase(pindexFailed); pindexFailed = pindexFailed->pprev; @@ -2546,6 +2508,8 @@ void CChainState::PruneBlockIndexCandidates() { /** * Try to make some progress towards making pindexMostWork the active block. * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork. + * + * @returns true unless a system error occurred */ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) { @@ -2562,6 +2526,11 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar // This is likely a fatal error, but keep the mempool consistent, // just in case. Only remove from the mempool in this case. UpdateMempoolForReorg(disconnectpool, false); + + // If we're unable to disconnect a block during normal operation, + // then that is a failure of our local system -- we should abort + // rather than stay on a less work chain. + AbortNode(state, "Failed to disconnect block; see debug.log for details"); return false; } fBlocksDisconnected = true; @@ -2619,7 +2588,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar // any disconnected transactions back to the mempool. UpdateMempoolForReorg(disconnectpool, true); } - mempool.check(pcoinsTip.get()); + mempool.check(&CoinsTip()); // Callbacks/notifications for a new best chain. 
if (fInvalidFound) @@ -2630,7 +2599,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar return true; } -static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { +static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { bool fNotify = false; bool fInitialBlockDownload = false; static CBlockIndex* pindexHeaderOld = nullptr; @@ -2641,7 +2610,7 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { if (pindexHeader != pindexHeaderOld) { fNotify = true; - fInitialBlockDownload = IsInitialBlockDownload(); + fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } } @@ -2649,6 +2618,7 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { if (fNotify) { uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader); } + return fNotify; } static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) { @@ -2659,15 +2629,6 @@ static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) { } } -/** - * Make the best chain active, in multiple steps. The result is either failure - * or an activated best chain. pblock is either nullptr or a pointer to a block - * that is already loaded (to avoid loading it again from disk). - * - * ActivateBestChain is split into steps (see ActivateBestChainStep) so that - * we avoid holding cs_main for an extended period of time; the length of this - * call may be quite long during reindexing or a substantial reorg. - */ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { // Note that while we're often called here from ProcessNewBlock, this is // far from a guarantee. Things in the P2P/RPC will often end up calling @@ -2696,7 +2657,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& LimitValidationInterfaceQueue(); { - LOCK(cs_main); + LOCK2(cs_main, ::mempool.cs); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed CBlockIndex* starting_tip = m_chain.Tip(); bool blocks_connected = false; do { @@ -2715,8 +2676,10 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& bool fInvalidFound = false; std::shared_ptr<const CBlock> nullBlockPtr; - if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) + if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? 
pblock : nullBlockPtr, fInvalidFound, connectTrace)) { + // A system error occurred return false; + } blocks_connected = true; if (fInvalidFound) { @@ -2767,7 +2730,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& } bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { - return g_chainstate.ActivateBestChain(state, chainparams, std::move(pblock)); + return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock)); } bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) @@ -2799,7 +2762,7 @@ bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& par return ActivateBestChain(state, params, std::shared_ptr<const CBlock>()); } bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) { - return g_chainstate.PreciousBlock(state, params, pindex); + return ::ChainstateActive().PreciousBlock(state, params, pindex); } bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) @@ -2816,6 +2779,7 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c LimitValidationInterfaceQueue(); LOCK(cs_main); + LOCK(::mempool.cs); // Lock for as long as disconnectpool is in scope to make sure UpdateMempoolForReorg is called after DisconnectTip without unlocking in between if (!m_chain.Contains(pindex)) break; pindex_was_in_chain = true; CBlockIndex *invalid_walk_tip = m_chain.Tip(); @@ -2865,12 +2829,12 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c to_mark_failed->nStatus |= BLOCK_FAILED_VALID; setDirtyBlockIndex.insert(to_mark_failed); setBlockIndexCandidates.erase(to_mark_failed); - m_failed_blocks.insert(to_mark_failed); + m_blockman.m_failed_blocks.insert(to_mark_failed); // The resulting new best tip may not be in setBlockIndexCandidates anymore, so // add it again. - BlockMap::iterator it = mapBlockIndex.begin(); - while (it != mapBlockIndex.end()) { + BlockMap::iterator it = m_blockman.m_block_index.begin(); + while (it != m_blockman.m_block_index.end()) { if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) { setBlockIndexCandidates.insert(it->second); } @@ -2888,7 +2852,7 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c } bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) { - return g_chainstate.InvalidateBlock(state, chainparams, pindex); + return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex); } void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) { @@ -2897,8 +2861,8 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) { int nHeight = pindex->nHeight; // Remove the invalidity flag from this block and all its descendants. - BlockMap::iterator it = mapBlockIndex.begin(); - while (it != mapBlockIndex.end()) { + BlockMap::iterator it = m_blockman.m_block_index.begin(); + while (it != m_blockman.m_block_index.end()) { if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { it->second->nStatus &= ~BLOCK_FAILED_MASK; setDirtyBlockIndex.insert(it->second); @@ -2909,7 +2873,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) { // Reset invalid block marker if it was pointing to one of those. 
                 pindexBestInvalid = nullptr;
             }
-            m_failed_blocks.erase(it->second);
+            m_blockman.m_failed_blocks.erase(it->second);
         }
         it++;
     }
@@ -2919,24 +2883,24 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
         if (pindex->nStatus & BLOCK_FAILED_MASK) {
             pindex->nStatus &= ~BLOCK_FAILED_MASK;
             setDirtyBlockIndex.insert(pindex);
-            m_failed_blocks.erase(pindex);
+            m_blockman.m_failed_blocks.erase(pindex);
         }
         pindex = pindex->pprev;
     }
 }
 void ResetBlockFailureFlags(CBlockIndex *pindex) {
-    return g_chainstate.ResetBlockFailureFlags(pindex);
+    return ::ChainstateActive().ResetBlockFailureFlags(pindex);
 }
-CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
 {
     AssertLockHeld(cs_main);
     // Check for duplicate
     uint256 hash = block.GetHash();
-    BlockMap::iterator it = mapBlockIndex.find(hash);
-    if (it != mapBlockIndex.end())
+    BlockMap::iterator it = m_block_index.find(hash);
+    if (it != m_block_index.end())
         return it->second;
     // Construct new block index object
@@ -2945,10 +2909,10 @@ CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
     // to avoid miners withholding blocks but broadcasting headers, to get a
     // competitive advantage.
     pindexNew->nSequenceId = 0;
-    BlockMap::iterator mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
+    BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
-    BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
-    if (miPrev != mapBlockIndex.end())
+    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+    if (miPrev != m_block_index.end())
     {
         pindexNew->pprev = (*miPrev).second;
         pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
@@ -2997,17 +2961,17 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
             if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
                 setBlockIndexCandidates.insert(pindex);
             }
-            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
+            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = m_blockman.m_blocks_unlinked.equal_range(pindex);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
                 queue.push_back(it->second);
                 range.first++;
-                mapBlocksUnlinked.erase(it);
+                m_blockman.m_blocks_unlinked.erase(it);
             }
         }
     } else {
         if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
-            mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
+            m_blockman.m_blocks_unlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
         }
     }
 }
@@ -3050,7 +3014,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n
         bool out_of_space;
         size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
         if (out_of_space) {
-            return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
+            return AbortNode("Disk space is too low!", _("Error: Disk space is too low!").translated, CClientUIInterface::MSG_NOPREFIX);
         }
         if (bytes_allocated != 0 && fPruneMode) {
             fCheckForPruning = true;
@@ -3074,7 +3038,7 @@ static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, un
     bool out_of_space;
     size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
     if (out_of_space) {
-        return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+        return AbortNode(state, "Disk space is too low!", _("Error: Disk space is too low!").translated, CClientUIInterface::MSG_NOPREFIX);
     }
     if (bytes_allocated != 0 && fPruneMode) {
         fCheckForPruning = true;
@@ -3158,14 +3122,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
 bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
 {
-    LOCK(cs_main);
-    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE);
-}
-
-bool IsNullDummyEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
-{
-    LOCK(cs_main);
-    return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE);
+    int height = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
+    return (height >= params.SegwitHeight);
 }
 // Compute at which vout of the block's coinbase transaction the witness
@@ -3200,7 +3158,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
     std::vector<unsigned char> commitment;
     int commitpos = GetWitnessCommitmentIndex(block);
     std::vector<unsigned char> ret(32, 0x00);
-    if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
+    if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) {
        if (commitpos == -1) {
            uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
            CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
@@ -3263,7 +3221,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
     if (fCheckpointsEnabled) {
         // Don't accept any forks from the main chain prior to last checkpoint.
         // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
-        // MapBlockIndex.
+        // g_blockman.m_block_index.
         CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
         if (pcheckpoint && nHeight < pcheckpoint->nHeight)
             return state.Invalid(ValidationInvalidReason::BLOCK_CHECKPOINT, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
@@ -3298,9 +3256,9 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
 {
     const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
-    // Start enforcing BIP113 (Median Time Past) using versionbits logic.
+    // Start enforcing BIP113 (Median Time Past).
     int nLockTimeFlags = 0;
-    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) {
+    if (nHeight >= consensusParams.CSVHeight) {
         assert(pindexPrev != nullptr);
         nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
     }
@@ -3335,7 +3293,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
     // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness reserved value). In case there are
     // multiple, the last one is used.
     bool fHaveWitness = false;
-    if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == ThresholdState::ACTIVE) {
+    if (nHeight >= consensusParams.SegwitHeight) {
         int commitpos = GetWitnessCommitmentIndex(block);
         if (commitpos != -1) {
             bool malleated = false;
@@ -3376,15 +3334,15 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
     return true;
 }
-bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
+bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
 {
     AssertLockHeld(cs_main);
     // Check for duplicate
     uint256 hash = block.GetHash();
-    BlockMap::iterator miSelf = mapBlockIndex.find(hash);
+    BlockMap::iterator miSelf = m_block_index.find(hash);
     CBlockIndex *pindex = nullptr;
     if (hash != chainparams.GetConsensus().hashGenesisBlock) {
-        if (miSelf != mapBlockIndex.end()) {
+        if (miSelf != m_block_index.end()) {
             // Block header is already known.
             pindex = miSelf->second;
             if (ppindex)
@@ -3399,8 +3357,8 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
         // Get prev block index
         CBlockIndex* pindexPrev = nullptr;
-        BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
-        if (mi == mapBlockIndex.end())
+        BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
+        if (mi == m_block_index.end())
            return state.Invalid(ValidationInvalidReason::BLOCK_MISSING_PREV, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
        pindexPrev = (*mi).second;
        if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
@@ -3452,8 +3410,6 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
     if (ppindex)
         *ppindex = pindex;
-    CheckBlockIndex(chainparams.GetConsensus());
-
     return true;
 }
@@ -3465,7 +3421,10 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
         LOCK(cs_main);
         for (const CBlockHeader& header : headers) {
             CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
-            if (!g_chainstate.AcceptBlockHeader(header, state, chainparams, &pindex)) {
+            bool accepted = g_blockman.AcceptBlockHeader(header, state, chainparams, &pindex);
+            ::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());
+
+            if (!accepted) {
                 if (first_invalid) *first_invalid = header;
                 return false;
             }
@@ -3474,7 +3433,11 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
             }
         }
     }
-    NotifyHeaderTip();
+    if (NotifyHeaderTip()) {
+        if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) {
+            LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
+        }
+    }
     return true;
 }
@@ -3508,7 +3471,10 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
     CBlockIndex *pindexDummy = nullptr;
     CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
-    if (!AcceptBlockHeader(block, state, chainparams, &pindex))
+    bool accepted_header = m_blockman.AcceptBlockHeader(block, state, chainparams, &pindex);
+    CheckBlockIndex(chainparams.GetConsensus());
+
+    if (!accepted_header)
         return false;
     // Try to process all requested blocks that we don't have, but only
@@ -3596,7 +3562,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
         bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
         if (ret) {
             // Store to disk
-            ret = g_chainstate.AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
+            ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
         }
         if (!ret) {
             GetMainSignals().BlockChecked(*pblock, state);
@@ -3607,7 +3573,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
     NotifyHeaderTip();
     CValidationState state; // Only used to report errors, not invalidity - ignore it
-    if (!g_chainstate.ActivateBestChain(state, chainparams, pblock))
+    if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
         return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
     return true;
@@ -3617,7 +3583,7 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
 {
     AssertLockHeld(cs_main);
     assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
-    CCoinsViewCache viewNew(pcoinsTip.get());
+    CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip());
     uint256 block_hash(block.GetHash());
     CBlockIndex indexDummy(block);
     indexDummy.pprev = pindexPrev;
@@ -3631,7 +3597,7 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
         return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
     if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
         return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
-    if (!g_chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
+    if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
         return false;
     assert(state.IsValid());
@@ -3659,7 +3625,7 @@ void PruneOneBlockFile(const int fileNumber)
 {
     LOCK(cs_LastBlockFile);
-    for (const auto& entry : mapBlockIndex) {
+    for (const auto& entry : g_blockman.m_block_index) {
         CBlockIndex* pindex = entry.second;
         if (pindex->nFile == fileNumber) {
             pindex->nStatus &= ~BLOCK_HAVE_DATA;
@@ -3669,16 +3635,16 @@ void PruneOneBlockFile(const int fileNumber)
             pindex->nUndoPos = 0;
             setDirtyBlockIndex.insert(pindex);
-            // Prune from mapBlocksUnlinked -- any block we prune would have
+            // Prune from m_blocks_unlinked -- any block we prune would have
             // to be downloaded again in order to consider its chain, at which
             // point it would be considered as a candidate for
-            // mapBlocksUnlinked or setBlockIndexCandidates.
-            std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
+            // m_blocks_unlinked or setBlockIndexCandidates.
+            auto range = g_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
                 range.first++;
                 if (_it->second == pindex) {
-                    mapBlocksUnlinked.erase(_it);
+                    g_blockman.m_blocks_unlinked.erase(_it);
                 }
             }
         }
@@ -3726,7 +3692,8 @@ void PruneBlockFilesManual(int nManualPruneHeight)
 {
     CValidationState state;
     const CChainParams& chainparams = Params();
-    if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
+    if (!::ChainstateActive().FlushStateToDisk(
+            chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
         LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
     }
 }
@@ -3770,7 +3737,7 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
     // To avoid excessive prune events negating the benefit of high dbcache
     // values, we should not prune too rapidly.
     // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
-    if (IsInitialBlockDownload()) {
+    if (::ChainstateActive().IsInitialBlockDownload()) {
         // Since this is only relevant during IBD, we use a fixed 10%
         nBuffer += nPruneTarget / 10;
     }
@@ -3826,7 +3793,7 @@ fs::path GetBlockPosFilename(const FlatFilePos &pos)
     return BlockFileSeq().FileName(pos);
 }
-CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
+CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
 {
     AssertLockHeld(cs_main);
@@ -3834,27 +3801,30 @@ CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
         return nullptr;
     // Return existing
-    BlockMap::iterator mi = mapBlockIndex.find(hash);
-    if (mi != mapBlockIndex.end())
+    BlockMap::iterator mi = m_block_index.find(hash);
+    if (mi != m_block_index.end())
         return (*mi).second;
     // Create new
     CBlockIndex* pindexNew = new CBlockIndex();
-    mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
+    mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
     return pindexNew;
 }
-bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree)
+bool BlockManager::LoadBlockIndex(
+    const Consensus::Params& consensus_params,
+    CBlockTreeDB& blocktree,
+    std::set<CBlockIndex*, CBlockIndexWorkComparator>& block_index_candidates)
 {
     if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
         return false;
     // Calculate nChainWork
     std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
-    vSortedByHeight.reserve(mapBlockIndex.size());
-    for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
+    vSortedByHeight.reserve(m_block_index.size());
+    for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
     {
         CBlockIndex* pindex = item.second;
         vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
@@ -3874,7 +3844,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
                 pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
             } else {
                 pindex->nChainTx = 0;
-                mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
+                m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
             }
         } else {
             pindex->nChainTx = pindex->nTx;
@@ -3884,8 +3854,9 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
             pindex->nStatus |= BLOCK_FAILED_CHILD;
             setDirtyBlockIndex.insert(pindex);
         }
-        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))
-            setBlockIndexCandidates.insert(pindex);
+        if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr)) {
+            block_index_candidates.insert(pindex);
+        }
         if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
             pindexBestInvalid = pindex;
         if (pindex->pprev)
@@ -3897,9 +3868,21 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
     return true;
 }
+void BlockManager::Unload() {
+    m_failed_blocks.clear();
+    m_blocks_unlinked.clear();
+
+    for (const BlockMap::value_type& entry : m_block_index) {
+        delete entry.second;
+    }
+
+    m_block_index.clear();
+}
+
 bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
-    if (!g_chainstate.LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
+    if (!g_blockman.LoadBlockIndex(
+            chainparams.GetConsensus(), *pblocktree, ::ChainstateActive().setBlockIndexCandidates))
         return false;
     // Load block file info
@@ -3922,7 +3905,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
     // Check presence of blk files
     LogPrintf("Checking all blk files are present...\n");
     std::set<int> setBlkDataFiles;
-    for (const std::pair<const uint256, CBlockIndex*>& item : mapBlockIndex)
+    for (const std::pair<const uint256, CBlockIndex*>& item : g_blockman.m_block_index)
     {
         CBlockIndex* pindex = item.second;
         if (pindex->nStatus & BLOCK_HAVE_DATA) {
@@ -3953,18 +3936,20 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
 bool LoadChainTip(const CChainParams& chainparams)
 {
     AssertLockHeld(cs_main);
-    assert(!pcoinsTip->GetBestBlock().IsNull()); // Never called when the coins view is empty
+    const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+    assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
-    if (::ChainActive().Tip() && ::ChainActive().Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
+    if (::ChainActive().Tip() &&
+        ::ChainActive().Tip()->GetBlockHash() == coins_cache.GetBestBlock()) return true;
     // Load pointer to end of best chain
-    CBlockIndex* pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
+    CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock());
     if (!pindex) {
         return false;
     }
     ::ChainActive().SetTip(pindex);
-    g_chainstate.PruneBlockIndexCandidates();
+    ::ChainstateActive().PruneBlockIndexCandidates();
     LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
         ::ChainActive().Tip()->GetBlockHash().ToString(), ::ChainActive().Height(),
@@ -3975,7 +3960,7 @@ bool LoadChainTip(const CChainParams& chainparams)
 CVerifyDB::CVerifyDB()
 {
-    uiInterface.ShowProgress(_("Verifying blocks..."), 0, false);
+    uiInterface.ShowProgress(_("Verifying blocks...").translated, 0, false);
 }
 CVerifyDB::~CVerifyDB()
@@ -4009,7 +3994,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
             LogPrintf("[%d%%]...", percentageDone); /* Continued */
             reportDone = percentageDone/10;
         }
-        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
+        uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
         if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
             break;
         if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
@@ -4035,9 +4020,9 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
             }
         }
         // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
-        if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
+        if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + ::ChainstateActive().CoinsTip().DynamicMemoryUsage()) <= nCoinCacheUsage) {
             assert(coins.GetBestBlock() == pindex->GetBlockHash());
-            DisconnectResult res = g_chainstate.DisconnectBlock(block, pindex, coins);
+            DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
             if (res == DISCONNECT_FAILED) {
                 return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
             }
@@ -4067,12 +4052,12 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
             LogPrintf("[%d%%]...", percentageDone); /* Continued */
             reportDone = percentageDone/10;
         }
-        uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
+        uiInterface.ShowProgress(_("Verifying blocks...").translated, percentageDone, false);
         pindex = ::ChainActive().Next(pindex);
         CBlock block;
         if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
             return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
-        if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
+        if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
            return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
     }
 }
@@ -4114,23 +4099,23 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
     if (hashHeads.empty()) return true; // We're already in a consistent state.
     if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
-    uiInterface.ShowProgress(_("Replaying blocks..."), 0, false);
+    uiInterface.ShowProgress(_("Replaying blocks...").translated, 0, false);
     LogPrintf("Replaying blocks\n");
     const CBlockIndex* pindexOld = nullptr;  // Old tip during the interrupted flush.
     const CBlockIndex* pindexNew;            // New tip during the interrupted flush.
     const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
-    if (mapBlockIndex.count(hashHeads[0]) == 0) {
+    if (m_blockman.m_block_index.count(hashHeads[0]) == 0) {
         return error("ReplayBlocks(): reorganization to unknown block requested");
     }
-    pindexNew = mapBlockIndex[hashHeads[0]];
+    pindexNew = m_blockman.m_block_index[hashHeads[0]];
     if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
-        if (mapBlockIndex.count(hashHeads[1]) == 0) {
+        if (m_blockman.m_block_index.count(hashHeads[1]) == 0) {
             return error("ReplayBlocks(): reorganization from unknown block requested");
         }
-        pindexOld = mapBlockIndex[hashHeads[1]];
+        pindexOld = m_blockman.m_block_index[hashHeads[1]];
         pindexFork = LastCommonAncestor(pindexOld, pindexNew);
         assert(pindexFork != nullptr);
     }
@@ -4160,7 +4145,7 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
     for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
         const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
         LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
-        uiInterface.ShowProgress(_("Replaying blocks..."), (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
+        uiInterface.ShowProgress(_("Replaying blocks...").translated, (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
         if (!RollforwardBlock(pindex, cache, params)) return false;
     }
@@ -4171,7 +4156,7 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
 }
 bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
-    return g_chainstate.ReplayBlocks(params, view);
+    return ::ChainstateActive().ReplayBlocks(params, view);
 }
 //! Helper for CChainState::RewindBlockIndex
@@ -4196,10 +4181,10 @@ void CChainState::EraseBlockData(CBlockIndex* index)
     setDirtyBlockIndex.insert(index);
     // Update indexes
     setBlockIndexCandidates.erase(index);
-    std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(index->pprev);
+    auto ret = m_blockman.m_blocks_unlinked.equal_range(index->pprev);
     while (ret.first != ret.second) {
         if (ret.first->second == index) {
-            mapBlocksUnlinked.erase(ret.first++);
+            m_blockman.m_blocks_unlinked.erase(ret.first++);
         } else {
             ++ret.first;
         }
@@ -4219,7 +4204,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
     // blocks will be dealt with below (releasing cs_main in between).
     {
         LOCK(cs_main);
-        for (const auto& entry : mapBlockIndex) {
+        for (const auto& entry : m_blockman.m_block_index) {
             if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
                 EraseBlockData(entry.second);
             }
@@ -4249,7 +4234,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
     // Loop until the tip is below nHeight, or we reach a pruned block.
     while (!ShutdownRequested()) {
         {
-            LOCK(cs_main);
+            LOCK2(cs_main, ::mempool.cs);
             // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
             assert(tip == m_chain.Tip());
             if (tip == nullptr || tip->nHeight < nHeight) break;
@@ -4304,7 +4289,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
 }
 bool RewindBlockIndex(const CChainParams& params) {
-    if (!g_chainstate.RewindBlockIndex(params)) {
+    if (!::ChainstateActive().RewindBlockIndex(params)) {
         return false;
     }
@@ -4314,7 +4299,7 @@ bool RewindBlockIndex(const CChainParams& params) {
         // and skip it here, we're about to -reindex-chainstate anyway, so
        // it'll get called a bunch real soon.
         CValidationState state;
-        if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
+        if (!::ChainstateActive().FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
             LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
             return false;
         }
@@ -4325,7 +4310,6 @@ bool RewindBlockIndex(const CChainParams& params) {
 void CChainState::UnloadBlockIndex() {
     nBlockSequenceId = 1;
-    m_failed_blocks.clear();
     setBlockIndexCandidates.clear();
 }
@@ -4336,10 +4320,10 @@ void UnloadBlockIndex()
 {
     LOCK(cs_main);
     ::ChainActive().SetTip(nullptr);
+    g_blockman.Unload();
     pindexBestInvalid = nullptr;
     pindexBestHeader = nullptr;
     mempool.clear();
-    mapBlocksUnlinked.clear();
     vinfoBlockFile.clear();
     nLastBlockFile = 0;
     setDirtyBlockIndex.clear();
@@ -4348,14 +4332,9 @@ void UnloadBlockIndex()
     for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
         warningcache[b].clear();
     }
-
-    for (const BlockMap::value_type& entry : mapBlockIndex) {
-        delete entry.second;
-    }
-    mapBlockIndex.clear();
     fHavePruned = false;
-    g_chainstate.UnloadBlockIndex();
+    ::ChainstateActive().UnloadBlockIndex();
 }
 bool LoadBlockIndex(const CChainParams& chainparams)
@@ -4365,7 +4344,7 @@ bool LoadBlockIndex(const CChainParams& chainparams)
     if (!fReindex) {
         bool ret = LoadBlockIndexDB(chainparams);
         if (!ret) return false;
-        needs_init = mapBlockIndex.empty();
+        needs_init = g_blockman.m_block_index.empty();
     }
     if (needs_init) {
@@ -4385,10 +4364,10 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
     LOCK(cs_main);
     // Check whether we're already initialized by checking for genesis in
-    // mapBlockIndex. Note that we can't use m_chain here, since it is
+    // m_blockman.m_block_index. Note that we can't use m_chain here, since it is
     // set based on the coins db, not the block index db, which is the only
     // thing loaded at this point.
-    if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
+    if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash()))
         return true;
     try {
@@ -4396,7 +4375,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
         FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
         if (blockPos.IsNull())
             return error("%s: writing genesis block to disk failed", __func__);
-        CBlockIndex *pindex = AddToBlockIndex(block);
+        CBlockIndex *pindex = m_blockman.AddToBlockIndex(block);
         ReceivedBlockTransactions(block, pindex, blockPos, chainparams.GetConsensus());
     } catch (const std::runtime_error& e) {
         return error("%s: failed to write genesis block: %s", __func__, e.what());
@@ -4407,7 +4386,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
 bool LoadGenesisBlock(const CChainParams& chainparams)
 {
-    return g_chainstate.LoadGenesisBlock(chainparams);
+    return ::ChainstateActive().LoadGenesisBlock(chainparams);
 }
 bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos *dbp)
@@ -4472,7 +4451,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
                 CBlockIndex* pindex = LookupBlockIndex(hash);
                 if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
                     CValidationState state;
-                    if (g_chainstate.AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
+                    if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
                         nLoaded++;
                     }
                     if (state.IsError()) {
@@ -4509,7 +4488,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
                                 head.ToString());
                         LOCK(cs_main);
                         CValidationState dummy;
-                        if (g_chainstate.AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
+                        if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
                         {
                             nLoaded++;
                             queue.push_back(pblockrecursive->GetHash());
@@ -4541,20 +4520,20 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
     LOCK(cs_main);
     // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
-    // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
-    // iterating the block tree require that m_chain has been initialized.)
+    // so we have the genesis block in m_blockman.m_block_index but no active chain. (A few of the
+    // tests when iterating the block tree require that m_chain has been initialized.)
     if (m_chain.Height() < 0) {
-        assert(mapBlockIndex.size() <= 1);
+        assert(m_blockman.m_block_index.size() <= 1);
         return;
     }
     // Build forward-pointing map of the entire block tree.
     std::multimap<CBlockIndex*,CBlockIndex*> forward;
-    for (const std::pair<const uint256, CBlockIndex*>& entry : mapBlockIndex) {
+    for (const std::pair<const uint256, CBlockIndex*>& entry : m_blockman.m_block_index) {
         forward.insert(std::make_pair(entry.second->pprev, entry.second));
     }
-    assert(forward.size() == mapBlockIndex.size());
+    assert(forward.size() == m_blockman.m_block_index.size());
     std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
     CBlockIndex *pindex = rangeGenesis.first->second;
@@ -4608,7 +4587,7 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
         assert(pindex->nHeight == nHeight); // nHeight must be consistent.
         assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
         assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
-        assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid
+        assert(pindexFirstNotTreeValid == nullptr); // All m_blockman.m_block_index entries must at least be TREE valid
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
@@ -4627,13 +4606,13 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
                 }
                 // If some parent is missing, then it could be that this block was in
                 // setBlockIndexCandidates but had to be removed because of the missing data.
-                // In this case it must be in mapBlocksUnlinked -- see test below.
+                // In this case it must be in m_blocks_unlinked -- see test below.
             }
         } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
             assert(setBlockIndexCandidates.count(pindex) == 0);
         }
-        // Check whether this block is in mapBlocksUnlinked.
-        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
+        // Check whether this block is in m_blocks_unlinked.
+        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = m_blockman.m_blocks_unlinked.equal_range(pindex->pprev);
         bool foundInUnlinked = false;
         while (rangeUnlinked.first != rangeUnlinked.second) {
             assert(rangeUnlinked.first->first == pindex->pprev);
@@ -4644,22 +4623,22 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
             rangeUnlinked.first++;
         }
         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
-            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
+            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in m_blocks_unlinked.
             assert(foundInUnlinked);
         }
-        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
-        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
+        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in m_blocks_unlinked if we don't HAVE_DATA
+        if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked.
         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
             // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
             assert(fHavePruned); // We must have pruned.
-            // This block may have entered mapBlocksUnlinked if:
+            // This block may have entered m_blocks_unlinked if:
             //  - it has a descendant that at some point had more work than the
             //    tip, and
             //  - we tried switching to that descendant but were missing
             //    data for some intermediate block between m_chain and the
             //    tip.
             // So if this block is itself better than m_chain.Tip() and it wasn't in
-            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
+            // setBlockIndexCandidates, then it must be in m_blocks_unlinked.
             if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
                 if (pindexFirstInvalid == nullptr) {
                     assert(foundInUnlinked);
@@ -4903,9 +4882,10 @@ public:
     CMainCleanup() {}
     ~CMainCleanup() {
         // block headers
-        BlockMap::iterator it1 = mapBlockIndex.begin();
-        for (; it1 != mapBlockIndex.end(); it1++)
+        BlockMap::iterator it1 = g_blockman.m_block_index.begin();
+        for (; it1 != g_blockman.m_block_index.end(); it1++)
             delete (*it1).second;
-        mapBlockIndex.clear();
+        g_blockman.m_block_index.clear();
     }
-} instance_of_cmaincleanup;
+};
+static CMainCleanup instance_of_cmaincleanup;
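Note on the deployment checks changed above: the patch replaces the BIP9 VersionBitsState(...) == ThresholdState::ACTIVE lookups for SegWit and CSV with plain height comparisons against Consensus::Params::SegwitHeight and CSVHeight ("buried" deployments). The short standalone C++ sketch below is illustrative only and is not part of the diff; the Params struct, the IsWitnessEnabledAt helper, and the example height are hypothetical stand-ins for the real chainparams code.

    #include <iostream>
    #include <limits>

    // Illustrative stand-in for the consensus parameters used by the new checks.
    struct Params {
        // std::numeric_limits<int>::max() means "never active", mirroring the patch.
        int SegwitHeight = std::numeric_limits<int>::max();
    };

    // Height-based activation test in the spirit of the new IsWitnessEnabled():
    // a deployment buried at height H is active for every block at height >= H.
    bool IsWitnessEnabledAt(int block_height, const Params& params)
    {
        return block_height >= params.SegwitHeight;
    }

    int main()
    {
        Params params;
        params.SegwitHeight = 481824; // example value for illustration

        std::cout << IsWitnessEnabledAt(481823, params) << '\n'; // 0: below the buried height
        std::cout << IsWitnessEnabledAt(481824, params) << '\n'; // 1: active from this height on
        return 0;
    }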