Diffstat (limited to 'src/validation.cpp')
-rw-r--r--  src/validation.cpp  1058
1 file changed, 602 insertions, 456 deletions
diff --git a/src/validation.cpp b/src/validation.cpp index 975cbcc79f..986052771c 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3,42 +3,44 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "validation.h" - -#include "arith_uint256.h" -#include "chain.h" -#include "chainparams.h" -#include "checkpoints.h" -#include "checkqueue.h" -#include "consensus/consensus.h" -#include "consensus/merkle.h" -#include "consensus/tx_verify.h" -#include "consensus/validation.h" -#include "cuckoocache.h" -#include "fs.h" -#include "hash.h" -#include "init.h" -#include "policy/fees.h" -#include "policy/policy.h" -#include "pow.h" -#include "primitives/block.h" -#include "primitives/transaction.h" -#include "random.h" -#include "script/script.h" -#include "script/sigcache.h" -#include "script/standard.h" -#include "timedata.h" -#include "tinyformat.h" -#include "txdb.h" -#include "txmempool.h" -#include "ui_interface.h" -#include "undo.h" -#include "util.h" -#include "utilmoneystr.h" -#include "utilstrencodings.h" -#include "validationinterface.h" -#include "versionbits.h" -#include "warnings.h" +#include <validation.h> + +#include <arith_uint256.h> +#include <chain.h> +#include <chainparams.h> +#include <checkpoints.h> +#include <checkqueue.h> +#include <consensus/consensus.h> +#include <consensus/merkle.h> +#include <consensus/tx_verify.h> +#include <consensus/validation.h> +#include <cuckoocache.h> +#include <fs.h> +#include <hash.h> +#include <init.h> +#include <policy/fees.h> +#include <policy/policy.h> +#include <policy/rbf.h> +#include <pow.h> +#include <primitives/block.h> +#include <primitives/transaction.h> +#include <random.h> +#include <reverse_iterator.h> +#include <script/script.h> +#include <script/sigcache.h> +#include <script/standard.h> +#include <timedata.h> +#include <tinyformat.h> +#include <txdb.h> +#include <txmempool.h> +#include <ui_interface.h> +#include <undo.h> +#include <util.h> +#include <utilmoneystr.h> +#include <utilstrencodings.h> +#include <validationinterface.h> +#include <versionbits.h> +#include <warnings.h> #include <atomic> #include <sstream> @@ -51,6 +53,9 @@ # error "Bitcoin cannot be compiled without assertions." #endif +#define MICRO 0.000001 +#define MILLI 0.001 + /** * Global state */ @@ -59,12 +64,12 @@ CCriticalSection cs_main; BlockMap mapBlockIndex; CChain chainActive; -CBlockIndex *pindexBestHeader = NULL; +CBlockIndex *pindexBestHeader = nullptr; CWaitableCriticalSection csBestBlock; CConditionVariable cvBlockChange; int nScriptCheckThreads = 0; std::atomic_bool fImporting(false); -bool fReindex = false; +std::atomic_bool fReindex(false); bool fTxIndex = false; bool fHavePruned = false; bool fPruneMode = false; @@ -78,6 +83,7 @@ int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT; uint256 hashAssumeValid; +arith_uint256 nMinimumChainWork; CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE; @@ -150,6 +156,26 @@ namespace { /** chainwork for the last block that preciousblock has been applied to. */ arith_uint256 nLastPreciousChainwork = 0; + /** In order to efficiently track invalidity of headers, we keep the set of + * blocks which we tried to connect and found to be invalid here (ie which + * were set to BLOCK_FAILED_VALID since the last restart). 
We can then + * walk this set and check if a new header is a descendant of something in + * this set, preventing us from having to walk mapBlockIndex when we try + * to connect a bad block and fail. + * + * While this is more complicated than marking everything which descends + * from an invalid block as invalid at the time we discover it to be + * invalid, doing so would require walking all of mapBlockIndex to find all + * descendants. Since this case should be very rare, keeping track of all + * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as + * well. + * + * Because we already walk mapBlockIndex in height-order at startup, we go + * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time, + * instead of putting things in this set. + */ + std::set<CBlockIndex*> g_failed_blocks; + /** Dirty block index entries. */ std::set<CBlockIndex*> setDirtyBlockIndex; @@ -175,9 +201,9 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc return chain.Genesis(); } -CCoinsViewDB *pcoinsdbview = NULL; -CCoinsViewCache *pcoinsTip = NULL; -CBlockTreeDB *pblocktree = NULL; +std::unique_ptr<CCoinsViewDB> pcoinsdbview; +std::unique_ptr<CCoinsViewCache> pcoinsTip; +std::unique_ptr<CBlockTreeDB> pblocktree; enum FlushStateMode { FLUSH_STATE_NONE, @@ -213,7 +239,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags) // IsFinalTx() with one more than chainActive.Height(). const int nBlockHeight = chainActive.Height() + 1; - // BIP113 will require that time-locked transactions have nLockTime set to + // BIP113 requires that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. // When the next block is created its previous block will be the current // chain tip, so we use that to calculate the median time passed to @@ -249,6 +275,8 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool AssertLockHeld(mempool.cs); CBlockIndex* tip = chainActive.Tip(); + assert(tip != nullptr); + CBlockIndex index; index.pprev = tip; // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate @@ -267,7 +295,7 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool } else { // pcoinsTip contains the UTXO set for chainActive.Tip() - CCoinsViewMemPool viewMemPool(pcoinsTip, mempool); + CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool); std::vector<int> prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { @@ -376,7 +404,9 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) { // ignore validation errors in resurrected transactions CValidationState stateDummy; - if (!fAddToMempool || (*it)->IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, *it, false, NULL, NULL, true)) { + if (!fAddToMempool || (*it)->IsCoinBase() || + !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */, + nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). 
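The g_failed_blocks comment above describes the intended lookup: walk the set and test whether a new header descends from any entry. A minimal sketch of that check, using a hypothetical helper name and the CBlockIndex::GetAncestor() walk the comment alludes to (not the patch's exact code):

// Hypothetical helper: reject a new header whose ancestry contains a block
// we previously tried to connect and marked BLOCK_FAILED_VALID.
static bool HasFailedAncestor(const CBlockIndex* pindex_new)
{
    for (const CBlockIndex* failed : g_failed_blocks) {
        // The failed block is an ancestor iff it is what pindex_new sees at that height.
        if (pindex_new->GetAncestor(failed->nHeight) == failed) return true;
    }
    return false;
}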
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); @@ -394,9 +424,9 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions - mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions - LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); + LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); } // Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool @@ -435,9 +465,9 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata); } -static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx, bool fLimitFree, +static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx, bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced, - bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache) + bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache) { const CTransaction& tx = *ptx; const uint256 hash = tx.GetHash(); @@ -454,7 +484,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // Reject transactions with witness before segregated witness activates (override with -prematurewitness) bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()); - if (!GetBoolArg("-prematurewitness",false) && tx.HasWitness() && !witnessEnabled) { + if (!gArgs.GetBoolArg("-prematurewitness", false) && tx.HasWitness() && !witnessEnabled) { return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true); } @@ -487,9 +517,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool if (!setConflicts.count(ptxConflicting->GetHash())) { // Allow opt-out of transaction replacement by setting - // nSequence >= maxint-1 on all inputs. + // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs. // - // maxint-1 is picked to still allow use of nLockTime by + // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by // non-replaceable transactions. All inputs rather than just one // is for the sake of multi-party protocols, where we don't // want a single party to be able to disable replacement. 
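A minimal sketch of the BIP 125 opt-out test the comment above describes, written as a hypothetical standalone helper rather than the patch's inline loop (MAX_BIP125_RBF_SEQUENCE presumably comes from the newly included <policy/rbf.h>):

// Hypothetical helper: a transaction signals BIP 125 replaceability if any
// input's nSequence is at or below MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2).
static bool SignalsReplaceability(const CTransaction& tx)
{
    for (const CTxIn& txin : tx.vin) {
        if (txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) return true;
    }
    return false;
}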
@@ -503,7 +533,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool { for (const CTxIn &_txin : ptxConflicting->vin) { - if (_txin.nSequence < std::numeric_limits<unsigned int>::max()-1) + if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) { fReplacementOptOut = false; break; @@ -524,31 +554,26 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool CCoinsView dummy; CCoinsViewCache view(&dummy); - CAmount nValueIn = 0; LockPoints lp; { LOCK(pool.cs); - CCoinsViewMemPool viewMemPool(pcoinsTip, pool); + CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool); view.SetBackend(viewMemPool); - // do we already have it? - for (size_t out = 0; out < tx.vout.size(); out++) { - COutPoint outpoint(hash, out); - bool had_coin_in_cache = pcoinsTip->HaveCoinInCache(outpoint); - if (view.HaveCoin(outpoint)) { - if (!had_coin_in_cache) { - coins_to_uncache.push_back(outpoint); - } - return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known"); - } - } - // do all inputs exist? for (const CTxIn txin : tx.vin) { if (!pcoinsTip->HaveCoinInCache(txin.prevout)) { coins_to_uncache.push_back(txin.prevout); } if (!view.HaveCoin(txin.prevout)) { + // Are inputs missing because we already have the tx? + for (size_t out = 0; out < tx.vout.size(); out++) { + // Optimistically just do efficient check of cache for outputs + if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) { + return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known"); + } + } + // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet if (pfMissingInputs) { *pfMissingInputs = true; } @@ -559,8 +584,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // Bring the best block into scope view.GetBestBlock(); - nValueIn = view.GetValueIn(tx); - // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool view.SetBackend(dummy); @@ -571,6 +594,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // CoinsViewCache instead of create its own if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final"); + + } // end LOCK(pool.cs) + + CAmount nFees = 0; + if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) { + return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state)); } // Check for non-standard pay-to-script-hash in inputs @@ -583,8 +612,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS); - CAmount nValueOut = tx.GetValueOut(); - CAmount nFees = nValueIn-nValueOut; // nModifiedFees includes any fee deltas from PrioritiseTransaction CAmount nModifiedFees = nFees; pool.ApplyDelta(hash, nModifiedFees); @@ -613,13 +640,13 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false, strprintf("%d", nSigOpsCost)); - CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize); - if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) { + CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize); + if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < 
mempoolRejectFee) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee)); } // No transactions are allowed below minRelayTxFee except from disconnected blocks - if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) { + if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) { return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met"); } @@ -630,10 +657,10 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool // Calculate in-mempool ancestors, up to a limit. CTxMemPool::setEntries setAncestors; - size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); - size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000; - size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); - size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000; + size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); + size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000; + size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT); + size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000; std::string errString; if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) { return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString); @@ -785,7 +812,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; if (!chainparams.RequireStandard()) { - scriptVerifyFlags = GetArg("-promiscuousmempoolflags", scriptVerifyFlags); + scriptVerifyFlags = gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags); } // Check against previous transactions @@ -851,18 +878,19 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool } pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED); - // This transaction should only count for fee estimation if it isn't a - // BIP 125 replacement transaction (may not be widely supported), the - // node is not behind, and the transaction is not dependent on any other - // transactions in the mempool. 
- bool validForFeeEstimation = !fReplacementTransaction && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); + // This transaction should only count for fee estimation if: + // - it isn't a BIP 125 replacement transaction (may not be widely supported) + // - it's not being readded during a reorg which bypasses typical mempool fee limits + // - the node is not behind + // - the transaction is not dependent on any other transactions in the mempool + bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx); // Store transaction in memory pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation); // trim mempool and check if tx was trimmed - if (!fOverrideMempoolLimit) { - LimitMempoolSize(pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); + if (!bypass_limits) { + LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); if (!pool.exists(hash)) return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full"); } @@ -874,12 +902,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool } /** (try to) add transaction to memory pool with a specified acceptance time **/ -static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, +static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced, - bool fOverrideMempoolLimit, const CAmount nAbsurdFee) + bool bypass_limits, const CAmount nAbsurdFee) { std::vector<COutPoint> coins_to_uncache; - bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache); + bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache); if (!res) { for (const COutPoint& hashTx : coins_to_uncache) pcoinsTip->Uncache(hashTx); @@ -890,52 +918,59 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo return res; } -bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree, +bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced, - bool fOverrideMempoolLimit, const CAmount nAbsurdFee) + bool bypass_limits, const CAmount nAbsurdFee) { const CChainParams& chainparams = Params(); - return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee); + return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee); } -/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */ -bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow) +/** + * Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock. 
+ * If blockIndex is provided, the transaction is fetched from the corresponding block. + */ +bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, bool fAllowSlow, CBlockIndex* blockIndex) { - CBlockIndex *pindexSlow = NULL; + CBlockIndex* pindexSlow = blockIndex; LOCK(cs_main); - CTransactionRef ptx = mempool.get(hash); - if (ptx) - { - txOut = ptx; - return true; - } + if (!blockIndex) { + CTransactionRef ptx = mempool.get(hash); + if (ptx) { + txOut = ptx; + return true; + } - if (fTxIndex) { - CDiskTxPos postx; - if (pblocktree->ReadTxIndex(hash, postx)) { - CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); - if (file.IsNull()) - return error("%s: OpenBlockFile failed", __func__); - CBlockHeader header; - try { - file >> header; - fseek(file.Get(), postx.nTxOffset, SEEK_CUR); - file >> txOut; - } catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + if (fTxIndex) { + CDiskTxPos postx; + if (pblocktree->ReadTxIndex(hash, postx)) { + CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION); + if (file.IsNull()) + return error("%s: OpenBlockFile failed", __func__); + CBlockHeader header; + try { + file >> header; + fseek(file.Get(), postx.nTxOffset, SEEK_CUR); + file >> txOut; + } catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + } + hashBlock = header.GetHash(); + if (txOut->GetHash() != hash) + return error("%s: txid mismatch", __func__); + return true; } - hashBlock = header.GetHash(); - if (txOut->GetHash() != hash) - return error("%s: txid mismatch", __func__); - return true; + + // transaction not found in index, nothing more can be done + return false; } - } - if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it - const Coin& coin = AccessByTxid(*pcoinsTip, hash); - if (!coin.IsSpent()) pindexSlow = chainActive[coin.nHeight]; + if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it + const Coin& coin = AccessByTxid(*pcoinsTip, hash); + if (!coin.IsSpent()) pindexSlow = chainActive[coin.nHeight]; + } } if (pindexSlow) { @@ -1034,8 +1069,6 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) bool IsInitialBlockDownload() { - const CChainParams& chainParams = Params(); - // Once this function has returned false, it must remain false. static std::atomic<bool> latchToFalse{false}; // Optimization: pre-test latch before taking the lock. 
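As a usage sketch of the GetTransaction() overload introduced above: when the caller already knows which block contains the transaction, passing its CBlockIndex bypasses the mempool and txindex paths and deserializes the transaction directly from that block. The txid and pindex names here are assumed caller-side variables, not names from the patch.

CTransactionRef tx;
uint256 hash_block;
// pindex: the CBlockIndex of the block believed to contain txid.
if (GetTransaction(txid, tx, Params().GetConsensus(), hash_block, false /* fAllowSlow */, pindex)) {
    // On success, hash_block holds the hash of the block the tx was read from.
}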
@@ -1047,9 +1080,9 @@ bool IsInitialBlockDownload() return false; if (fImporting || fReindex) return true; - if (chainActive.Tip() == NULL) + if (chainActive.Tip() == nullptr) return true; - if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork)) + if (chainActive.Tip()->nChainWork < nMinimumChainWork) return true; if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge)) return true; @@ -1058,12 +1091,12 @@ bool IsInitialBlockDownload() return false; } -CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL; +CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr; static void AlertNotify(const std::string& strMessage) { uiInterface.NotifyAlertChanged(); - std::string strCmd = GetArg("-alertnotify", ""); + std::string strCmd = gArgs.GetArg("-alertnotify", ""); if (strCmd.empty()) return; // Alert text should be plain ascii coming from a trusted source, but to @@ -1088,7 +1121,7 @@ static void CheckForkWarningConditions() // If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it) // of our head, drop it if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72) - pindexBestForkTip = NULL; + pindexBestForkTip = nullptr; if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6))) { @@ -1140,7 +1173,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) // or a chain that is entirely longer than ours and invalid (note that this should be detected by both) // We define it this way because it allows us to only store the highest fork tip (+ base) which meets // the 7-block condition and from this always have the most-likely-to-cause-warning fork - if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) && + if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) && pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) && chainActive.Height() - pindexNewForkTip->nHeight < 72) { @@ -1171,6 +1204,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew) void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { if (!state.CorruptionPossible()) { pindex->nStatus |= BLOCK_FAILED_VALID; + g_failed_blocks.insert(pindex); setDirtyBlockIndex.insert(pindex); setBlockIndexCandidates.erase(pindex); InvalidChainFound(pindex); @@ -1201,7 +1235,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight) bool CScriptCheck::operator()() { const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness; - return VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error); + return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error); } int GetSpendHeight(const CCoinsViewCache& inputs) @@ -1218,7 +1252,7 @@ static uint256 scriptExecutionCacheNonce(GetRandHash()); void InitScriptExecutionCache() { // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero, // setup_bytes creates the minimum possible cache (2 elements). 
- size_t nMaxCacheSize = std::min(std::max((int64_t)0, GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20); + size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20); size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize); LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n", (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems); @@ -1228,7 +1262,7 @@ void InitScriptExecutionCache() { * Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts) * This does not modify the UTXO set. * - * If pvChecks is not NULL, script checks are pushed onto it instead of being performed inline. Any + * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any * script checks which are not necessary (eg due to script execution cache hits) are, obviously, * not pushed onto pvChecks/run. * @@ -1242,9 +1276,6 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi { if (!tx.IsCoinBase()) { - if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) - return false; - if (pvChecks) pvChecks->reserve(tx.vin.size()); @@ -1283,11 +1314,9 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi // a sanity check that our caching is not introducing consensus // failures through additional data in, eg, the coins being // spent being checked as a part of CScriptCheck. - const CScript& scriptPubKey = coin.out.scriptPubKey; - const CAmount amount = coin.out.nValue; // Verify signature - CScriptCheck check(scriptPubKey, amount, tx, i, flags, cacheSigStore, &txdata); + CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata); if (pvChecks) { pvChecks->push_back(CScriptCheck()); check.swap(pvChecks->back()); @@ -1299,7 +1328,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi // arguments; if so, don't trigger DoS protection to // avoid splitting the network between upgraded and // non-upgraded nodes. - CScriptCheck check2(scriptPubKey, amount, tx, i, + CScriptCheck check2(coin.out, tx, i, flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); if (check2()) return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); @@ -1565,7 +1594,7 @@ private: int bit; public: - WarningBitsConditionChecker(int bitIn) : bit(bitIn) {} + explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {} int64_t BeginTime(const Consensus::Params& params) const override { return 0; } int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); } @@ -1586,11 +1615,12 @@ static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS]; static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) { AssertLockHeld(cs_main); - // BIP16 didn't become active until Apr 1 2012 - int64_t nBIP16SwitchTime = 1333238400; - bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime); + unsigned int flags = SCRIPT_VERIFY_NONE; - unsigned int flags = fStrictPayToScriptHash ? 
SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE; + // Start enforcing P2SH (BIP16) + if (pindex->nHeight >= consensusparams.BIP16Height) { + flags |= SCRIPT_VERIFY_P2SH; + } // Start enforcing the DERSIG (BIP66) rule if (pindex->nHeight >= consensusparams.BIP66Height) { @@ -1625,6 +1655,7 @@ static int64_t nTimeConnect = 0; static int64_t nTimeIndex = 0; static int64_t nTimeCallbacks = 0; static int64_t nTimeTotal = 0; +static int64_t nBlocksTotal = 0; /** Apply the effects of this block (with given index) on the UTXO set represented by coins. * Validity checks that depend on the UTXO set are also done; ConnectBlock() @@ -1635,16 +1666,28 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd AssertLockHeld(cs_main); assert(pindex); // pindex->phashBlock can be null if called by CreateNewBlock/TestBlockValidity - assert((pindex->phashBlock == NULL) || + assert((pindex->phashBlock == nullptr) || (*pindex->phashBlock == block.GetHash())); int64_t nTimeStart = GetTimeMicros(); // Check it again in case a previous version let a bad block in + // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or + // ContextualCheckBlockHeader() here. This means that if we add a new + // consensus rule that is enforced in one of those two functions, then we + // may have let in a block that violates the rule prior to updating the + // software, and we would NOT be enforcing the rule here. Fully solving + // upgrade from one software version to the next after a consensus rule + // change is potentially tricky and issue-specific (see RewindBlockIndex() + // for one general approach that was used for BIP 141 deployment). + // Also, currently the rule against blocks more than 2 hours in the future + // is enforced in ContextualCheckBlockHeader(); we wouldn't want to + // re-enforce that rule here (at least until we make it impossible for + // GetAdjustedTime() to go backward). if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); // verify that the view's current state corresponds to the previous block - uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash(); + uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); // Special case for the genesis block, skipping connection of its transactions @@ -1655,6 +1698,8 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd return true; } + nBlocksTotal++; + bool fScriptChecks = true; if (!hashAssumeValid.IsNull()) { // We've been configured with the hash of a block which has been externally verified to have a valid history. @@ -1666,7 +1711,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd if (it != mapBlockIndex.end()) { if (it->second->GetAncestor(pindex->nHeight) == pindex && pindexBestHeader->GetAncestor(pindex->nHeight) == pindex && - pindexBestHeader->nChainWork >= UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) { + pindexBestHeader->nChainWork >= nMinimumChainWork) { // This block is a member of the assumed verified chain and an ancestor of the best header. // The equivalent time check discourages hash power from extorting the network via DOS attack // into accepting an invalid block through telling users they must manually set assumevalid. 
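The new nBlocksTotal counter above feeds the per-block averages added to the BENCH log lines below, using the MICRO and MILLI constants defined at the top of the patch. A worked example with assumed figures (5,000,000 accumulated microseconds over 2,000 connected blocks; neither number comes from the patch):

// Illustrative arithmetic only; the 5,000,000 / 2,000 figures are assumed.
int64_t nTimeCheck_example  = 5000000; // accumulated microseconds in this phase
int64_t nBlocksTotal_example = 2000;   // blocks connected so far
double total_seconds = nTimeCheck_example * MICRO;                         // 5.00 -> "[%.2fs ...]"
double ms_per_block  = nTimeCheck_example * MILLI / nBlocksTotal_example;  // 2.50 -> "(%.2fms/blk)"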
@@ -1682,7 +1727,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd } int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart; - LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001); + LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal); // Do not allow blocks that contain transactions which 'overwrite' older transactions, // unless those are already completely spent. @@ -1706,6 +1751,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd // before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further // duplicate transactions descending from the known pairs either. // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check. + assert(pindex->pprev); CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height); //Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond. fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash)); @@ -1731,11 +1777,11 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus()); int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1; - LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001); + LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal); CBlockUndo blockundo; - CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL); + CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr); std::vector<int> prevheights; CAmount nFees = 0; @@ -1755,9 +1801,15 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd if (!tx.IsCoinBase()) { - if (!view.HaveInputs(tx)) - return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), - REJECT_INVALID, "bad-txns-inputs-missingorspent"); + CAmount txfee = 0; + if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) { + return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state)); + } + nFees += txfee; + if (!MoneyRange(nFees)) { + return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__), + REJECT_INVALID, "bad-txns-accumulated-fee-outofrange"); + } // Check that transaction is BIP68 final // BIP68 lock checks (as opposed to nLockTime checks) must @@ -1785,11 +1837,9 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd txdata.emplace_back(tx); if (!tx.IsCoinBase()) { - nFees += view.GetValueIn(tx)-tx.GetValueOut(); - std::vector<CScriptCheck> vChecks; bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ - if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? 
&vChecks : NULL)) + if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) return error("ConnectBlock(): CheckInputs on %s failed with %s", tx.GetHash().ToString(), FormatStateMessage(state)); control.Add(vChecks); @@ -1805,7 +1855,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); } int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2; - LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001); + LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal); CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus()); if (block.vtx[0]->GetValueOut() > blockReward) @@ -1817,7 +1867,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd if (!control.Wait()) return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed"); int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2; - LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001); + LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 
0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal); if (fJustCheck) return true; @@ -1845,14 +1895,15 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd if (!pblocktree->WriteTxIndex(vPos)) return AbortNode(state, "Failed to write transaction index"); + assert(pindex->phashBlock); // add this block to the view's block chain view.SetBestBlock(pindex->GetBlockHash()); int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4; - LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001); + LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal); int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; - LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001); + LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal); return true; } @@ -1865,95 +1916,100 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd */ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) { int64_t nMempoolUsage = mempool.DynamicMemoryUsage(); - LOCK2(cs_main, cs_LastBlockFile); + LOCK(cs_main); static int64_t nLastWrite = 0; static int64_t nLastFlush = 0; static int64_t nLastSetChain = 0; std::set<int> setFilesToPrune; bool fFlushForPrune = false; + bool fDoFullFlush = false; + int64_t nNow = 0; try { - if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { - if (nManualPruneHeight > 0) { - FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); - } else { - FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); - fCheckForPruning = false; - } - if (!setFilesToPrune.empty()) { - fFlushForPrune = true; - if (!fHavePruned) { - pblocktree->WriteFlag("prunedblockfiles", true); - fHavePruned = true; - } - } - } - int64_t nNow = GetTimeMicros(); - // Avoid writing/flushing immediately after startup. - if (nLastWrite == 0) { - nLastWrite = nNow; - } - if (nLastFlush == 0) { - nLastFlush = nNow; - } - if (nLastSetChain == 0) { - nLastSetChain = nNow; - } - int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; - int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); - int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); - // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). - bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); - // The cache is over the limit, we have to write now. - bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; - // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. - bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; - // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. 
- bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; - // Combine all conditions that result in a full cache flush. - bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; - // Write blocks and block index to disk. - if (fDoFullFlush || fPeriodicWrite) { - // Depend on nMinDiskSpace to ensure we can write block index - if (!CheckDiskSpace(0)) - return state.Error("out of disk space"); - // First make sure all block and undo data is flushed to disk. - FlushBlockFile(); - // Then update all block file information (which may refer to block and undo files). - { - std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; - vFiles.reserve(setDirtyFileInfo.size()); - for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { - vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it])); - setDirtyFileInfo.erase(it++); + { + LOCK(cs_LastBlockFile); + if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { + if (nManualPruneHeight > 0) { + FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight); + } else { + FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight()); + fCheckForPruning = false; } - std::vector<const CBlockIndex*> vBlocks; - vBlocks.reserve(setDirtyBlockIndex.size()); - for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { - vBlocks.push_back(*it); - setDirtyBlockIndex.erase(it++); + if (!setFilesToPrune.empty()) { + fFlushForPrune = true; + if (!fHavePruned) { + pblocktree->WriteFlag("prunedblockfiles", true); + fHavePruned = true; + } } - if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { - return AbortNode(state, "Failed to write to block index database"); + } + nNow = GetTimeMicros(); + // Avoid writing/flushing immediately after startup. + if (nLastWrite == 0) { + nLastWrite = nNow; + } + if (nLastFlush == 0) { + nLastFlush = nNow; + } + if (nLastSetChain == 0) { + nLastSetChain = nNow; + } + int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; + int64_t cacheSize = pcoinsTip->DynamicMemoryUsage(); + int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0); + // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing). + bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024); + // The cache is over the limit, we have to write now. + bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace; + // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. + bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; + // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. + bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; + // Combine all conditions that result in a full cache flush. + fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; + // Write blocks and block index to disk. 
+ if (fDoFullFlush || fPeriodicWrite) { + // Depend on nMinDiskSpace to ensure we can write block index + if (!CheckDiskSpace(0)) + return state.Error("out of disk space"); + // First make sure all block and undo data is flushed to disk. + FlushBlockFile(); + // Then update all block file information (which may refer to block and undo files). + { + std::vector<std::pair<int, const CBlockFileInfo*> > vFiles; + vFiles.reserve(setDirtyFileInfo.size()); + for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { + vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it])); + setDirtyFileInfo.erase(it++); + } + std::vector<const CBlockIndex*> vBlocks; + vBlocks.reserve(setDirtyBlockIndex.size()); + for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { + vBlocks.push_back(*it); + setDirtyBlockIndex.erase(it++); + } + if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { + return AbortNode(state, "Failed to write to block index database"); + } } + // Finally remove any pruned files + if (fFlushForPrune) + UnlinkPrunedFiles(setFilesToPrune); + nLastWrite = nNow; + } + // Flush best chain related state. This can only be done if the blocks / block index write was also done. + if (fDoFullFlush) { + // Typical Coin structures on disk are around 48 bytes in size. + // Pushing a new one to the database can cause it to be written + // twice (once in the log, and once in the tables). This is already + // an overestimation, as most will delete an existing entry or + // overwrite one. Still, use a conservative safety factor of 2. + if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) + return state.Error("out of disk space"); + // Flush the chainstate (which may refer to block index entries). + if (!pcoinsTip->Flush()) + return AbortNode(state, "Failed to write to coin database"); + nLastFlush = nNow; } - // Finally remove any pruned files - if (fFlushForPrune) - UnlinkPrunedFiles(setFilesToPrune); - nLastWrite = nNow; - } - // Flush best chain related state. This can only be done if the blocks / block index write was also done. - if (fDoFullFlush) { - // Typical Coin structures on disk are around 48 bytes in size. - // Pushing a new one to the database can cause it to be written - // twice (once in the log, and once in the tables). This is already - // an overestimation, as most will delete an existing entry or - // overwrite one. Still, use a conservative safety factor of 2. - if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) - return state.Error("out of disk space"); - // Flush the chainstate (which may refer to block index entries). - if (!pcoinsTip->Flush()) - return AbortNode(state, "Failed to write to coin database"); - nLastFlush = nNow; } if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) { // Update best block in wallet (so we can detect restored wallets). 
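To make the cache-size thresholds above concrete, a sketch with assumed numbers: a 450 MiB coins-cache budget and a mempool running 100 MiB under its -maxmempool cap (neither figure comes from the patch; the 10 MiB margin is the one named in the code comment above).

// Illustrative only: values assumed, arithmetic matches the conditions above.
const int64_t nCoinCacheUsage_ex = 450 * 1024 * 1024; // coins-cache budget
const int64_t mempool_headroom   = 100 * 1024 * 1024; // nMempoolSizeMax - nMempoolUsage
const int64_t nTotalSpace_ex = nCoinCacheUsage_ex + mempool_headroom;        // 550 MiB
const int64_t large_threshold = std::max((9 * nTotalSpace_ex) / 10,
                                         nTotalSpace_ex - 10 * 1024 * 1024); // 540 MiB
// fCacheLarge: periodic flush once the cache exceeds large_threshold (540 MiB).
// fCacheCritical: mandatory flush once it exceeds nTotalSpace_ex itself (550 MiB).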
@@ -2016,7 +2072,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) { } } // Check the version of the last 100 blocks to see if we need to upgrade: - for (int i = 0; i < 100 && pindex != NULL; i++) + for (int i = 0; i < 100 && pindex != nullptr; i++) { int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus()); if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0) @@ -2049,7 +2105,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) { * should make the mempool consistent again by calling UpdateMempoolForReorg. * with cs_main held. * - * If disconnectpool is NULL, then no disconnected transactions are added to + * If disconnectpool is nullptr, then no disconnected transactions are added to * disconnectpool (note that the caller is responsible for mempool consistency * in any case). */ @@ -2065,14 +2121,14 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); { - CCoinsViewCache view(pcoinsTip); + CCoinsViewCache view(pcoinsTip.get()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); bool flushed = view.Flush(); assert(flushed); } - LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); + LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) return false; @@ -2105,7 +2161,7 @@ static int64_t nTimeChainState = 0; static int64_t nTimePostConnect = 0; struct PerBlockConnectTrace { - CBlockIndex* pindex = NULL; + CBlockIndex* pindex = nullptr; std::shared_ptr<const CBlock> pblock; std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs; PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {} @@ -2132,7 +2188,7 @@ private: CTxMemPool &pool; public: - ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { + explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) { pool.NotifyEntryRemoved.connect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2)); } @@ -2170,7 +2226,7 @@ public: }; /** - * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock + * Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock * corresponding to pindexNew, to bypass loading it again from disk. * * The block is added to connectTrace if connection succeeds. @@ -2193,9 +2249,9 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, // Apply the block atomically to the chain state. 
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; int64_t nTime3; - LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); + LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO); { - CCoinsViewCache view(pcoinsTip); + CCoinsViewCache view(pcoinsTip.get()); bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams); GetMainSignals().BlockChecked(blockConnecting, state); if (!rv) { @@ -2204,17 +2260,17 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; - LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); + LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal); bool flushed = view.Flush(); assert(flushed); } int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; - LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); + LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal); // Write the chain state to disk, if necessary. if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED)) return false; int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; - LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); + LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal); // Remove conflicting transactions from the mempool.; mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight); disconnectpool.removeForBlock(blockConnecting.vtx); @@ -2222,8 +2278,8 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, UpdateTip(pindexNew, chainparams); int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; - LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); - LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); + LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal); + LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal); connectTrace.BlockConnected(pindexNew, std::move(pthisBlock)); return true; @@ -2235,13 +2291,13 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, */ static CBlockIndex* FindMostWorkChain() { do { - CBlockIndex *pindexNew = NULL; + CBlockIndex *pindexNew = nullptr; // Find the best candidate header. 
{ std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin(); if (it == setBlockIndexCandidates.rend()) - return NULL; + return nullptr; pindexNew = *it; } @@ -2260,7 +2316,7 @@ static CBlockIndex* FindMostWorkChain() { bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); if (fFailedChain || fMissingData) { // Candidate chain is not usable (either invalid or missing data) - if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) + if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork)) pindexBestInvalid = pindexNew; CBlockIndex *pindexFailed = pindexNew; // Remove the entire chain from the set. @@ -2301,7 +2357,7 @@ static void PruneBlockIndexCandidates() { /** * Try to make some progress towards making pindexMostWork the active block. - * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork. + * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork. */ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) { @@ -2340,7 +2396,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c nHeight = nTargetHeight; // Connect new blocks. - BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) { + for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) { if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) { if (state.IsInvalid()) { // The block violates a consensus rule. @@ -2373,7 +2429,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c // any disconnected transactions back to the mempool. UpdateMempoolForReorg(disconnectpool, true); } - mempool.check(pcoinsTip); + mempool.check(pcoinsTip.get()); // Callbacks/notifications for a new best chain. if (fInvalidFound) @@ -2387,8 +2443,8 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c static void NotifyHeaderTip() { bool fNotify = false; bool fInitialBlockDownload = false; - static CBlockIndex* pindexHeaderOld = NULL; - CBlockIndex* pindexHeader = NULL; + static CBlockIndex* pindexHeaderOld = nullptr; + CBlockIndex* pindexHeader = nullptr; { LOCK(cs_main); pindexHeader = pindexBestHeader; @@ -2407,7 +2463,7 @@ static void NotifyHeaderTip() { /** * Make the best chain active, in multiple steps. The result is either failure - * or an activated best chain. pblock is either NULL or a pointer to a block + * or an activated best chain. pblock is either nullptr or a pointer to a block * that is already loaded (to avoid loading it again from disk). */ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { @@ -2416,9 +2472,9 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, // us in the middle of ProcessNewBlock - do not assume pblock is set // sanely for performance or correctness! 
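The loop above replaces BOOST_REVERSE_FOREACH with reverse_iterate() from the newly included <reverse_iterator.h>. A minimal sketch of the shape such an adapter typically takes (an assumption about the header, not a copy of it):

// Wraps any container exposing rbegin()/rend() so a range-based for loop
// walks its elements back to front.
template <typename T>
struct reverse_range { T& m_container; };
template <typename T>
typename T::reverse_iterator begin(reverse_range<T> r) { return r.m_container.rbegin(); }
template <typename T>
typename T::reverse_iterator end(reverse_range<T> r) { return r.m_container.rend(); }
template <typename T>
reverse_range<T> reverse_iterate(T& container) { return {container}; }

// for (CBlockIndex* pindexConnect : reverse_iterate(vpindexToConnect)) { ... }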
- CBlockIndex *pindexMostWork = NULL; - CBlockIndex *pindexNewTip = NULL; - int nStopAtHeight = GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); + CBlockIndex *pindexMostWork = nullptr; + CBlockIndex *pindexNewTip = nullptr; + int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT); do { boost::this_thread::interruption_point(); if (ShutdownRequested()) @@ -2431,12 +2487,12 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked CBlockIndex *pindexOldTip = chainActive.Tip(); - if (pindexMostWork == NULL) { + if (pindexMostWork == nullptr) { pindexMostWork = FindMostWorkChain(); } // Whether we have anything to do at all. - if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip()) + if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) return true; bool fInvalidFound = false; @@ -2446,7 +2502,7 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, if (fInvalidFound) { // Wipe cache, we may need another branch now. - pindexMostWork = NULL; + pindexMostWork = nullptr; } pindexNewTip = chainActive.Tip(); pindexFork = chainActive.FindFork(pindexOldTip); @@ -2454,7 +2510,7 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); - GetMainSignals().BlockConnected(trace.pblock, trace.pindex, *trace.conflictedTxs); + GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs); } } // When we reach this point, we switched to a new tip (stored in pindexNewTip). @@ -2515,17 +2571,18 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C { AssertLockHeld(cs_main); - // Mark the block itself as invalid. - pindex->nStatus |= BLOCK_FAILED_VALID; - setDirtyBlockIndex.insert(pindex); - setBlockIndexCandidates.erase(pindex); + // We first disconnect backwards and then mark the blocks as invalid. + // This prevents a case where pruned nodes may fail to invalidateblock + // and be left unable to start as they have no tip candidates (as there + // are no blocks that meet the "have data and are not invalid per + // nStatus" criteria for inclusion in setBlockIndexCandidates). + + bool pindex_was_in_chain = false; + CBlockIndex *invalid_walk_tip = chainActive.Tip(); DisconnectedBlockTransactions disconnectpool; while (chainActive.Contains(pindex)) { - CBlockIndex *pindexWalk = chainActive.Tip(); - pindexWalk->nStatus |= BLOCK_FAILED_CHILD; - setDirtyBlockIndex.insert(pindexWalk); - setBlockIndexCandidates.erase(pindexWalk); + pindex_was_in_chain = true; // ActivateBestChain considers blocks already in chainActive // unconditionally valid already, so force disconnect away from it. if (!DisconnectTip(state, chainparams, &disconnectpool)) { @@ -2536,6 +2593,21 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C } } + // Now mark the blocks we just disconnected as descendants invalid + // (note this may not be all descendants). + while (pindex_was_in_chain && invalid_walk_tip != pindex) { + invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(invalid_walk_tip); + setBlockIndexCandidates.erase(invalid_walk_tip); + invalid_walk_tip = invalid_walk_tip->pprev; + } + + // Mark the block itself as invalid. 
+ pindex->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(pindex); + setBlockIndexCandidates.erase(pindex); + g_failed_blocks.insert(pindex); + // DisconnectTip will add transactions to disconnectpool; try to add these // back to the mempool. UpdateMempoolForReorg(disconnectpool, true); @@ -2571,14 +2643,15 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex) { } if (it->second == pindexBestInvalid) { // Reset invalid block marker if it was pointing to one of those. - pindexBestInvalid = NULL; + pindexBestInvalid = nullptr; } + g_failed_blocks.erase(it->second); } it++; } // Remove the invalidity flag from all ancestors too. - while (pindex != NULL) { + while (pindex != nullptr) { if (pindex->nStatus & BLOCK_FAILED_MASK) { pindex->nStatus &= ~BLOCK_FAILED_MASK; setDirtyBlockIndex.insert(pindex); @@ -2598,7 +2671,6 @@ static CBlockIndex* AddToBlockIndex(const CBlockHeader& block) // Construct new block index object CBlockIndex* pindexNew = new CBlockIndex(block); - assert(pindexNew); // We assign the sequence id to blocks only when the full data is available, // to avoid miners withholding blocks but broadcasting headers, to get a // competitive advantage. @@ -2615,7 +2687,7 @@ static CBlockIndex* AddToBlockIndex(const CBlockHeader& block) pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->RaiseValidity(BLOCK_VALID_TREE); - if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork) + if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork) pindexBestHeader = pindexNew; setDirtyBlockIndex.insert(pindexNew); @@ -2638,7 +2710,7 @@ static bool ReceivedBlockTransactions(const CBlock &block, CValidationState& sta pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); setDirtyBlockIndex.insert(pindexNew); - if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) { + if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) { // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. std::deque<CBlockIndex*> queue; queue.push_back(pindexNew); @@ -2652,7 +2724,7 @@ static bool ReceivedBlockTransactions(const CBlock &block, CValidationState& sta LOCK(cs_nBlockSequenceId); pindex->nSequenceId = nBlockSequenceId++; } - if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { + if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { setBlockIndexCandidates.insert(pindex); } std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex); @@ -2802,7 +2874,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P // checks that use witness data may be performed here. 
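// ---------------------------------------------------------------------------
// Illustrative sketch of the reworked InvalidateBlock ordering above: disconnect
// first, then walk the old tip back to the target marking descendants
// BLOCK_FAILED_CHILD, and only then mark the target BLOCK_FAILED_VALID, so a
// pruned node is never left without usable tip candidates. BlockStub and the
// flag values are simplified stand-ins, not the real CBlockIndex/nStatus.
#include <cassert>
#include <vector>

struct BlockStub {
    BlockStub* pprev{nullptr};
    unsigned int nStatus{0};
};
static constexpr unsigned int FAILED_VALID_EXAMPLE = 1u << 0;
static constexpr unsigned int FAILED_CHILD_EXAMPLE = 1u << 1;

int main()
{
    std::vector<BlockStub> chain(5); // heights 0..4
    for (size_t i = 1; i < chain.size(); ++i) chain[i].pprev = &chain[i - 1];

    BlockStub* target = &chain[2];            // block passed to invalidateblock
    BlockStub* invalid_walk_tip = &chain[4];  // tip before the (already done) disconnects

    while (invalid_walk_tip != target) {      // mark the disconnected descendants
        invalid_walk_tip->nStatus |= FAILED_CHILD_EXAMPLE;
        invalid_walk_tip = invalid_walk_tip->pprev;
    }
    target->nStatus |= FAILED_VALID_EXAMPLE;  // finally, the block itself

    assert(chain[4].nStatus & FAILED_CHILD_EXAMPLE);
    assert(chain[3].nStatus & FAILED_CHILD_EXAMPLE);
    assert(chain[2].nStatus & FAILED_VALID_EXAMPLE);
    assert(chain[1].nStatus == 0); // ancestors stay valid
    return 0;
}
// ---------------------------------------------------------------------------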
// Size limits - if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE) + if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT) return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed"); // First transaction must be coinbase, the rest must not be @@ -2832,22 +2904,6 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P return true; } -static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash) -{ - if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock) - return true; - - int nHeight = pindexPrev->nHeight+1; - // Don't accept any forks from the main chain prior to last checkpoint. - // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our - // MapBlockIndex. - CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints()); - if (pcheckpoint && nHeight < pcheckpoint->nHeight) - return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint"); - - return true; -} - bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params) { LOCK(cs_main); @@ -2888,8 +2944,8 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc std::vector<unsigned char> ret(32, 0x00); if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) { if (commitpos == -1) { - uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL); - CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin()); + uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr); + CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin()); CTxOut out; out.nValue = 0; out.scriptPubKey.resize(38); @@ -2912,15 +2968,33 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc /** Context-dependent validity checks. * By "context", we mean only the previous block headers, but not the UTXO - * set; UTXO-related validity checks are done in ConnectBlock(). */ -static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) + * set; UTXO-related validity checks are done in ConnectBlock(). + * NOTE: This function is not currently invoked by ConnectBlock(), so we + * should consider upgrade issues if we change which consensus rules are + * enforced in this function (eg by adding a new consensus rule). See comment + * in ConnectBlock(). + * Note that -reindex-chainstate skips the validation that happens here! 
+ */ +static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) { - assert(pindexPrev != NULL); + assert(pindexPrev != nullptr); const int nHeight = pindexPrev->nHeight + 1; + // Check proof of work + const Consensus::Params& consensusParams = params.GetConsensus(); if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work"); + // Check against checkpoints + if (fCheckpointsEnabled) { + // Don't accept any forks from the main chain prior to last checkpoint. + // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our + // MapBlockIndex. + CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints()); + if (pcheckpoint && nHeight < pcheckpoint->nHeight) + return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint"); + } + // Check timestamp against prev if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early"); @@ -2940,9 +3014,15 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta return true; } +/** NOTE: This function is not currently invoked by ConnectBlock(), so we + * should consider upgrade issues if we change which consensus rules are + * enforced in this function (eg by adding a new consensus rule). See comment + * in ConnectBlock(). + * Note that -reindex-chainstate skips the validation that happens here! + */ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev) { - const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1; + const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1; // Start enforcing BIP113 (Median Time Past) using versionbits logic. 
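// ---------------------------------------------------------------------------
// Illustrative sketch of the checkpoint rule folded into
// ContextualCheckBlockHeader above: a header that would extend a chain below
// the last known checkpoint is rejected as "bad-fork-prior-to-checkpoint".
// The map and heights are invented; the real code resolves the checkpoint via
// Checkpoints::GetLastCheckpoint against mapBlockIndex.
#include <iostream>
#include <map>
#include <string>

int main()
{
    const std::map<int, std::string> checkpoints_example{
        {11111, "hash-at-11111"},
        {134444, "hash-at-134444"},
    };
    const int last_checkpoint_height = checkpoints_example.rbegin()->first;

    const int new_header_height_example = 120000; // height the candidate header would have
    if (new_header_height_example < last_checkpoint_height) {
        std::cout << "reject: bad-fork-prior-to-checkpoint\n";
    } else {
        std::cout << "checkpoint check passed\n";
    }
    return 0;
}
// ---------------------------------------------------------------------------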
int nLockTimeFlags = 0; @@ -3027,7 +3107,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator miSelf = mapBlockIndex.find(hash); - CBlockIndex *pindex = NULL; + CBlockIndex *pindex = nullptr; if (hash != chainparams.GetConsensus().hashGenesisBlock) { if (miSelf != mapBlockIndex.end()) { @@ -3044,22 +3124,32 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); // Get prev block index - CBlockIndex* pindexPrev = NULL; + CBlockIndex* pindexPrev = nullptr; BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); if (mi == mapBlockIndex.end()) return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found"); pindexPrev = (*mi).second; if (pindexPrev->nStatus & BLOCK_FAILED_MASK) return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); - - assert(pindexPrev); - if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash)) - return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); - - if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime())) + if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state)); + + if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) { + for (const CBlockIndex* failedit : g_failed_blocks) { + if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) { + assert(failedit->nStatus & BLOCK_FAILED_VALID); + CBlockIndex* invalid_walk = pindexPrev; + while (invalid_walk != failedit) { + invalid_walk->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(invalid_walk); + invalid_walk = invalid_walk->pprev; + } + return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); + } + } + } } - if (pindex == NULL) + if (pindex == nullptr) pindex = AddToBlockIndex(block); if (ppindex) @@ -3071,13 +3161,15 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state } // Exposed wrapper for AcceptBlockHeader -bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex) +bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid) { + if (first_invalid != nullptr) first_invalid->SetNull(); { LOCK(cs_main); for (const CBlockHeader& header : headers) { - CBlockIndex *pindex = NULL; // Use a temp pindex instead of ppindex to avoid a const_cast + CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast if (!AcceptBlockHeader(header, state, chainparams, &pindex)) { + if (first_invalid) *first_invalid = header; return false; } if (ppindex) { @@ -3089,7 +3181,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio return true; } -/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */ +/** Store block on disk. 
If dbp is non-nullptr, the file is known to already reside on disk */ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock) { const CBlock& block = *pblock; @@ -3097,7 +3189,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation if (fNewBlock) *fNewBlock = false; AssertLockHeld(cs_main); - CBlockIndex *pindexDummy = NULL; + CBlockIndex *pindexDummy = nullptr; CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy; if (!AcceptBlockHeader(block, state, chainparams, &pindex)) @@ -3107,7 +3199,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation // process an unrequested block if it's new and has enough work to // advance our tip, and isn't too many blocks ahead. bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; - bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true); + bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true); // Blocks that are too out-of-order needlessly limit the effectiveness of // pruning, because pruning will not delete block files that contain any // blocks which are too close in height to the tip. Apply this test @@ -3124,9 +3216,15 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation // and unrequested blocks. if (fAlreadyHave) return true; if (!fRequested) { // If we didn't ask for it: - if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned - if (!fHasMoreWork) return true; // Don't process less-work chains - if (fTooFarAhead) return true; // Block height is too high + if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned + if (!fHasMoreOrSameWork) return true; // Don't process less-work chains + if (fTooFarAhead) return true; // Block height is too high + + // Protect against DoS attacks from low-work chains. + // If our tip is behind, a peer could try to send us + // low-work blocks on a fake chain that we would never + // request; don't process these. 
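// ---------------------------------------------------------------------------
// Illustrative sketch of the low-work guard added just below: an unrequested
// block whose cumulative chain work is under nMinimumChainWork is ignored
// rather than stored to disk. uint64_t stands in for arith_uint256 and the
// numbers are invented.
#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t minimum_chain_work_example = 1'000'000;   // like nMinimumChainWork
    const uint64_t unrequested_block_work_example = 250'000; // like pindex->nChainWork of the offer

    if (unrequested_block_work_example < minimum_chain_work_example) {
        std::cout << "ignore unrequested low-work block\n";
    } else {
        std::cout << "accept block for further processing\n";
    }
    return 0;
}
// ---------------------------------------------------------------------------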
+ if (pindex->nChainWork < nMinimumChainWork) return true; } if (fNewBlock) *fNewBlock = true; @@ -3150,11 +3248,11 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation try { unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); CDiskBlockPos blockPos; - if (dbp != NULL) + if (dbp != nullptr) blockPos = *dbp; - if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL)) + if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) return error("AcceptBlock(): FindBlockPos failed"); - if (dbp == NULL) + if (dbp == nullptr) if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) AbortNode(state, "Failed to write block"); if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus())) @@ -3172,7 +3270,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock) { { - CBlockIndex *pindex = NULL; + CBlockIndex *pindex = nullptr; if (fNewBlock) *fNewBlock = false; CValidationState state; // Ensure that CheckBlock() passes before calling AcceptBlock, as @@ -3183,12 +3281,12 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons if (ret) { // Store to disk - ret = AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock); + ret = AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock); } CheckBlockIndex(chainparams.GetConsensus()); if (!ret) { GetMainSignals().BlockChecked(*pblock, state); - return error("%s: AcceptBlock FAILED", __func__); + return error("%s: AcceptBlock FAILED (%s)", __func__, state.GetDebugMessage()); } } @@ -3205,16 +3303,13 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, { AssertLockHeld(cs_main); assert(pindexPrev && pindexPrev == chainActive.Tip()); - if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash())) - return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str()); - - CCoinsViewCache viewNew(pcoinsTip); + CCoinsViewCache viewNew(pcoinsTip.get()); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; indexDummy.nHeight = pindexPrev->nHeight + 1; // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime())) + if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state)); if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state)); @@ -3232,8 +3327,10 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, */ /* Calculate the amount of disk space the block & undo files currently use */ -static uint64_t CalculateCurrentUsage() +uint64_t CalculateCurrentUsage() { + LOCK(cs_LastBlockFile); + uint64_t retval = 0; for (const CBlockFileInfo &file : vinfoBlockFile) { retval += file.nSize + file.nUndoSize; @@ -3244,8 +3341,10 @@ static uint64_t CalculateCurrentUsage() /* Prune a block file (modify associated database entries)*/ void PruneOneBlockFile(const int fileNumber) { - for 
(BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) { - CBlockIndex* pindex = it->second; + LOCK(cs_LastBlockFile); + + for (const auto& entry : mapBlockIndex) { + CBlockIndex* pindex = entry.second; if (pindex->nFile == fileNumber) { pindex->nStatus &= ~BLOCK_HAVE_DATA; pindex->nStatus &= ~BLOCK_HAVE_UNDO; @@ -3290,7 +3389,7 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr assert(fPruneMode && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); - if (chainActive.Tip() == NULL) + if (chainActive.Tip() == nullptr) return; // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip) @@ -3332,7 +3431,7 @@ void PruneBlockFilesManual(int nManualPruneHeight) static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight) { LOCK2(cs_main, cs_LastBlockFile); - if (chainActive.Tip() == NULL || nPruneTarget == 0) { + if (chainActive.Tip() == nullptr || nPruneTarget == 0) { return; } if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) { @@ -3390,21 +3489,21 @@ bool CheckDiskSpace(uint64_t nAdditionalBytes) static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) { if (pos.IsNull()) - return NULL; + return nullptr; fs::path path = GetBlockPosFilename(pos, prefix); fs::create_directories(path.parent_path()); - FILE* file = fsbridge::fopen(path, "rb+"); + FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+"); if (!file && !fReadOnly) file = fsbridge::fopen(path, "wb+"); if (!file) { LogPrintf("Unable to open file %s\n", path.string()); - return NULL; + return nullptr; } if (pos.nPos) { if (fseek(file, pos.nPos, SEEK_SET)) { LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); fclose(file); - return NULL; + return nullptr; } } return file; @@ -3427,7 +3526,7 @@ fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) CBlockIndex * InsertBlockIndex(uint256 hash) { if (hash.IsNull()) - return NULL; + return nullptr; // Return existing BlockMap::iterator mi = mapBlockIndex.find(hash); @@ -3436,8 +3535,6 @@ CBlockIndex * InsertBlockIndex(uint256 hash) // Create new CBlockIndex* pindexNew = new CBlockIndex(); - if (!pindexNew) - throw std::runtime_error(std::string(__func__) + ": new CBlockIndex failed"); mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first; pindexNew->phashBlock = &((*mi).first); @@ -3479,13 +3576,17 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) pindex->nChainTx = pindex->nTx; } } - if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL)) + if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) { + pindex->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(pindex); + } + if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr)) setBlockIndexCandidates.insert(pindex); if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork)) pindexBestInvalid = pindex; if (pindex->pprev) pindex->BuildSkip(); - if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) + if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) pindexBestHeader = pindex; } @@ -3532,7 +3633,7 @@ bool static LoadBlockIndexDB(const CChainParams& 
chainparams) // Check whether we need to continue reindexing bool fReindexing = false; pblocktree->ReadReindexing(fReindexing); - fReindex |= fReindexing; + if(fReindexing) fReindex = true; // Check whether we have a transaction index pblocktree->ReadFlag("txindex", fTxIndex); @@ -3541,14 +3642,24 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) return true; } -void LoadChainTip(const CChainParams& chainparams) +bool LoadChainTip(const CChainParams& chainparams) { - if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return; + if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true; + + if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) { + // In case we just added the genesis block, connect it now, so + // that we always have a chainActive.Tip() when we return. + LogPrintf("%s: Connecting genesis block...\n", __func__); + CValidationState state; + if (!ActivateBestChain(state, chainparams)) { + return false; + } + } // Load pointer to end of best chain BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); if (it == mapBlockIndex.end()) - return; + return false; chainActive.SetTip(it->second); PruneBlockIndexCandidates(); @@ -3557,34 +3668,33 @@ void LoadChainTip(const CChainParams& chainparams) chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()), GuessVerificationProgress(chainparams.TxData(), chainActive.Tip())); + return true; } CVerifyDB::CVerifyDB() { - uiInterface.ShowProgress(_("Verifying blocks..."), 0); + uiInterface.ShowProgress(_("Verifying blocks..."), 0, false); } CVerifyDB::~CVerifyDB() { - uiInterface.ShowProgress("", 100); + uiInterface.ShowProgress("", 100, false); } bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) { LOCK(cs_main); - if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL) + if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) return true; // Verify blocks in the best chain - if (nCheckDepth <= 0) - nCheckDepth = 1000000000; // suffices until the year 19000 - if (nCheckDepth > chainActive.Height()) + if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height()) nCheckDepth = chainActive.Height(); nCheckLevel = std::max(0, std::min(4, nCheckLevel)); LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); CCoinsViewCache coins(coinsview); CBlockIndex* pindexState = chainActive.Tip(); - CBlockIndex* pindexFailure = NULL; + CBlockIndex* pindexFailure = nullptr; int nGoodTransactions = 0; CValidationState state; int reportDone = 0; @@ -3598,7 +3708,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, LogPrintf("[%d%%]...", percentageDone); reportDone = percentageDone/10; } - uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone); + uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false); if (pindex->nHeight < chainActive.Height()-nCheckDepth) break; if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) { @@ -3649,7 +3759,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, CBlockIndex *pindex = pindexState; while (pindex != chainActive.Tip()) { boost::this_thread::interruption_point(); - uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / 
(double)nCheckDepth * 50)))); + uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))), false); pindex = chainActive.Next(pindex); CBlock block; if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus())) @@ -3696,7 +3806,7 @@ bool ReplayBlocks(const CChainParams& params, CCoinsView* view) if (hashHeads.empty()) return true; // We're already in a consistent state. if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state"); - uiInterface.ShowProgress(_("Replaying blocks..."), 0); + uiInterface.ShowProgress(_("Replaying blocks..."), 0, false); LogPrintf("Replaying blocks\n"); const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush. @@ -3747,7 +3857,7 @@ bool ReplayBlocks(const CChainParams& params, CCoinsView* view) cache.SetBestBlock(pindexNew->GetBlockHash()); cache.Flush(); - uiInterface.ShowProgress("", 100); + uiInterface.ShowProgress("", 100, false); return true; } @@ -3755,6 +3865,8 @@ bool RewindBlockIndex(const CChainParams& params) { LOCK(cs_main); + // Note that during -reindex-chainstate we are called with an empty chainActive! + int nHeight = 1; while (nHeight <= chainActive.Height()) { if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) { @@ -3775,7 +3887,7 @@ bool RewindBlockIndex(const CChainParams& params) // of the blockchain). break; } - if (!DisconnectTip(state, params, NULL)) { + if (!DisconnectTip(state, params, nullptr)) { return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight); } // Occasionally flush state to disk. @@ -3786,8 +3898,8 @@ bool RewindBlockIndex(const CChainParams& params) // Reduce validity flag and have-data flags. // We do this after actual disconnecting, otherwise we'll end up writing the lack of data // to disk before writing the chainstate, resulting in a failure to continue if interrupted. - for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { - CBlockIndex* pindexIter = it->second; + for (const auto& entry : mapBlockIndex) { + CBlockIndex* pindexIter = entry.second; // Note: If we encounter an insufficiently validated block that // is on chainActive, it must be because we are a pruning node, and @@ -3824,12 +3936,19 @@ bool RewindBlockIndex(const CChainParams& params) } } - PruneBlockIndexCandidates(); + if (chainActive.Tip() != nullptr) { + // We can't prune block index candidates based on our tip if we have + // no tip due to chainActive being empty! + PruneBlockIndexCandidates(); - CheckBlockIndex(params.GetConsensus()); + CheckBlockIndex(params.GetConsensus()); - if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { - return false; + // FlushStateToDisk can possibly read chainActive. Be conservative + // and skip it here, we're about to -reindex-chainstate anyway, so + // it'll get called a bunch real soon. 
+ if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) { + return false; + } } return true; @@ -3842,15 +3961,16 @@ void UnloadBlockIndex() { LOCK(cs_main); setBlockIndexCandidates.clear(); - chainActive.SetTip(NULL); - pindexBestInvalid = NULL; - pindexBestHeader = NULL; + chainActive.SetTip(nullptr); + pindexBestInvalid = nullptr; + pindexBestHeader = nullptr; mempool.clear(); mapBlocksUnlinked.clear(); vinfoBlockFile.clear(); nLastBlockFile = 0; nBlockSequenceId = 1; setDirtyBlockIndex.clear(); + g_failed_blocks.clear(); setDirtyFileInfo.clear(); versionbitscache.Clear(); for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) { @@ -3867,42 +3987,54 @@ void UnloadBlockIndex() bool LoadBlockIndex(const CChainParams& chainparams) { // Load block index from databases - if (!fReindex && !LoadBlockIndexDB(chainparams)) - return false; + bool needs_init = fReindex; + if (!fReindex) { + bool ret = LoadBlockIndexDB(chainparams); + if (!ret) return false; + needs_init = mapBlockIndex.empty(); + } + + if (needs_init) { + // Everything here is for *new* reindex/DBs. Thus, though + // LoadBlockIndexDB may have set fReindex if we shut down + // mid-reindex previously, we don't check fReindex and + // instead only check it prior to LoadBlockIndexDB to set + // needs_init. + + LogPrintf("Initializing databases...\n"); + // Use the provided setting for -txindex in the new database + fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX); + pblocktree->WriteFlag("txindex", fTxIndex); + } return true; } -bool InitBlockIndex(const CChainParams& chainparams) +bool LoadGenesisBlock(const CChainParams& chainparams) { LOCK(cs_main); - // Check whether we're already initialized - if (chainActive.Genesis() != NULL) + // Check whether we're already initialized by checking for genesis in + // mapBlockIndex. Note that we can't use chainActive here, since it is + // set based on the coins db, not the block index db, which is the only + // thing loaded at this point. 
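// ---------------------------------------------------------------------------
// Illustrative sketch of the reshaped LoadBlockIndex flow above: a reindex, or
// an empty block index after LoadBlockIndexDB, means this is a brand-new
// database, so one-time initialization (including writing the -txindex flag)
// runs. The bools and the empty map below are stand-ins for the real globals.
#include <iostream>
#include <map>

int main()
{
    const bool reindex_example = false;
    std::map<int, int> map_block_index_example; // what LoadBlockIndexDB would have filled

    bool needs_init = reindex_example;
    if (!reindex_example) {
        // LoadBlockIndexDB(...) would run here and populate map_block_index_example.
        needs_init = map_block_index_example.empty();
    }

    if (needs_init) {
        std::cout << "Initializing databases...\n"; // e.g. persist the -txindex setting
    }
    return 0;
}
// ---------------------------------------------------------------------------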
+ if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash())) return true; - // Use the provided setting for -txindex in the new database - fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX); - pblocktree->WriteFlag("txindex", fTxIndex); - LogPrintf("Initializing databases...\n"); - - // Only add the genesis block if not reindexing (in which case we reuse the one already on disk) - if (!fReindex) { - try { - CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock()); - // Start new block file - unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); - CDiskBlockPos blockPos; - CValidationState state; - if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) - return error("LoadBlockIndex(): FindBlockPos failed"); - if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) - return error("LoadBlockIndex(): writing genesis block to disk failed"); - CBlockIndex *pindex = AddToBlockIndex(block); - if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus())) - return error("LoadBlockIndex(): genesis block not accepted"); - } catch (const std::runtime_error& e) { - return error("LoadBlockIndex(): failed to initialize block database: %s", e.what()); - } + try { + CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock()); + // Start new block file + unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); + CDiskBlockPos blockPos; + CValidationState state; + if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) + return error("%s: FindBlockPos failed", __func__); + if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) + return error("%s: writing genesis block to disk failed", __func__); + CBlockIndex *pindex = AddToBlockIndex(block); + if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus())) + return error("%s: genesis block not accepted", __func__); + } catch (const std::runtime_error& e) { + return error("%s: failed to write genesis block: %s", __func__, e.what()); } return true; @@ -3968,7 +4100,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) { LOCK(cs_main); CValidationState state; - if (AcceptBlock(pblock, state, chainparams, NULL, true, dbp, NULL)) + if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) nLoaded++; if (state.IsError()) break; @@ -4002,7 +4134,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB head.ToString()); LOCK(cs_main); CValidationState dummy; - if (AcceptBlock(pblockrecursive, dummy, chainparams, NULL, true, &it->second, NULL)) + if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr)) { nLoaded++; queue.push_back(pblockrecursive->GetHash()); @@ -4043,41 +4175,41 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams) // Build forward-pointing map of the entire block tree. 
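// ---------------------------------------------------------------------------
// Illustrative sketch of the forward map built at the start of CheckBlockIndex
// below: invert the pprev back-pointers into a parent -> children multimap, so
// the whole block tree can be walked from the single entry whose parent is
// nullptr (genesis). NodeStub is a stand-in for CBlockIndex.
#include <cassert>
#include <map>
#include <vector>

struct NodeStub { NodeStub* pprev{nullptr}; };

int main()
{
    std::vector<NodeStub> index(4);
    index[1].pprev = &index[0];
    index[2].pprev = &index[1];
    index[3].pprev = &index[1]; // a fork: two children of index[1]

    std::multimap<NodeStub*, NodeStub*> forward;
    for (auto& node : index) forward.emplace(node.pprev, &node);

    assert(forward.size() == index.size());
    assert(forward.count(nullptr) == 1);   // exactly one genesis entry
    assert(forward.count(&index[1]) == 2); // the fork point has two children
    return 0;
}
// ---------------------------------------------------------------------------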
std::multimap<CBlockIndex*,CBlockIndex*> forward; - for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { - forward.insert(std::make_pair(it->second->pprev, it->second)); + for (auto& entry : mapBlockIndex) { + forward.insert(std::make_pair(entry.second->pprev, entry.second)); } assert(forward.size() == mapBlockIndex.size()); - std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL); + std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr); CBlockIndex *pindex = rangeGenesis.first->second; rangeGenesis.first++; - assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL. + assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr. // Iterate over the entire block tree, using depth-first search. // Along the way, remember whether there are blocks on the path from genesis // block being explored which are the first to have certain properties. size_t nNodes = 0; int nHeight = 0; - CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid. - CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. - CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0. - CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). - CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). - CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). - CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). - while (pindex != NULL) { + CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid. + CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. + CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0. + CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). + CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). + CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). + CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). 
+ while (pindex != nullptr) { nNodes++; - if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; - if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex; - if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; - if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex; + if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; + if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex; + if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; + if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; + if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex; + if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex; + if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex; // Begin: actual consistency checks. - if (pindex->pprev == NULL) { + if (pindex->pprev == nullptr) { // Genesis block checks. assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match. assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block. @@ -4096,26 +4228,26 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams) if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set. - assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned). - assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0)); + assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned). + assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0)); assert(pindex->nHeight == nHeight); // nHeight must be consistent. 
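// ---------------------------------------------------------------------------
// Illustrative sketch of the invariant asserted just below: cumulative chain
// work never decreases from parent to child, because every connected block adds
// its own proof-of-work on top of the parent's total. uint64_t stands in for
// arith_uint256 and the values are invented.
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    const std::vector<uint64_t> chain_work_example{0, 10, 25, 25, 40}; // cumulative work per height
    for (size_t height = 1; height < chain_work_example.size(); ++height) {
        assert(chain_work_example[height] >= chain_work_example[height - 1]);
    }
    return 0;
}
// ---------------------------------------------------------------------------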
- assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's. + assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's. assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks. - assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid - if (pindexFirstInvalid == NULL) { + assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid + if (pindexFirstInvalid == nullptr) { // Checks for not-invalid blocks. assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. } - if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) { - if (pindexFirstInvalid == NULL) { + if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) { + if (pindexFirstInvalid == nullptr) { // If this block sorts at least as good as the current tip and // is valid and we have all data for its parents, it must be in // setBlockIndexCandidates. chainActive.Tip() must also be there // even if some data has been pruned. - if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) { + if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) { assert(setBlockIndexCandidates.count(pindex)); } // If some parent is missing, then it could be that this block was in @@ -4136,13 +4268,13 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams) } rangeUnlinked.first++; } - if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) { + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) { // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked. assert(foundInUnlinked); } if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA - if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked. 
- if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) { + if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked. + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) { // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent. assert(fHavePruned); // We must have pruned. // This block may have entered mapBlocksUnlinked if: @@ -4154,7 +4286,7 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams) // So if this block is itself better than chainActive.Tip() and it wasn't in // setBlockIndexCandidates, then it must be in mapBlocksUnlinked. if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) { - if (pindexFirstInvalid == NULL) { + if (pindexFirstInvalid == nullptr) { assert(foundInUnlinked); } } @@ -4175,13 +4307,13 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams) while (pindex) { // We are going to either move to a parent or a sibling of pindex. // If pindex was the first with a certain property, unset the corresponding variable. - if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL; - if (pindex == pindexFirstMissing) pindexFirstMissing = NULL; - if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL; - if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL; - if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL; - if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL; - if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL; + if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr; + if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr; + if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr; + if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr; + if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr; + if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr; + if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr; // Find our parent. CBlockIndex* pindexPar = pindex->pprev; // Find which child we just visited. 
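// ---------------------------------------------------------------------------
// Illustrative sketch of the tracker-reset pattern above: while descending, the
// first ancestor with some property is remembered; when the walk later climbs
// back past exactly that node, the tracker is cleared again. NodeStub and
// first_invalid_example are stand-ins for the pindexFirst* pointers used by
// CheckBlockIndex.
#include <cassert>

struct NodeStub { bool invalid{false}; };

int main()
{
    NodeStub parent;        // valid
    NodeStub child{true};   // first invalid node on the path

    const NodeStub* first_invalid_example = nullptr;

    // Descend parent -> child, remembering the first invalid node seen.
    if (first_invalid_example == nullptr && parent.invalid) first_invalid_example = &parent;
    if (first_invalid_example == nullptr && child.invalid) first_invalid_example = &child;
    assert(first_invalid_example == &child);

    // Climb back up: leaving the node that set the tracker clears it.
    const NodeStub* leaving = &child;
    if (leaving == first_invalid_example) first_invalid_example = nullptr;
    assert(first_invalid_example == nullptr);
    return 0;
}
// ---------------------------------------------------------------------------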
@@ -4216,6 +4348,8 @@ std::string CBlockFileInfo::ToString() const CBlockFileInfo* GetBlockFileInfo(size_t n) { + LOCK(cs_LastBlockFile); + return &vinfoBlockFile.at(n); } @@ -4242,7 +4376,7 @@ static const uint64_t MEMPOOL_DUMP_VERSION = 1; bool LoadMempool(void) { const CChainParams& chainparams = Params(); - int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; + int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb"); CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); if (file.IsNull()) { @@ -4251,8 +4385,9 @@ bool LoadMempool(void) } int64_t count = 0; - int64_t skipped = 0; + int64_t expired = 0; int64_t failed = 0; + int64_t already_there = 0; int64_t nNow = GetTime(); try { @@ -4278,14 +4413,23 @@ bool LoadMempool(void) CValidationState state; if (nTime + nExpiryTimeout > nNow) { LOCK(cs_main); - AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, true, NULL, nTime, NULL, false, 0); + AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime, + nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */); if (state.IsValid()) { ++count; } else { - ++failed; + // mempool may contain the transaction already, e.g. from + // wallet(s) having loaded it while we were processing + // mempool transactions; consider these as valid, instead of + // failed, but mark them as 'already there' + if (mempool.exists(tx->GetHash())) { + ++already_there; + } else { + ++failed; + } } } else { - ++skipped; + ++expired; } if (ShutdownRequested()) return false; @@ -4301,11 +4445,11 @@ bool LoadMempool(void) return false; } - LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped); + LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n", count, failed, expired, already_there); return true; } -void DumpMempool(void) +bool DumpMempool(void) { int64_t start = GetTimeMicros(); @@ -4325,7 +4469,7 @@ void DumpMempool(void) try { FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb"); if (!filestr) { - return; + return false; } CAutoFile file(filestr, SER_DISK, CLIENT_VERSION); @@ -4346,18 +4490,20 @@ void DumpMempool(void) file.fclose(); RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat"); int64_t last = GetTimeMicros(); - LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001); + LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO); } catch (const std::exception& e) { LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what()); + return false; } + return true; } //! Guess how far we are in the verification process at the given block index double GuessVerificationProgress(const ChainTxData& data, CBlockIndex *pindex) { - if (pindex == NULL) + if (pindex == nullptr) return 0.0; - int64_t nNow = time(NULL); + int64_t nNow = time(nullptr); double fTxTotal; |
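// ---------------------------------------------------------------------------
// Illustrative sketch of the new LoadMempool accounting above: transactions
// read from mempool.dat are counted as accepted, expired (older than
// -mempoolexpiry, 336 hours by default at the time of this patch), already in
// the mempool (e.g. loaded by a wallet while the import ran), or failed.
// All inputs here are invented.
#include <cstdint>
#include <cstdio>
#include <set>

int main()
{
    const int64_t now = 1'700'000'000;
    const int64_t expiry_seconds = 336 * 60 * 60;
    std::set<int> mempool_example{7}; // txid 7 is already present

    struct { int txid; int64_t time; bool accepts; } txs[] = {
        {1, now - 100, true},                // accepted
        {7, now - 200, false},               // rejected, but already in the mempool
        {9, now - expiry_seconds - 1, true}, // too old: expired
    };

    int count = 0, failed = 0, expired = 0, already_there = 0;
    for (const auto& tx : txs) {
        if (tx.time + expiry_seconds <= now) { ++expired; continue; }
        if (tx.accepts) ++count;
        else if (mempool_example.count(tx.txid)) ++already_there;
        else ++failed;
    }
    std::printf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n",
                count, failed, expired, already_there);
    return 0;
}
// ---------------------------------------------------------------------------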