Diffstat (limited to 'src/validation.cpp')
-rw-r--r--  src/validation.cpp   1232
1 file changed, 566 insertions(+), 666 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index f3d34dca70..3026df34fe 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -8,18 +8,19 @@
#include <arith_uint256.h>
#include <chain.h>
#include <chainparams.h>
-#include <checkpoints.h>
#include <checkqueue.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
+#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
+#include <flatfile.h>
#include <hash.h>
#include <index/txindex.h>
#include <policy/fees.h>
#include <policy/policy.h>
-#include <policy/rbf.h>
+#include <policy/settings.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
@@ -34,15 +35,19 @@
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
+#include <uint256.h>
#include <undo.h>
-#include <util/system.h>
#include <util/moneystr.h>
+#include <util/rbf.h>
#include <util/strencodings.h>
+#include <util/system.h>
+#include <util/validation.h>
#include <validationinterface.h>
#include <warnings.h>
#include <future>
#include <sstream>
+#include <string>
#include <boost/algorithm/string/replace.hpp>
#include <boost/thread.hpp>
@@ -54,161 +59,29 @@
#define MICRO 0.000001
#define MILLI 0.001
-/**
- * Global state
- */
-namespace {
- struct CBlockIndexWorkComparator
- {
- bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
- // First sort by most total work, ...
- if (pa->nChainWork > pb->nChainWork) return false;
- if (pa->nChainWork < pb->nChainWork) return true;
-
- // ... then by earliest time received, ...
- if (pa->nSequenceId < pb->nSequenceId) return false;
- if (pa->nSequenceId > pb->nSequenceId) return true;
-
- // Use pointer address as tie breaker (should only happen with blocks
- // loaded from disk, as those all have id 0).
- if (pa < pb) return false;
- if (pa > pb) return true;
-
- // Identical blocks.
- return false;
- }
- };
-} // anon namespace
-
-enum DisconnectResult
-{
- DISCONNECT_OK, // All good.
- DISCONNECT_UNCLEAN, // Rolled back, but UTXO set was inconsistent with block.
- DISCONNECT_FAILED // Something else went wrong.
-};
-
-class ConnectTrace;
-
-/**
- * CChainState stores and provides an API to update our local knowledge of the
- * current best chain and header tree.
- *
- * It generally provides access to the current block tree, as well as functions
- * to provide new data, which it will appropriately validate and incorporate in
- * its state as necessary.
- *
- * Eventually, the API here is targeted at being exposed externally as a
- * consumable libconsensus library, so any functions added must only call
- * other class member functions, pure functions in other parts of the consensus
- * library, callbacks via the validation interface, or read/write-to-disk
- * functions (eventually this will also be via callbacks).
- */
-class CChainState {
-private:
- /**
- * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
- * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
- * missing the data for the block.
- */
- std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
-
- /**
- * Every received block is assigned a unique and increasing identifier, so we
- * know which one to give priority in case of a fork.
- */
- CCriticalSection cs_nBlockSequenceId;
- /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
- int32_t nBlockSequenceId = 1;
- /** Decreasing counter (used by subsequent preciousblock calls). */
- int32_t nBlockReverseSequenceId = -1;
- /** chainwork for the last block that preciousblock has been applied to. */
- arith_uint256 nLastPreciousChainwork = 0;
-
- /** In order to efficiently track invalidity of headers, we keep the set of
- * blocks which we tried to connect and found to be invalid here (ie which
- * were set to BLOCK_FAILED_VALID since the last restart). We can then
- * walk this set and check if a new header is a descendant of something in
- * this set, preventing us from having to walk mapBlockIndex when we try
- * to connect a bad block and fail.
- *
- * While this is more complicated than marking everything which descends
- * from an invalid block as invalid at the time we discover it to be
- * invalid, doing so would require walking all of mapBlockIndex to find all
- * descendants. Since this case should be very rare, keeping track of all
- * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
- * well.
- *
- * Because we already walk mapBlockIndex in height-order at startup, we go
- * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
- * instead of putting things in this set.
- */
- std::set<CBlockIndex*> m_failed_blocks;
-
- /**
- * the ChainState CriticalSection
- * A lock that must be held when modifying this ChainState - held in ActivateBestChain()
- */
- CCriticalSection m_cs_chainstate;
-
-public:
- CChain chainActive;
- BlockMap mapBlockIndex GUARDED_BY(cs_main);
- std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
- CBlockIndex *pindexBestInvalid = nullptr;
-
- bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock);
-
- /**
- * If a block header hasn't already been seen, call CheckBlockHeader on it, ensure
- * that it doesn't descend from an invalid block, and then add it to mapBlockIndex.
- */
- bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- // Block (dis)connection on a given view:
- DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view);
- bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
- CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- // Block disconnection on our pcoinsTip:
- bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- // Manual block validity manipulation:
- bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
- bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
+ // First sort by most total work, ...
+ if (pa->nChainWork > pb->nChainWork) return false;
+ if (pa->nChainWork < pb->nChainWork) return true;
- bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
- bool RewindBlockIndex(const CChainParams& params);
- bool LoadGenesisBlock(const CChainParams& chainparams);
+ // ... then by earliest time received, ...
+ if (pa->nSequenceId < pb->nSequenceId) return false;
+ if (pa->nSequenceId > pb->nSequenceId) return true;
- void PruneBlockIndexCandidates();
+ // Use pointer address as tie breaker (should only happen with blocks
+ // loaded from disk, as those all have id 0).
+ if (pa < pb) return false;
+ if (pa > pb) return true;
- void UnloadBlockIndex();
-
-private:
- bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- /** Create a new block index entry for a given block hash */
- CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- /**
- * Make various assertions about the state of the block index.
- *
- * By default this only executes fully when using the Regtest chain; see: fCheckBlockIndex.
- */
- void CheckBlockIndex(const Consensus::Params& consensusParams);
+ // Identical blocks.
+ return false;
+}
- void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+CChainState g_chainstate;
+CChainState& ChainstateActive() { return g_chainstate; }
- bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-} g_chainstate;
+CChain& ChainActive() { return g_chainstate.m_chain; }
/**
* Mutex to guard access to validation specific variables, such as reading
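For context, a minimal sketch of the ordering that CBlockIndexWorkComparator (moved out-of-line in the hunk above) imposes, using a simplified stand-in type rather than the real CBlockIndex; it shows why the best candidate block ends up at rbegin() of a set ordered this way:

    #include <cassert>
    #include <cstdint>
    #include <set>

    struct FakeIndex {
        uint64_t nChainWork;  // stand-in for the arith_uint256 chain work
        int32_t nSequenceId;  // order in which the block was received
    };

    struct WorkComparator {
        bool operator()(const FakeIndex* pa, const FakeIndex* pb) const {
            // Most total work sorts last, so the best candidate is *set.rbegin().
            if (pa->nChainWork > pb->nChainWork) return false;
            if (pa->nChainWork < pb->nChainWork) return true;
            // Equal work: the block received earliest sorts last (is preferred).
            if (pa->nSequenceId < pb->nSequenceId) return false;
            if (pa->nSequenceId > pb->nSequenceId) return true;
            // Final tie breaker: pointer address.
            return pa > pb;
        }
    };

    int main() {
        FakeIndex a{100, 2}, b{100, 1}, c{90, 3};
        std::set<const FakeIndex*, WorkComparator> candidates{&a, &b, &c};
        // b has the same work as a but was received earlier, so it is preferred.
        assert(*candidates.rbegin() == &b);
    }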
@@ -222,8 +95,7 @@ private:
*/
RecursiveMutex cs_main;
-BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex;
-CChain& chainActive = g_chainstate.chainActive;
+BlockMap& mapBlockIndex = ::ChainstateActive().mapBlockIndex;
CBlockIndex *pindexBestHeader = nullptr;
Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
@@ -233,38 +105,32 @@ std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
-bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
-bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
-CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CBlockPolicyEstimator feeEstimator;
CTxMemPool mempool(&feeEstimator);
-std::atomic_bool g_is_mempool_loaded{false};
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
-const std::string strMessageMagic = "Bitcoin Signed Message:\n";
-
// Internal stuff
namespace {
- CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid;
+ CBlockIndex *&pindexBestInvalid = ::ChainstateActive().pindexBestInvalid;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
- std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = g_chainstate.mapBlocksUnlinked;
+ std::multimap<CBlockIndex*, CBlockIndex*>& mapBlocksUnlinked = ::ChainstateActive().mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
@@ -305,19 +171,13 @@ std::unique_ptr<CCoinsViewDB> pcoinsdbview;
std::unique_ptr<CCoinsViewCache> pcoinsTip;
std::unique_ptr<CBlockTreeDB> pblocktree;
-enum class FlushStateMode {
- NONE,
- IF_NEEDED,
- PERIODIC,
- ALWAYS
-};
-
// See definition for documentation
-static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
-static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
+static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
+static FlatFileSeq BlockFileSeq();
+static FlatFileSeq UndoFileSeq();
bool CheckFinalTx(const CTransaction &tx, int flags)
{
@@ -331,13 +191,13 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// scheduled, so no flags are set.
flags = std::max(flags, 0);
- // CheckFinalTx() uses chainActive.Height()+1 to evaluate
+ // CheckFinalTx() uses ::ChainActive().Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
- // IsFinalTx() with one more than chainActive.Height().
- const int nBlockHeight = chainActive.Height() + 1;
+ // IsFinalTx() with one more than ::ChainActive().Height().
+ const int nBlockHeight = ::ChainActive().Height() + 1;
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
@@ -345,7 +205,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// chain tip, so we use that to calculate the median time passed to
// IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
- ? chainActive.Tip()->GetMedianTimePast()
+ ? ::ChainActive().Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
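A compact sketch of the "evaluate as of the next block" rule described in the comments above, with simplified stand-ins; it models only the nLockTime threshold and omits the escape where every input already uses a final nSequence:

    #include <cstdint>

    constexpr int64_t LOCKTIME_THRESHOLD = 500000000;  // below: block height, at/above: unix time

    bool LockTimeSatisfied(int64_t tx_lock_time, int next_block_height, int64_t block_time) {
        if (tx_lock_time == 0) return true;
        const int64_t cutoff = (tx_lock_time < LOCKTIME_THRESHOLD)
                                   ? static_cast<int64_t>(next_block_height)
                                   : block_time;
        return tx_lock_time < cutoff;
    }

    // Caller pattern matching CheckFinalTx() above: use tip height + 1, and the
    // tip's median time past when LOCKTIME_MEDIAN_TIME_PAST is in effect.
    bool CanBeInNextBlock(int64_t tx_lock_time, int tip_height, int64_t tip_mtp) {
        return LockTimeSatisfied(tx_lock_time, tip_height + 1, tip_mtp);
    }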
@@ -358,9 +218,9 @@ bool TestLockPointValidity(const LockPoints* lp)
// If there are relative lock times then the maxInputBlock will be set
// If there are no relative lock times, the LockPoints don't depend on the chain
if (lp->maxInputBlock) {
- // Check whether chainActive is an extension of the block at which the LockPoints
+ // Check whether ::ChainActive() is an extension of the block at which the LockPoints
// calculation was valid. If not LockPoints are no longer valid
- if (!chainActive.Contains(lp->maxInputBlock)) {
+ if (!::ChainActive().Contains(lp->maxInputBlock)) {
return false;
}
}
@@ -374,17 +234,17 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag
AssertLockHeld(cs_main);
AssertLockHeld(pool.cs);
- CBlockIndex* tip = chainActive.Tip();
+ CBlockIndex* tip = ::ChainActive().Tip();
assert(tip != nullptr);
CBlockIndex index;
index.pprev = tip;
- // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
+ // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
// height based locks because when SequenceLocks() is called within
// ConnectBlock(), the height of the block *being*
// evaluated is what is used.
// Thus if we want to know if a transaction can be part of the
- // *next* block, we need to use one more than chainActive.Height()
+ // *next* block, we need to use one more than ::ChainActive().Height()
index.nHeight = tip->nHeight + 1;
std::pair<int, int64_t> lockPair;
@@ -394,7 +254,7 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag
lockPair.second = lp->time;
}
else {
- // pcoinsTip contains the UTXO set for chainActive.Tip()
+ // pcoinsTip contains the UTXO set for ::ChainActive().Tip()
CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
std::vector<int> prevheights;
prevheights.resize(tx.vin.size());
@@ -456,23 +316,14 @@ static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age)
pcoinsTip->Uncache(removed);
}
-/** Convert CValidationState to a human-readable message for logging */
-std::string FormatStateMessage(const CValidationState &state)
-{
- return strprintf("%s%s (code %i)",
- state.GetRejectReason(),
- state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
- state.GetRejectCode());
-}
-
static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
- if (IsInitialBlockDownload())
+ if (::ChainstateActive().IsInitialBlockDownload())
return false;
- if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
+ if (::ChainActive().Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
return false;
- if (chainActive.Height() < pindexBestHeader->nHeight - 1)
+ if (::ChainActive().Height() < pindexBestHeader->nHeight - 1)
return false;
return true;
}
@@ -524,7 +375,7 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool,
mempool.UpdateTransactionsFromBlock(vHashUpdate);
// We also need to remove any now-immature transactions
- mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ mempool.removeForReorg(pcoinsTip.get(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
@@ -565,6 +416,13 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
+/**
+ * @param[out] coins_to_uncache Return any outpoints which were not previously present in the
+ * coins cache, but were added as a result of validating the tx
+ * for mempool acceptance. This allows the caller to optionally
+ * remove the cache additions if the associated transaction ends
+ * up being rejected by the mempool.
+ */
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -582,28 +440,28 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
if (fRequireStandard && !IsStandardTx(tx, reason))
- return state.DoS(0, false, REJECT_NONSTANDARD, reason);
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, reason);
// Do not work on transactions that are too small.
// A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
// Transactions smaller than this are not relayed to reduce unnecessary malloc overhead.
if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
- return state.DoS(0, false, REJECT_NONSTANDARD, "tx-size-small");
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "tx-size-small");
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
- return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
+ return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
if (pool.exists(hash)) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
+ return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
@@ -627,19 +485,16 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// unconfirmed ancestors anyway; doing otherwise is hopelessly
// insecure.
bool fReplacementOptOut = true;
- if (fEnableReplacement)
+ for (const CTxIn &_txin : ptxConflicting->vin)
{
- for (const CTxIn &_txin : ptxConflicting->vin)
+ if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
{
- if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
- {
- fReplacementOptOut = false;
- break;
- }
+ fReplacementOptOut = false;
+ break;
}
}
if (fReplacementOptOut) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_DUPLICATE, "txn-mempool-conflict");
}
setConflicts.insert(ptxConflicting->GetHash());
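A minimal sketch of the BIP125 signaling test applied in the loop above: a conflicting mempool transaction opts in to replacement if any of its inputs has nSequence at or below MAX_BIP125_RBF_SEQUENCE (everything other than that constant is a simplified stand-in):

    #include <cstdint>
    #include <vector>

    constexpr uint32_t MAX_BIP125_RBF_SEQUENCE = 0xfffffffd;

    struct FakeTxIn { uint32_t nSequence; };

    bool SignalsRBF(const std::vector<FakeTxIn>& vin) {
        for (const FakeTxIn& txin : vin) {
            if (txin.nSequence <= MAX_BIP125_RBF_SEQUENCE) return true;
        }
        return false;  // all inputs use final/near-final sequence numbers: opted out
    }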
@@ -660,12 +515,16 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
+
+ // Note: this call may add txin.prevout to the coins cache
+ // (pcoinsTip.cacheCoins) by way of FetchCoin(). It should be removed
+ // later (via coins_to_uncache) if this tx turns out to be invalid.
if (!view.HaveCoin(txin.prevout)) {
// Are inputs missing because we already have the tx?
for (size_t out = 0; out < tx.vout.size(); out++) {
// Optimistically just do efficient check of cache for outputs
if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
+ return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
}
}
// Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
@@ -688,7 +547,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a
// CoinsViewCache instead of create its own
if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
- return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
+ return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
@@ -697,11 +556,11 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Check for non-standard pay-to-script-hash in inputs
if (fRequireStandard && !AreInputsStandard(tx, view))
- return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
- return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);
+ return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
@@ -720,31 +579,26 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
- CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, chainActive.Height(),
+ CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
fSpendsCoinbase, nSigOpsCost, lp);
unsigned int nSize = entry.GetTxSize();
- // Check that the transaction doesn't have an excessive number of
- // sigops, making it impossible to mine. Since the coinbase transaction
- // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
- // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
- // merely non-standard transaction.
if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
- return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
strprintf("%d", nSigOpsCost));
CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
}
// No transactions are allowed below minRelayTxFee except from disconnected blocks
if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", false, strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
}
if (nAbsurdFee && nFees > nAbsurdFee)
- return state.Invalid(false,
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
@@ -756,7 +610,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
- return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
}
// A transaction that spends outputs that would be replaced by it is invalid. Now
@@ -768,8 +622,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
if (setConflicts.count(hashAncestor))
{
- return state.DoS(10, false,
- REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
strprintf("%s spends conflicting transaction %s",
hash.ToString(),
hashAncestor.ToString()));
@@ -811,8 +664,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
if (newFeeRate <= oldFeeRate)
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
newFeeRate.ToString(),
@@ -840,8 +692,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
nConflictingSize += it->GetTxSize();
}
} else {
- return state.DoS(0, false,
- REJECT_NONSTANDARD, "too many potential replacements", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
hash.ToString(),
nConflictingCount,
@@ -860,8 +711,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// it's cheaper to just check if the new input refers to a
// tx that's in the mempool.
if (pool.exists(tx.vin[j].prevout.hash)) {
- return state.DoS(0, false,
- REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
strprintf("replacement %s adds unconfirmed input, idx %d",
hash.ToString(), j));
}
@@ -873,8 +723,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// transactions would not be paid for.
if (nModifiedFees < nConflictingFees)
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
@@ -884,8 +733,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CAmount nDeltaFees = nModifiedFees - nConflictingFees;
if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
hash.ToString(),
FormatMoney(nDeltaFees),
@@ -906,8 +754,10 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
!CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
// Only the witness is missing, so the transaction itself may be fine.
- state.SetCorruptionPossible();
+ state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
}
+ assert(IsTransactionReason(state.GetReason()));
return false; // state filled in by CheckInputs
}
@@ -926,7 +776,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks (using TestBlockValidity), however allowing such
// transactions into the mempool can be exploited as a DoS attack.
- unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), chainparams.GetConsensus());
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
__func__, hash.ToString(), FormatStateMessage(state));
@@ -964,7 +814,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!bypass_limits) {
LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
}
@@ -981,12 +831,17 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo
std::vector<COutPoint> coins_to_uncache;
bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
if (!res) {
+ // Remove coins that were not present in the coins cache before calling ATMPW;
+ // this is to prevent memory DoS in case we receive a large number of
+ // invalid transactions that attempt to overrun the in-memory coins cache
+ // (`CCoinsViewCache::cacheCoins`).
+
for (const COutPoint& hashTx : coins_to_uncache)
pcoinsTip->Uncache(hashTx);
}
// After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
CValidationState stateDummy;
- FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC);
+ ::ChainstateActive().FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC);
return res;
}
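A minimal sketch of the uncache-on-rejection pattern this hunk documents, with a hypothetical std::set standing in for the coins cache and plain ints standing in for outpoints:

    #include <set>
    #include <vector>

    bool ValidateAndMaybeUncache(std::set<int>& coins_cache,
                                 const std::vector<int>& tx_prevouts,
                                 bool (*validate)(const std::vector<int>&)) {
        std::vector<int> coins_to_uncache;
        for (int outpoint : tx_prevouts) {
            // Record entries that were not cached before this transaction...
            if (coins_cache.count(outpoint) == 0) coins_to_uncache.push_back(outpoint);
            coins_cache.insert(outpoint);  // side effect of the coin lookups in the real code
        }
        const bool accepted = validate(tx_prevouts);
        if (!accepted) {
            // ...so a flood of invalid transactions cannot grow the cache unboundedly.
            for (int outpoint : coins_to_uncache) coins_cache.erase(outpoint);
        }
        return accepted;
    }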
@@ -1042,7 +897,7 @@ bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus
// CBlock and CBlockIndex
//
-static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
+static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1063,7 +918,7 @@ static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMes
return true;
}
-bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
+bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
@@ -1089,7 +944,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus:
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
- CDiskBlockPos blockPos;
+ FlatFilePos blockPos;
{
LOCK(cs_main);
blockPos = pindex->GetBlockPos();
@@ -1103,9 +958,9 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& message_start)
+bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
{
- CDiskBlockPos hpos = pos;
+ FlatFilePos hpos = pos;
hpos.nPos -= 8; // Seek back 8 bytes for meta header
CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
@@ -1140,7 +995,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos,
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
{
- CDiskBlockPos block_pos;
+ FlatFilePos block_pos;
{
LOCK(cs_main);
block_pos = pindex->GetBlockPos();
@@ -1162,27 +1017,30 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
return nSubsidy;
}
-bool IsInitialBlockDownload()
+// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
+// is a performance-related implementation detail. This function must be marked
+// `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
+// can call it.
+//
+bool CChainState::IsInitialBlockDownload() const
{
- // Once this function has returned false, it must remain false.
- static std::atomic<bool> latchToFalse{false};
// Optimization: pre-test latch before taking the lock.
- if (latchToFalse.load(std::memory_order_relaxed))
+ if (m_cached_finished_ibd.load(std::memory_order_relaxed))
return false;
LOCK(cs_main);
- if (latchToFalse.load(std::memory_order_relaxed))
+ if (m_cached_finished_ibd.load(std::memory_order_relaxed))
return false;
if (fImporting || fReindex)
return true;
- if (chainActive.Tip() == nullptr)
+ if (m_chain.Tip() == nullptr)
return true;
- if (chainActive.Tip()->nChainWork < nMinimumChainWork)
+ if (m_chain.Tip()->nChainWork < nMinimumChainWork)
return true;
- if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
+ if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
- latchToFalse.store(true, std::memory_order_relaxed);
+ m_cached_finished_ibd.store(true, std::memory_order_relaxed);
return false;
}
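A minimal sketch of the double-checked latch now encapsulated in CChainState::IsInitialBlockDownload() above; the member names mirror the patch, but the lock and the expensive chain checks are simplified stand-ins:

    #include <atomic>
    #include <mutex>

    class IbdLatchSketch {
        mutable std::atomic<bool> m_cached_finished_ibd{false};
        mutable std::mutex m_mutex;  // stand-in for cs_main

        bool StillInIbd() const { return false; }  // placeholder for the tip/chainwork/age checks

    public:
        bool IsInitialBlockDownload() const {
            // Optimization: pre-test the latch before taking the lock.
            if (m_cached_finished_ibd.load(std::memory_order_relaxed)) return false;
            std::lock_guard<std::mutex> lock(m_mutex);
            if (m_cached_finished_ibd.load(std::memory_order_relaxed)) return false;
            if (StillInIbd()) return true;
            // Once this has returned false it must keep returning false, so latch it.
            m_cached_finished_ibd.store(true, std::memory_order_relaxed);
            return false;
        }
    };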
@@ -1211,15 +1069,15 @@ static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before finishing our initial sync)
- if (IsInitialBlockDownload())
+ if (::ChainstateActive().IsInitialBlockDownload())
return;
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
- if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
+ if (pindexBestForkTip && ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72)
pindexBestForkTip = nullptr;
- if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
+ if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > ::ChainActive().Tip()->nChainWork + (GetBlockProof(*::ChainActive().Tip()) * 6)))
{
if (!GetfLargeWorkForkFound() && pindexBestForkBase)
{
@@ -1252,7 +1110,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) E
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
- CBlockIndex* plonger = chainActive.Tip();
+ CBlockIndex* plonger = ::ChainActive().Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
@@ -1271,7 +1129,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) E
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
- chainActive.Height() - pindexNewForkTip->nHeight < 72)
+ ::ChainActive().Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
@@ -1288,16 +1146,16 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
- CBlockIndex *tip = chainActive.Tip();
+ CBlockIndex *tip = ::ChainActive().Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
- tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
+ tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0),
FormatISO8601DateTime(tip->GetBlockTime()));
CheckForkWarningConditions();
}
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
- if (!state.CorruptionPossible()) {
+ if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
m_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
@@ -1365,6 +1223,9 @@ void InitScriptExecutionCache() {
* which are matched. This is useful for checking blocks where we will likely never need the cache
* entry again.
*
+ * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags; block-checking
+ * callers should probably reset it to CONSENSUS in such cases.
+ *
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -1420,22 +1281,26 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
- // arguments; if so, don't trigger DoS protection to
- // avoid splitting the network between upgraded and
- // non-upgraded nodes.
+ // arguments; if so, ensure we return NOT_STANDARD
+ // instead of CONSENSUS to avoid downstream users
+ // splitting the network between upgraded and
+ // non-upgraded nodes by banning CONSENSUS-failing
+ // data providers.
CScriptCheck check2(coin.out, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
- return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
- // Failures of other flags indicate a transaction that is
- // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
- // such nodes as they are not following the protocol. That
- // said during an upgrade careful thought should be taken
- // as to the correct behavior - we may want to continue
- // peering with non-upgraded nodes even after soft-fork
- // super-majority signaling has occurred.
- return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
+ // MANDATORY flag failures correspond to
+ // ValidationInvalidReason::CONSENSUS. Because CONSENSUS
+ // failures are the most serious case of validation
+ // failures, we may need to consider using
+ // RECENT_CONSENSUS_CHANGE for any script failure that
+ // could be due to non-upgraded nodes which we may want to
+ // support, to avoid splitting the network (but this
+ // depends on the details of how net_processing handles
+ // such errors).
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
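For reference, a compact sketch of the two-pass classification performed above, where run_script is a hypothetical callable standing in for re-running the CScriptCheck under a given flag set; the first pass under the full standard flags is assumed to have already failed:

    #include <cstdint>
    #include <functional>

    enum class Reason { TX_NOT_STANDARD, CONSENSUS };

    // Illustrative flag bit only; the real values are defined in script/interpreter.h.
    constexpr uint32_t MANDATORY_FLAGS = 0x1;

    Reason ClassifyScriptFailure(const std::function<bool(uint32_t flags)>& run_script) {
        if (run_script(MANDATORY_FLAGS)) {
            // Passes once the policy-only (non-mandatory) flags are dropped:
            // a standardness failure, not a consensus failure.
            return Reason::TX_NOT_STANDARD;
        }
        // Still fails under the mandatory subset: invalid under consensus rules.
        return Reason::CONSENSUS;
    }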
@@ -1450,9 +1315,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
return true;
}
-namespace {
-
-bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
+static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1479,9 +1342,9 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint
return true;
}
-static bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex *pindex)
+bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- CDiskBlockPos pos = pindex->GetUndoPos();
+ FlatFilePos pos = pindex->GetUndoPos();
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
@@ -1528,8 +1391,6 @@ static bool AbortNode(CValidationState& state, const std::string& strMessage, co
return state.Error(strMessage);
}
-} // namespace
-
/**
* Restore the UTXO in a Coin at a given COutPoint
* @param undo The Coin to be restored.
@@ -1627,37 +1488,24 @@ void static FlushBlockFile(bool fFinalize = false)
{
LOCK(cs_LastBlockFile);
- CDiskBlockPos posOld(nLastBlockFile, 0);
- bool status = true;
-
- FILE *fileOld = OpenBlockFile(posOld);
- if (fileOld) {
- if (fFinalize)
- status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
- status &= FileCommit(fileOld);
- fclose(fileOld);
- }
-
- fileOld = OpenUndoFile(posOld);
- if (fileOld) {
- if (fFinalize)
- status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
- status &= FileCommit(fileOld);
- fclose(fileOld);
- }
+ FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
+ FlatFilePos undo_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nUndoSize);
+ bool status = true;
+ status &= BlockFileSeq().Flush(block_pos_old, fFinalize);
+ status &= UndoFileSeq().Flush(undo_pos_old, fFinalize);
if (!status) {
AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
}
}
-static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
+static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
- CDiskBlockPos _pos;
+ FlatFilePos _pos;
if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
return error("ConnectBlock(): FindUndoPos failed");
if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
@@ -1674,8 +1522,8 @@ static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState&
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
-void ThreadScriptCheck() {
- RenameThread("bitcoin-scriptch");
+void ThreadScriptCheck(int worker_num) {
+ util::ThreadRename(strprintf("scriptch.%i", worker_num));
scriptcheckqueue.Thread();
}
@@ -1812,7 +1660,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// re-enforce that rule here (at least until we make it impossible for
// GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
- if (state.CorruptionPossible()) {
+ if (state.GetReason() == ValidationInvalidReason::BLOCK_MUTATED) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
// problems.
@@ -1947,7 +1795,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
for (const auto& tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
- return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
}
@@ -1987,11 +1835,19 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
{
CAmount txfee = 0;
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
+ if (!IsBlockReason(state.GetReason())) {
+ // CheckTxInputs may return MISSING_INPUTS or
+ // PREMATURE_SPEND but we can't return that, as it's not
+ // defined for a block, so we reset the reason flag to
+ // CONSENSUS here.
+ state.Invalid(ValidationInvalidReason::CONSENSUS, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ }
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
nFees += txfee;
if (!MoneyRange(nFees)) {
- return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: accumulated fee in the block out of range.", __func__),
REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
}
@@ -2004,7 +1860,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
- return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
}
@@ -2015,7 +1871,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// * witness (when witness enabled in flags and excludes coinbase)
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
- return state.DoS(100, error("ConnectBlock(): too many sigops"),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
txdata.emplace_back(tx);
@@ -2023,9 +1879,20 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
{
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
- if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
+ if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) {
+ if (state.GetReason() == ValidationInvalidReason::TX_NOT_STANDARD) {
+ // CheckInputs may return NOT_STANDARD for extra flags we passed,
+ // but we can't return that, as it's not defined for a block, so
+ // we reset the reason flag to CONSENSUS here.
+ // In the event of a future soft-fork, we may need to
+ // consider whether rewriting to CONSENSUS or
+ // RECENT_CONSENSUS_CHANGE would be more appropriate.
+ state.Invalid(ValidationInvalidReason::CONSENSUS, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ }
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
+ }
control.Add(vChecks);
}
@@ -2040,13 +1907,13 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
- return state.DoS(100,
+ return state.Invalid(ValidationInvalidReason::CONSENSUS,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!control.Wait())
- return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
@@ -2074,16 +1941,12 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
return true;
}
-/**
- * Update the on-disk chain state.
- * The caches and indexes are flushed depending on the mode we're called with
- * if they're too large, if it's been a while since the last write,
- * or always and in all cases if we're in prune mode and are deleting files.
- *
- * If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do anything
- * besides checking if we need to prune.
- */
-bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) {
+bool CChainState::FlushStateToDisk(
+ const CChainParams& chainparams,
+ CValidationState &state,
+ FlushStateMode mode,
+ int nManualPruneHeight)
+{
int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
LOCK(cs_main);
static int64_t nLastWrite = 0;
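The doc comment removed in the hunk above summarizes when each FlushStateMode triggers a write; a simplified sketch of that decision, with booleans standing in for the real cache-size, timer, and prune checks:

    enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };

    // Simplified: `cache_too_large` folds together the large/critical cache checks,
    // `periodic_due` the time-since-last-write checks; pruning is handled separately
    // in the real function.
    bool ShouldDoFullFlush(FlushStateMode mode, bool cache_too_large, bool periodic_due) {
        switch (mode) {
        case FlushStateMode::ALWAYS:    return true;
        case FlushStateMode::IF_NEEDED: return cache_too_large;
        case FlushStateMode::PERIODIC:  return cache_too_large || periodic_due;
        case FlushStateMode::NONE:      return false;  // only the prune check runs
        }
        return false;
    }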
@@ -2134,8 +1997,9 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Depend on nMinDiskSpace to ensure we can write block index
- if (!CheckDiskSpace(0, true))
- return state.Error("out of disk space");
+ if (!CheckDiskSpace(GetBlocksDir())) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
// First make sure all block and undo data is flushed to disk.
FlushBlockFile();
// Then update all block file information (which may refer to block and undo files).
@@ -2168,8 +2032,9 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
- if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
- return state.Error("out of disk space");
+ if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * pcoinsTip->GetCacheSize())) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
// Flush the chainstate (which may refer to block index entries).
if (!pcoinsTip->Flush())
return AbortNode(state, "Failed to write to coin database");
@@ -2179,7 +2044,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
}
if (full_flush_completed) {
// Update best block in wallet (so we can detect restored wallets).
- GetMainSignals().ChainStateFlushed(chainActive.GetLocator());
+ GetMainSignals().ChainStateFlushed(m_chain.GetLocator());
}
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error while flushing: ") + e.what());
@@ -2187,19 +2052,20 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
return true;
}
-void FlushStateToDisk() {
+void CChainState::ForceFlushStateToDisk() {
CValidationState state;
const CChainParams& chainparams = Params();
- if (!FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
+ if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
}
}
-void PruneAndFlush() {
+void CChainState::PruneAndFlush() {
CValidationState state;
fCheckForPruning = true;
const CChainParams& chainparams = Params();
- if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
+
+ if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::NONE)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
}
}
@@ -2233,7 +2099,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
}
std::string warningMessages;
- if (!IsInitialBlockDownload())
+ if (!::ChainstateActive().IsInitialBlockDownload())
{
int nUpgraded = 0;
const CBlockIndex* pindex = pindexNew;
@@ -2271,7 +2137,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
}
-/** Disconnect chainActive's tip.
+/** Disconnect m_chain's tip.
* After calling, the mempool will be in an inconsistent state, with
* transactions from disconnected blocks being added to disconnectpool. You
* should make the mempool consistent again by calling UpdateMempoolForReorg.
@@ -2283,7 +2149,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
*/
bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
{
- CBlockIndex *pindexDelete = chainActive.Tip();
+ CBlockIndex *pindexDelete = m_chain.Tip();
assert(pindexDelete);
// Read block from disk.
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
@@ -2318,7 +2184,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
}
}
- chainActive.SetTip(pindexDelete->pprev);
+ m_chain.SetTip(pindexDelete->pprev);
UpdateTip(pindexDelete->pprev, chainparams);
// Let wallets know transactions went from 1-confirmed to
@@ -2396,14 +2262,14 @@ public:
};
/**
- * Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock
+ * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*
* The block is added to connectTrace if connection succeeds.
*/
bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
- assert(pindexNew->pprev == chainActive.Tip());
+ assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
std::shared_ptr<const CBlock> pthisBlock;
@@ -2444,8 +2310,8 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp
// Remove conflicting transactions from the mempool.
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
disconnectpool.removeForBlock(blockConnecting.vtx);
- // Update chainActive & related variables.
- chainActive.SetTip(pindexNew);
+ // Update m_chain & related variables.
+ m_chain.SetTip(pindexNew);
UpdateTip(pindexNew, chainparams);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
@@ -2476,7 +2342,7 @@ CBlockIndex* CChainState::FindMostWorkChain() {
// Just going until the active chain is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
- while (pindexTest && !chainActive.Contains(pindexTest)) {
+ while (pindexTest && !m_chain.Contains(pindexTest)) {
assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
@@ -2519,7 +2385,7 @@ void CChainState::PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
- while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
+ while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
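PruneBlockIndexCandidates above uses set::value_comp() to drop every candidate that sorts strictly worse than the current tip while keeping the tip (or anything better) in the set. A toy sketch of the same erase-while-worse idiom over integers, with a larger value standing in for more chain work (names and types here are illustrative, not Bitcoin Core's):

#include <cassert>
#include <set>

int main()
{
    // Candidates ordered ascending; the "tip" is the best entry we keep.
    std::set<int> candidates{1, 3, 5, 7, 9};
    const int tip = 7;

    // Erase everything that compares strictly worse than the tip, exactly as
    // PruneBlockIndexCandidates walks from begin() using value_comp().
    auto it = candidates.begin();
    while (it != candidates.end() && candidates.value_comp()(*it, tip)) {
        it = candidates.erase(it);
    }

    // The tip (or a successor we are working towards) is always left behind.
    assert(!candidates.empty());
    assert(*candidates.begin() == 7);
}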
@@ -2534,13 +2400,13 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
{
AssertLockHeld(cs_main);
- const CBlockIndex *pindexOldTip = chainActive.Tip();
- const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
+ const CBlockIndex *pindexOldTip = m_chain.Tip();
+ const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
DisconnectedBlockTransactions disconnectpool;
- while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
+ while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
if (!DisconnectTip(state, chainparams, &disconnectpool)) {
// This is likely a fatal error, but keep the mempool consistent,
// just in case. Only remove from the mempool in this case.
@@ -2572,7 +2438,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
- if (!state.CorruptionPossible()) {
+ if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
InvalidChainFound(vpindexToConnect.front());
}
state = CValidationState();
@@ -2588,7 +2454,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
}
} else {
PruneBlockIndexCandidates();
- if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
+ if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return temporarily to release the lock.
fContinue = false;
break;
@@ -2624,7 +2490,7 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
if (pindexHeader != pindexHeaderOld) {
fNotify = true;
- fInitialBlockDownload = IsInitialBlockDownload();
+ fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload();
pindexHeaderOld = pindexHeader;
}
}
@@ -2634,6 +2500,14 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
}
}
+static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
+ AssertLockNotHeld(cs_main);
+
+ if (GetMainSignals().CallbacksPending() > 10) {
+ SyncWithValidationInterfaceQueue();
+ }
+}
+
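The new LimitValidationInterfaceQueue() helper centralizes the back-pressure rule described in the comment carried into ActivateBestChain below: once more than 10 validation-interface callbacks are pending, block until the queue drains before doing more work. A minimal standalone sketch of that throttling pattern, using a plain std::deque of std::function callbacks in place of the real signal queue (all names here are illustrative, not Bitcoin Core APIs):

#include <deque>
#include <functional>
#include <iostream>

// Toy stand-in for the validation-interface callback queue.
static std::deque<std::function<void()>> g_pending_callbacks;

static size_t CallbacksPending() { return g_pending_callbacks.size(); }

// Run every queued callback; the real SyncWithValidationInterfaceQueue also
// waits for a background thread that is busy processing them.
static void DrainQueue()
{
    while (!g_pending_callbacks.empty()) {
        g_pending_callbacks.front()();
        g_pending_callbacks.pop_front();
    }
}

// Same shape as LimitValidationInterfaceQueue(): only force a drain once the
// backlog exceeds a small threshold, so normal operation is unaffected.
static void LimitQueue()
{
    if (CallbacksPending() > 10) {
        DrainQueue();
    }
}

int main()
{
    for (int block = 0; block < 25; ++block) {
        g_pending_callbacks.push_back([block] { std::cout << "notify block " << block << "\n"; });
        LimitQueue(); // keeps the backlog bounded during long connect loops
    }
    DrainQueue(); // flush whatever is left at the end
}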
/**
* Make the best chain active, in multiple steps. The result is either failure
* or an activated best chain. pblock is either nullptr or a pointer to a block
@@ -2662,19 +2536,17 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
do {
boost::this_thread::interruption_point();
- if (GetMainSignals().CallbacksPending() > 10) {
- // Block until the validation queue drains. This should largely
- // never happen in normal operation, however may happen during
- // reindex, causing memory blowup if we run too far ahead.
- // Note that if a validationinterface callback ends up calling
- // ActivateBestChain this may lead to a deadlock! We should
- // probably have a DEBUG_LOCKORDER test for this in the future.
- SyncWithValidationInterfaceQueue();
- }
+ // Block until the validation queue drains. This should largely
+ // never happen in normal operation; however, it may happen during
+ // reindex, causing memory blowup if we run too far ahead.
+ // Note that if a validationinterface callback ends up calling
+ // ActivateBestChain this may lead to a deadlock! We should
+ // probably have a DEBUG_LOCKORDER test for this in the future.
+ LimitValidationInterfaceQueue();
{
LOCK(cs_main);
- CBlockIndex* starting_tip = chainActive.Tip();
+ CBlockIndex* starting_tip = m_chain.Tip();
bool blocks_connected = false;
do {
// We absolutely may not unlock cs_main until we've made forward progress
@@ -2686,7 +2558,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
}
// Whether we have anything to do at all.
- if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) {
+ if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
break;
}
@@ -2700,16 +2572,16 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
// Wipe cache, we may need another branch now.
pindexMostWork = nullptr;
}
- pindexNewTip = chainActive.Tip();
+ pindexNewTip = m_chain.Tip();
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
assert(trace.pblock && trace.pindex);
GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs);
}
- } while (!chainActive.Tip() || (starting_tip && CBlockIndexWorkComparator()(chainActive.Tip(), starting_tip)));
+ } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
if (!blocks_connected) return true;
- const CBlockIndex* pindexFork = chainActive.FindFork(starting_tip);
+ const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
bool fInitialDownload = IsInitialBlockDownload();
// Notify external listeners about the new tip.
@@ -2744,22 +2616,22 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
}
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
- return g_chainstate.ActivateBestChain(state, chainparams, std::move(pblock));
+ return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock));
}
bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
{
LOCK(cs_main);
- if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
+ if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
// Nothing to do, this block is not at the tip.
return true;
}
- if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
+ if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
// The chain has been extended since the last call, reset the counter.
nBlockReverseSequenceId = -1;
}
- nLastPreciousChainwork = chainActive.Tip()->nChainWork;
+ nLastPreciousChainwork = m_chain.Tip()->nChainWork;
setBlockIndexCandidates.erase(pindex);
pindex->nSequenceId = nBlockReverseSequenceId;
if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
@@ -2776,75 +2648,96 @@ bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& par
return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
- return g_chainstate.PreciousBlock(state, params, pindex);
+ return ::ChainstateActive().PreciousBlock(state, params, pindex);
}
bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
- AssertLockHeld(cs_main);
+ CBlockIndex* to_mark_failed = pindex;
+ bool pindex_was_in_chain = false;
+ int disconnected = 0;
- // We first disconnect backwards and then mark the blocks as invalid.
- // This prevents a case where pruned nodes may fail to invalidateblock
- // and be left unable to start as they have no tip candidates (as there
- // are no blocks that meet the "have data and are not invalid per
- // nStatus" criteria for inclusion in setBlockIndexCandidates).
+ // Disconnect (descendants of) pindex, and mark them invalid.
+ while (true) {
+ if (ShutdownRequested()) break;
- bool pindex_was_in_chain = false;
- CBlockIndex *invalid_walk_tip = chainActive.Tip();
+ // Make sure the queue of validation callbacks doesn't grow unboundedly.
+ LimitValidationInterfaceQueue();
- DisconnectedBlockTransactions disconnectpool;
- while (chainActive.Contains(pindex)) {
+ LOCK(cs_main);
+ if (!m_chain.Contains(pindex)) break;
pindex_was_in_chain = true;
- // ActivateBestChain considers blocks already in chainActive
- // unconditionally valid already, so force disconnect away from it.
- if (!DisconnectTip(state, chainparams, &disconnectpool)) {
- // It's probably hopeless to try to make the mempool consistent
- // here if DisconnectTip failed, but we can try.
- UpdateMempoolForReorg(disconnectpool, false);
- return false;
- }
- }
+ CBlockIndex *invalid_walk_tip = m_chain.Tip();
- // Now mark the blocks we just disconnected as descendants invalid
- // (note this may not be all descendants).
- while (pindex_was_in_chain && invalid_walk_tip != pindex) {
- invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD;
+ // ActivateBestChain considers blocks already in m_chain
+ // unconditionally valid already, so force disconnect away from it.
+ DisconnectedBlockTransactions disconnectpool;
+ bool ret = DisconnectTip(state, chainparams, &disconnectpool);
+ // DisconnectTip will add transactions to disconnectpool.
+ // Adjust the mempool to be consistent with the new tip, adding
+ // transactions back to the mempool if disconnecting was successful,
+ // and we're not doing a very deep invalidation (in which case
+ // keeping the mempool up to date is probably futile anyway).
+ UpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
+ if (!ret) return false;
+ assert(invalid_walk_tip->pprev == m_chain.Tip());
+
+ // We immediately mark the disconnected blocks as invalid.
+ // This prevents a case where pruned nodes may fail to invalidateblock
+ // and be left unable to start as they have no tip candidates (as there
+ // are no blocks that meet the "have data and are not invalid per
+ // nStatus" criteria for inclusion in setBlockIndexCandidates).
+ invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(invalid_walk_tip);
setBlockIndexCandidates.erase(invalid_walk_tip);
- invalid_walk_tip = invalid_walk_tip->pprev;
+ setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
+ if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
+ // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
+ // need to be BLOCK_FAILED_CHILD instead.
+ to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(to_mark_failed);
+ }
+
+ // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
+ // iterations, or, if it's the last one, call InvalidChainFound on it.
+ to_mark_failed = invalid_walk_tip;
}
- // Mark the block itself as invalid.
- pindex->nStatus |= BLOCK_FAILED_VALID;
- setDirtyBlockIndex.insert(pindex);
- setBlockIndexCandidates.erase(pindex);
- m_failed_blocks.insert(pindex);
+ {
+ LOCK(cs_main);
+ if (m_chain.Contains(to_mark_failed)) {
+ // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
+ return false;
+ }
- // DisconnectTip will add transactions to disconnectpool; try to add these
- // back to the mempool.
- UpdateMempoolForReorg(disconnectpool, true);
+ // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
+ to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
+ setDirtyBlockIndex.insert(to_mark_failed);
+ setBlockIndexCandidates.erase(to_mark_failed);
+ m_failed_blocks.insert(to_mark_failed);
- // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
- // add it again.
- BlockMap::iterator it = mapBlockIndex.begin();
- while (it != mapBlockIndex.end()) {
- if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
- setBlockIndexCandidates.insert(it->second);
+ // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
+ // add it again.
+ BlockMap::iterator it = mapBlockIndex.begin();
+ while (it != mapBlockIndex.end()) {
+ if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
+ setBlockIndexCandidates.insert(it->second);
+ }
+ it++;
}
- it++;
- }
- InvalidChainFound(pindex);
+ InvalidChainFound(to_mark_failed);
+ }
// Only notify about a new block tip if the active chain was modified.
if (pindex_was_in_chain) {
- uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
+ uiInterface.NotifyBlockTip(IsInitialBlockDownload(), to_mark_failed->pprev);
}
return true;
}
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
- return g_chainstate.InvalidateBlock(state, chainparams, pindex);
+ return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex);
}
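Inside the InvalidateBlock loop above, only the deepest disconnected block keeps BLOCK_FAILED_VALID; when another block is disconnected beneath it, the previous one is rewritten from BLOCK_FAILED_VALID to BLOCK_FAILED_CHILD with the xor-then-or expression shown. A small standalone sketch of that flag swap, with made-up flag values that only illustrate the bit manipulation (the real constants live in chain.h):

#include <cassert>
#include <cstdint>

// Illustrative flag values, not the real BLOCK_FAILED_* constants.
constexpr uint32_t FAILED_VALID = 1u << 5;
constexpr uint32_t FAILED_CHILD = 1u << 6;
constexpr uint32_t HAVE_DATA    = 1u << 3; // some unrelated flag that must survive

int main()
{
    uint32_t status = HAVE_DATA | FAILED_VALID;

    // Same shape as the expression in InvalidateBlock: clear FAILED_VALID,
    // set FAILED_CHILD, and leave every other status bit untouched.
    status = (status ^ FAILED_VALID) | FAILED_CHILD;

    assert((status & FAILED_VALID) == 0);
    assert(status & FAILED_CHILD);
    assert(status & HAVE_DATA);
}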
void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
@@ -2858,7 +2751,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
- if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
+ if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
setBlockIndexCandidates.insert(it->second);
}
if (it->second == pindexBestInvalid) {
@@ -2882,7 +2775,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
}
void ResetBlockFailureFlags(CBlockIndex *pindex) {
- return g_chainstate.ResetBlockFailureFlags(pindex);
+ return ::ChainstateActive().ResetBlockFailureFlags(pindex);
}
CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
@@ -2922,7 +2815,7 @@ CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
-void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
+void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
@@ -2950,7 +2843,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
- if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
+ if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
@@ -2968,7 +2861,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
}
}
-static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
+static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
LOCK(cs_LastBlockFile);
@@ -3003,21 +2896,13 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int
vinfoBlockFile[nFile].nSize += nAddSize;
if (!fKnown) {
- unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- if (nNewChunks > nOldChunks) {
- if (fPruneMode)
- fCheckForPruning = true;
- if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) {
- FILE *file = OpenBlockFile(pos);
- if (file) {
- LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
- AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
- fclose(file);
- }
- }
- else
- return error("out of disk space");
+ bool out_of_space;
+ size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
}
}
@@ -3025,32 +2910,23 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int
return true;
}
-static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
+static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
- unsigned int nNewSize;
pos.nPos = vinfoBlockFile[nFile].nUndoSize;
- nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ vinfoBlockFile[nFile].nUndoSize += nAddSize;
setDirtyFileInfo.insert(nFile);
- unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
- if (nNewChunks > nOldChunks) {
- if (fPruneMode)
- fCheckForPruning = true;
- if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) {
- FILE *file = OpenUndoFile(pos);
- if (file) {
- LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
- AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
- fclose(file);
- }
- }
- else
- return state.Error("out of disk space");
+ bool out_of_space;
+ size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
}
return true;
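The deleted branches in FindBlockPos and FindUndoPos above both implemented the same chunk arithmetic: round the file up to the next *_CHUNK_SIZE boundary and pre-allocate only when the new write crosses into another chunk. That rounding now lives behind FlatFilePos/FlatFileSeq::Allocate. A standalone sketch of the math, with a 16 MiB chunk size chosen only for illustration:

#include <cassert>
#include <cstdint>

// Bytes that must be pre-allocated so a write of add_size at offset pos fits,
// growing the file in whole chunks (0 if the current chunk already covers it).
uint64_t BytesToAllocate(uint64_t pos, uint64_t add_size, uint64_t chunk_size)
{
    const uint64_t old_chunks = (pos + chunk_size - 1) / chunk_size;
    const uint64_t new_chunks = (pos + add_size + chunk_size - 1) / chunk_size;
    if (new_chunks > old_chunks) {
        return new_chunks * chunk_size - pos;
    }
    return 0;
}

int main()
{
    const uint64_t CHUNK = 16u << 20; // illustrative chunk size
    assert(BytesToAllocate(0, 1000, CHUNK) == CHUNK);              // first write grows to one chunk
    assert(BytesToAllocate(1000, 2000, CHUNK) == 0);               // still inside the first chunk
    assert(BytesToAllocate(CHUNK - 10, 100, CHUNK) == CHUNK + 10); // crossing into the second chunk
}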
@@ -3060,7 +2936,7 @@ static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state,
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
- return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "high-hash", "proof of work failed");
return true;
}
@@ -3082,13 +2958,13 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2)
- return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txnmrklroot", "hashMerkleRoot mismatch");
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
// of transactions in a block without affecting the merkle root of a block,
// while still invalidating it.
if (mutated)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txns-duplicate", "duplicate transaction");
}
// All potential-corruption validation must be done before we do any
@@ -3099,19 +2975,19 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// Size limits
if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-length", "size limits failed");
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-missing", "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i]->IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-multiple", "more than one coinbase");
// Check transactions
for (const auto& tx : block.vtx)
if (!CheckTransaction(*tx, state, true))
- return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
+ return state.Invalid(state.GetReason(), false, state.GetRejectCode(), state.GetRejectReason(),
strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));
unsigned int nSigOps = 0;
@@ -3120,7 +2996,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
nSigOps += GetLegacySigOpCount(*tx);
}
if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-sigops", "out-of-bounds SigOpCount");
if (fCheckPOW && fCheckMerkleRoot)
block.fChecked = true;
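The mutated/bad-txns-duplicate checks above guard against the CVE-2012-2459 merkle malleability: because odd levels of the merkle tree duplicate their last entry, a block with a repeated final transaction hashes to the same merkle root as the honest block in the simplest case, so it must be rejected as possible corruption rather than cached as permanently invalid. A toy demonstration with a stand-in 64-bit combiner (not the real double-SHA256 tree):

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in combiner; Bitcoin hashes the concatenated 32-byte child hashes with SHA256d.
uint64_t Combine(uint64_t a, uint64_t b) { return a * 1000003u ^ (b + 0x9e3779b97f4a7c15ull); }

uint64_t ToyMerkleRoot(std::vector<uint64_t> level)
{
    while (level.size() > 1) {
        if (level.size() % 2 != 0) level.push_back(level.back()); // duplicate last entry on odd levels
        std::vector<uint64_t> next;
        for (size_t i = 0; i < level.size(); i += 2) next.push_back(Combine(level[i], level[i + 1]));
        level = next;
    }
    return level.empty() ? 0 : level[0];
}

int main()
{
    // Duplicating the last "transaction" does not change the root: both trees
    // hash the pair (C, C) at the first level, which is exactly the mutation
    // the bad-txns-duplicate check is there to catch.
    const std::vector<uint64_t> honest{0xA, 0xB, 0xC};
    const std::vector<uint64_t> mutated{0xA, 0xB, 0xC, 0xC};
    assert(ToyMerkleRoot(honest) == ToyMerkleRoot(mutated));
}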
@@ -3196,6 +3072,22 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
return commitment;
}
+//! Returns the last CBlockIndex* that is a checkpoint
+static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+ for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
+ {
+ const uint256& hash = i.second;
+ CBlockIndex* pindex = LookupBlockIndex(hash);
+ if (pindex) {
+ return pindex;
+ }
+ }
+ return nullptr;
+}
+
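GetLastCheckpoint, moved into validation.cpp as a static helper, walks the hard-coded checkpoints from highest to lowest height and returns the first one present in our block index (reverse_iterate is effectively a range adaptor over rbegin()/rend()). A plain-STL sketch of that reverse scan, using strings in place of block hashes (types and names here are illustrative):

#include <cassert>
#include <map>
#include <set>
#include <string>

// Highest checkpoint whose hash is actually in our index, or "" if none is.
std::string LastKnownCheckpoint(const std::map<int, std::string>& checkpoints,
                                const std::set<std::string>& block_index)
{
    for (auto it = checkpoints.rbegin(); it != checkpoints.rend(); ++it) {
        if (block_index.count(it->second)) return it->second;
    }
    return "";
}

int main()
{
    const std::map<int, std::string> checkpoints{{11111, "aa"}, {33333, "bb"}, {99999, "cc"}};
    const std::set<std::string> block_index{"aa", "bb"}; // we have not seen "cc" yet
    assert(LastKnownCheckpoint(checkpoints, block_index) == "bb");
}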
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock().
@@ -3205,7 +3097,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
@@ -3213,32 +3105,32 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
// Check proof of work
const Consensus::Params& consensusParams = params.GetConsensus();
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
- return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "bad-diffbits", "incorrect proof of work");
// Check against checkpoints
if (fCheckpointsEnabled) {
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
// MapBlockIndex.
- CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints());
+ CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
- return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
+ return state.Invalid(ValidationInvalidReason::BLOCK_CHECKPOINT, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
- return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
- return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
+ return state.Invalid(ValidationInvalidReason::BLOCK_TIME_FUTURE, false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
- return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
return true;
@@ -3268,7 +3160,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// Check that all transactions are finalized
for (const auto& tx : block.vtx) {
if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
- return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-nonfinal", "non-final transaction");
}
}
@@ -3278,7 +3170,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
CScript expect = CScript() << nHeight;
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-height", "block height mismatch in coinbase");
}
}
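The bad-cb-height rule above is the BIP34 check: the coinbase scriptSig must begin with the push that CScript() << nHeight produces, i.e. a length byte followed by the height in minimally encoded little-endian form (heights 1 through 16 would instead use single-byte OP_n opcodes). A hedged standalone sketch of that serialization for larger positive heights, with sign and zero corner cases omitted:

#include <cassert>
#include <cstdint>
#include <vector>

// Minimal little-endian encoding of a positive integer in the CScriptNum style,
// preceded by the single push-length byte used for short direct pushes.
std::vector<uint8_t> EncodeHeightPush(uint32_t height)
{
    std::vector<uint8_t> num;
    while (height) {
        num.push_back(height & 0xff);
        height >>= 8;
    }
    // If the top bit of the last byte is set, append 0x00 so the value stays positive.
    if (!num.empty() && (num.back() & 0x80)) num.push_back(0x00);

    std::vector<uint8_t> push;
    push.push_back(static_cast<uint8_t>(num.size())); // direct push opcode for 1..75 bytes
    push.insert(push.end(), num.begin(), num.end());
    return push;
}

int main()
{
    // Height 500000 = 0x07a120 -> push of the three bytes 20 a1 07.
    const std::vector<uint8_t> expected{0x03, 0x20, 0xa1, 0x07};
    assert(EncodeHeightPush(500000) == expected);
}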
@@ -3300,11 +3192,11 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// already does not permit it, it is impossible to trigger in the
// witness tree.
if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
- return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness reserved value size", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
}
CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
- return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
}
fHaveWitness = true;
}
@@ -3314,7 +3206,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
if (!fHaveWitness) {
for (const auto& tx : block.vtx) {
if (tx->HasWitness()) {
- return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
}
}
}
@@ -3326,7 +3218,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// the block hash, so we couldn't mark the block as permanently
// failed).
if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
}
return true;
@@ -3346,7 +3238,7 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK)
- return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
+ return state.Invalid(ValidationInvalidReason::CACHED_INVALID, error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
return true;
}
@@ -3357,10 +3249,10 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
- return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MISSING_PREV, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
- return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
@@ -3397,7 +3289,7 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
setDirtyBlockIndex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
}
}
}
@@ -3421,7 +3313,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
- if (!g_chainstate.AcceptBlockHeader(header, state, chainparams, &pindex)) {
+ if (!::ChainstateActive().AcceptBlockHeader(header, state, chainparams, &pindex)) {
if (first_invalid) *first_invalid = header;
return false;
}
@@ -3435,26 +3327,26 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-static CDiskBlockPos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const CDiskBlockPos* dbp) {
+static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const FlatFilePos* dbp) {
unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
- CDiskBlockPos blockPos;
+ FlatFilePos blockPos;
if (dbp != nullptr)
blockPos = *dbp;
if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
error("%s: FindBlockPos failed", __func__);
- return CDiskBlockPos();
+ return FlatFilePos();
}
if (dbp == nullptr) {
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
AbortNode("Failed to write block");
- return CDiskBlockPos();
+ return FlatFilePos();
}
}
return blockPos;
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
+bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
const CBlock& block = *pblock;
@@ -3471,13 +3363,13 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
- bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
+ bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
- bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
+ bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
// TODO: Decouple this function from the block download logic by removing fRequested
// This requires some new chain data structure to efficiently look up if a
@@ -3501,7 +3393,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
- if (state.IsInvalid() && !state.CorruptionPossible()) {
+ assert(IsBlockReason(state.GetReason()));
+ if (state.IsInvalid() && state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
@@ -3510,13 +3403,13 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
// Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
// (but if it does not build on our best tip, let the SendMessages loop relay it)
- if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev)
+ if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
GetMainSignals().NewPoWValidBlock(pindex, pblock);
// Write block to history file
if (fNewBlock) *fNewBlock = true;
try {
- CDiskBlockPos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
+ FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
if (blockPos.IsNull()) {
state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
return false;
@@ -3551,7 +3444,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
if (ret) {
// Store to disk
- ret = g_chainstate.AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
+ ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
}
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
@@ -3562,7 +3455,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
NotifyHeaderTip();
CValidationState state; // Only used to report errors, not invalidity - ignore it
- if (!g_chainstate.ActivateBestChain(state, chainparams, pblock))
+ if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
return true;
@@ -3571,7 +3464,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
AssertLockHeld(cs_main);
- assert(pindexPrev && pindexPrev == chainActive.Tip());
+ assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
CCoinsViewCache viewNew(pcoinsTip.get());
uint256 block_hash(block.GetHash());
CBlockIndex indexDummy(block);
@@ -3586,7 +3479,7 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
- if (!g_chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
+ if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
return false;
assert(state.IsValid());
@@ -3647,9 +3540,9 @@ void PruneOneBlockFile(const int fileNumber)
void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
{
for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
- CDiskBlockPos pos(*it, 0);
- fs::remove(GetBlockPosFilename(pos, "blk"));
- fs::remove(GetBlockPosFilename(pos, "rev"));
+ FlatFilePos pos(*it, 0);
+ fs::remove(BlockFileSeq().FileName(pos));
+ fs::remove(UndoFileSeq().FileName(pos));
LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
@@ -3660,11 +3553,11 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr
assert(fPruneMode && nManualPruneHeight > 0);
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == nullptr)
+ if (::ChainActive().Tip() == nullptr)
return;
// last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
- unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
+ unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
int count=0;
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
@@ -3681,7 +3574,8 @@ void PruneBlockFilesManual(int nManualPruneHeight)
{
CValidationState state;
const CChainParams& chainparams = Params();
- if (!FlushStateToDisk(chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
+ if (!::ChainstateActive().FlushStateToDisk(
+ chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
}
}
@@ -3704,14 +3598,14 @@ void PruneBlockFilesManual(int nManualPruneHeight)
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
+ if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
return;
}
- if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
+ if ((uint64_t)::ChainActive().Tip()->nHeight <= nPruneAfterHeight) {
return;
}
- unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
+ unsigned int nLastBlockWeCanPrune = ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files
// So we should leave a buffer under our target to account for another allocation
@@ -3725,7 +3619,7 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
// To avoid excessive prune events negating the benefit of high dbcache
// values, we should not prune too rapidly.
// So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
- if (IsInitialBlockDownload()) {
+ if (::ChainstateActive().IsInitialBlockDownload()) {
// Since this is only relevant during IBD, we use a fixed 10%
nBuffer += nPruneTarget / 10;
}
@@ -3757,52 +3651,28 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
nLastBlockWeCanPrune, count);
}
-bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir)
+static FlatFileSeq BlockFileSeq()
{
- uint64_t nFreeBytesAvailable = fs::space(blocks_dir ? GetBlocksDir() : GetDataDir()).available;
-
- // Check for nMinDiskSpace bytes (currently 50MB)
- if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
- return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
-
- return true;
+ return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
}
-static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
+static FlatFileSeq UndoFileSeq()
{
- if (pos.IsNull())
- return nullptr;
- fs::path path = GetBlockPosFilename(pos, prefix);
- fs::create_directories(path.parent_path());
- FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+");
- if (!file && !fReadOnly)
- file = fsbridge::fopen(path, "wb+");
- if (!file) {
- LogPrintf("Unable to open file %s\n", path.string());
- return nullptr;
- }
- if (pos.nPos) {
- if (fseek(file, pos.nPos, SEEK_SET)) {
- LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
- fclose(file);
- return nullptr;
- }
- }
- return file;
+ return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
}
-FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
- return OpenDiskFile(pos, "blk", fReadOnly);
+FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
+ return BlockFileSeq().Open(pos, fReadOnly);
}
/** Open an undo file (rev?????.dat) */
-static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
- return OpenDiskFile(pos, "rev", fReadOnly);
+static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
+ return UndoFileSeq().Open(pos, fReadOnly);
}
-fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
+fs::path GetBlockPosFilename(const FlatFilePos &pos)
{
- return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile);
+ return BlockFileSeq().FileName(pos);
}
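After this refactor, OpenBlockFile, OpenUndoFile and GetBlockPosFilename all defer to a FlatFileSeq that owns the blocks directory, the "blk"/"rev" prefix and the chunk size; the old "%s%05u.dat" naming from the removed GetBlockPosFilename is presumably preserved inside FlatFileSeq::FileName. A standalone sketch of that file-number-to-name mapping, with illustrative paths and prefixes:

#include <cassert>
#include <cstdio>
#include <string>

// Minimal model of a flat-file sequence: a directory, a prefix, and files
// named <prefix>NNNNN.dat, matching the blk00000.dat / rev00000.dat layout.
struct ToyFileSeq {
    std::string dir;
    std::string prefix;

    std::string FileName(int file_number) const
    {
        char buf[16];
        std::snprintf(buf, sizeof(buf), "%05d.dat", file_number);
        return dir + "/" + prefix + buf;
    }
};

int main()
{
    const ToyFileSeq blocks{"/tmp/blocks", "blk"}; // illustrative path
    const ToyFileSeq undo{"/tmp/blocks", "rev"};
    assert(blocks.FileName(12) == "/tmp/blocks/blk00012.dat");
    assert(undo.FileName(0) == "/tmp/blocks/rev00000.dat");
}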
CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
@@ -3841,6 +3711,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
sort(vSortedByHeight.begin(), vSortedByHeight.end());
for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
{
+ if (ShutdownRequested()) return false;
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
@@ -3877,7 +3748,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- if (!g_chainstate.LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
+ if (!::ChainstateActive().LoadBlockIndex(chainparams.GetConsensus(), *pblocktree))
return false;
// Load block file info
@@ -3909,7 +3780,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
- CDiskBlockPos pos(*it, 0);
+ FlatFilePos pos(*it, 0);
if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
@@ -3931,33 +3802,23 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
bool LoadChainTip(const CChainParams& chainparams)
{
AssertLockHeld(cs_main);
+ assert(!pcoinsTip->GetBestBlock().IsNull()); // Never called when the coins view is empty
- if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
-
- if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) {
- // In case we just added the genesis block, connect it now, so
- // that we always have a chainActive.Tip() when we return.
- LogPrintf("%s: Connecting genesis block...\n", __func__);
- CValidationState state;
- if (!ActivateBestChain(state, chainparams)) {
- LogPrintf("%s: failed to activate chain (%s)\n", __func__, FormatStateMessage(state));
- return false;
- }
- }
+ if (::ChainActive().Tip() && ::ChainActive().Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
// Load pointer to end of best chain
CBlockIndex* pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
if (!pindex) {
return false;
}
- chainActive.SetTip(pindex);
+ ::ChainActive().SetTip(pindex);
- g_chainstate.PruneBlockIndexCandidates();
+ ::ChainstateActive().PruneBlockIndexCandidates();
LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
- chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
- FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()),
- GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
+ ::ChainActive().Tip()->GetBlockHash().ToString(), ::ChainActive().Height(),
+ FormatISO8601DateTime(::ChainActive().Tip()->GetBlockTime()),
+ GuessVerificationProgress(chainparams.TxData(), ::ChainActive().Tip()));
return true;
}
@@ -3974,12 +3835,12 @@ CVerifyDB::~CVerifyDB()
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
- if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
+ if (::ChainActive().Tip() == nullptr || ::ChainActive().Tip()->pprev == nullptr)
return true;
// Verify blocks in the best chain
- if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
- nCheckDepth = chainActive.Height();
+ if (nCheckDepth <= 0 || nCheckDepth > ::ChainActive().Height())
+ nCheckDepth = ::ChainActive().Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(coinsview);
@@ -3989,16 +3850,16 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
CValidationState state;
int reportDone = 0;
LogPrintf("[0%%]..."); /* Continued */
- for (pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
+ for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
boost::this_thread::interruption_point();
- const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
+ const int percentageDone = std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone/10) {
// report every 10% step
LogPrintf("[%d%%]...", percentageDone); /* Continued */
reportDone = percentageDone/10;
}
uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
- if (pindex->nHeight <= chainActive.Height()-nCheckDepth)
+ if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
break;
if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
// If pruning, only go back as far as we have data.
@@ -4025,7 +3886,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
assert(coins.GetBestBlock() == pindex->GetBlockHash());
- DisconnectResult res = g_chainstate.DisconnectBlock(block, pindex, coins);
+ DisconnectResult res = ::ChainstateActive().DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
}
@@ -4040,27 +3901,27 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
if (pindexFailure)
- return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
+ return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", ::ChainActive().Height() - pindexFailure->nHeight + 1, nGoodTransactions);
// store block count as we move pindex at check level >= 4
- int block_count = chainActive.Height() - pindex->nHeight;
+ int block_count = ::ChainActive().Height() - pindex->nHeight;
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
- while (pindex != chainActive.Tip()) {
+ while (pindex != ::ChainActive().Tip()) {
boost::this_thread::interruption_point();
- const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
+ const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
if (reportDone < percentageDone/10) {
// report every 10% step
LogPrintf("[%d%%]...", percentageDone); /* Continued */
reportDone = percentageDone/10;
}
uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
- pindex = chainActive.Next(pindex);
+ pindex = ::ChainActive().Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
- if (!g_chainstate.ConnectBlock(block, state, pindex, coins, chainparams))
+ if (!::ChainstateActive().ConnectBlock(block, state, pindex, coins, chainparams))
return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
}
}
@@ -4159,41 +4020,117 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
}
bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
- return g_chainstate.ReplayBlocks(params, view);
+ return ::ChainstateActive().ReplayBlocks(params, view);
+}
+
+//! Helper for CChainState::RewindBlockIndex
+void CChainState::EraseBlockData(CBlockIndex* index)
+{
+ AssertLockHeld(cs_main);
+ assert(!m_chain.Contains(index)); // Make sure this block isn't active
+
+ // Reduce validity
+ index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
+ // Remove have-data flags.
+ index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
+ // Remove storage location.
+ index->nFile = 0;
+ index->nDataPos = 0;
+ index->nUndoPos = 0;
+ // Remove various other things
+ index->nTx = 0;
+ index->nChainTx = 0;
+ index->nSequenceId = 0;
+ // Make sure it gets written.
+ setDirtyBlockIndex.insert(index);
+ // Update indexes
+ setBlockIndexCandidates.erase(index);
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(index->pprev);
+ while (ret.first != ret.second) {
+ if (ret.first->second == index) {
+ mapBlocksUnlinked.erase(ret.first++);
+ } else {
+ ++ret.first;
+ }
+ }
+ // Mark parent as eligible for main chain again
+ if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
+ setBlockIndexCandidates.insert(index->pprev);
+ }
}
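The "Reduce validity" line in EraseBlockData above clamps the validity level stored in the low bits of nStatus down to BLOCK_VALID_TREE while preserving every flag outside BLOCK_VALID_MASK. A standalone sketch of that masked min, with illustrative constants standing in for the real chain.h values:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Illustrative layout: low bits hold the validity level, higher bits are flags.
constexpr uint32_t VALID_MASK    = 0x07;
constexpr uint32_t VALID_TREE    = 2;
constexpr uint32_t VALID_SCRIPTS = 5;
constexpr uint32_t HAVE_DATA     = 1u << 3;

uint32_t ReduceValidity(uint32_t status)
{
    // Same shape as EraseBlockData: lower the validity field to at most
    // VALID_TREE, keep every non-validity flag bit unchanged.
    return std::min<uint32_t>(status & VALID_MASK, VALID_TREE) | (status & ~VALID_MASK);
}

int main()
{
    const uint32_t status = HAVE_DATA | VALID_SCRIPTS;
    const uint32_t reduced = ReduceValidity(status);
    assert((reduced & VALID_MASK) == VALID_TREE); // validity lowered
    assert(reduced & HAVE_DATA);                  // unrelated flag preserved by this expression
}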
bool CChainState::RewindBlockIndex(const CChainParams& params)
{
- LOCK(cs_main);
+ // Note that during -reindex-chainstate we are called with an empty m_chain!
- // Note that during -reindex-chainstate we are called with an empty chainActive!
+ // First erase all post-segwit blocks without witness not in the main chain,
+ // as this can be done without costly DisconnectTip calls. Active
+ // blocks will be dealt with below (releasing cs_main in between).
+ {
+ LOCK(cs_main);
+ for (const auto& entry : mapBlockIndex) {
+ if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
+ EraseBlockData(entry.second);
+ }
+ }
+ }
+ // Find what height we need to reorganize to.
+ CBlockIndex *tip;
int nHeight = 1;
- while (nHeight <= chainActive.Height()) {
- // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
- // blocks in ConnectBlock, we don't need to go back and
- // re-download/re-verify blocks from before segwit actually activated.
- if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
- break;
+ {
+ LOCK(cs_main);
+ while (nHeight <= m_chain.Height()) {
+ // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
+ // blocks in ConnectBlock, we don't need to go back and
+ // re-download/re-verify blocks from before segwit actually activated.
+ if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
+ break;
+ }
+ nHeight++;
}
- nHeight++;
- }
+ tip = m_chain.Tip();
+ }
// nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
+
CValidationState state;
- CBlockIndex* pindex = chainActive.Tip();
- while (chainActive.Height() >= nHeight) {
- if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
- // If pruning, don't try rewinding past the HAVE_DATA point;
- // since older blocks can't be served anyway, there's
- // no need to walk further, and trying to DisconnectTip()
- // will fail (and require a needless reindex/redownload
- // of the blockchain).
- break;
- }
- if (!DisconnectTip(state, params, nullptr)) {
- return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", pindex->nHeight, FormatStateMessage(state));
+ // Loop until the tip is below nHeight, or we reach a pruned block.
+ while (!ShutdownRequested()) {
+ {
+ LOCK(cs_main);
+ // Make sure nothing changed from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
+ assert(tip == m_chain.Tip());
+ if (tip == nullptr || tip->nHeight < nHeight) break;
+ if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
+ // If pruning, don't try rewinding past the HAVE_DATA point;
+ // since older blocks can't be served anyway, there's
+ // no need to walk further, and trying to DisconnectTip()
+ // will fail (and require a needless reindex/redownload
+ // of the blockchain).
+ break;
+ }
+
+ // Disconnect block
+ if (!DisconnectTip(state, params, nullptr)) {
+ return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, FormatStateMessage(state));
+ }
+
+ // Reduce validity flag and have-data flags.
+ // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
+ // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
+ // Note: If we encounter an insufficiently validated block that
+ // is on m_chain, it must be because we are a pruning node, and
+ // this block or some successor doesn't HAVE_DATA, so we were unable to
+ // rewind all the way. Blocks remaining on m_chain at this point
+ // must not have their validity reduced.
+ EraseBlockData(tip);
+
+ tip = tip->pprev;
}
+ // Make sure the queue of validation callbacks doesn't grow unboundedly.
+ LimitValidationInterfaceQueue();
+
// Occasionally flush state to disk.
if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
@@ -4201,69 +4138,32 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
}
}
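// For orientation across the interleaved hunks above, the new rewind loop has
// the following shape (condensed and illustrative, error messages trimmed):
// cs_main is taken per iteration so the validation-callback queue can be
// drained and the chainstate flushed without holding the lock.
//
//     while (!ShutdownRequested()) {
//         {
//             LOCK(cs_main);
//             if (tip == nullptr || tip->nHeight < nHeight) break;          // rewound far enough
//             if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) break;   // can't rewind past pruned data
//             if (!DisconnectTip(state, params, nullptr)) return false;
//             EraseBlockData(tip);
//             tip = tip->pprev;
//         }
//         LimitValidationInterfaceQueue();
//         if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) return false;
//     }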
- // Reduce validity flag and have-data flags.
- // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
- // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
- for (const auto& entry : mapBlockIndex) {
- CBlockIndex* pindexIter = entry.second;
-
- // Note: If we encounter an insufficiently validated block that
- // is on chainActive, it must be because we are a pruning node, and
- // this block or some successor doesn't HAVE_DATA, so we were unable to
- // rewind all the way. Blocks remaining on chainActive at this point
- // must not have their validity reduced.
- if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
- // Reduce validity
- pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
- // Remove have-data flags.
- pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
- // Remove storage location.
- pindexIter->nFile = 0;
- pindexIter->nDataPos = 0;
- pindexIter->nUndoPos = 0;
- // Remove various other things
- pindexIter->nTx = 0;
- pindexIter->nChainTx = 0;
- pindexIter->nSequenceId = 0;
- // Make sure it gets written.
- setDirtyBlockIndex.insert(pindexIter);
- // Update indexes
- setBlockIndexCandidates.erase(pindexIter);
- std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
- while (ret.first != ret.second) {
- if (ret.first->second == pindexIter) {
- mapBlocksUnlinked.erase(ret.first++);
- } else {
- ++ret.first;
- }
- }
- } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->HaveTxsDownloaded()) {
- setBlockIndexCandidates.insert(pindexIter);
- }
- }
-
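// The inline clean-up removed above is assumed to have moved into the
// EraseBlockData() helper that the new loop calls. A hedged sketch of that
// helper, reconstructed from the removed lines (the real definition is outside
// the hunks shown): drop validity to BLOCK_VALID_TREE, clear the have-data
// flags and on-disk location, and unhook the entry from the candidate and
// unlinked-block indexes.
void CChainState::EraseBlockData(CBlockIndex* index)
{
    AssertLockHeld(cs_main);
    assert(!m_chain.Contains(index)); // only blocks off the active chain may be erased

    // Reduce validity.
    index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
    // Remove have-data flags and storage location.
    index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
    index->nFile = 0;
    index->nDataPos = 0;
    index->nUndoPos = 0;
    // Remove various other things.
    index->nTx = 0;
    index->nChainTx = 0;
    index->nSequenceId = 0;
    // Make sure it gets written, and drop it from the in-memory indexes.
    setDirtyBlockIndex.insert(index);
    setBlockIndexCandidates.erase(index);
    auto range = mapBlocksUnlinked.equal_range(index->pprev);
    while (range.first != range.second) {
        if (range.first->second == index) {
            range.first = mapBlocksUnlinked.erase(range.first);
        } else {
            ++range.first;
        }
    }
}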
- if (chainActive.Tip() != nullptr) {
- // We can't prune block index candidates based on our tip if we have
- // no tip due to chainActive being empty!
- PruneBlockIndexCandidates();
+ {
+ LOCK(cs_main);
+ if (m_chain.Tip() != nullptr) {
+ // We can't prune block index candidates based on our tip if we have
+ // no tip due to m_chain being empty!
+ PruneBlockIndexCandidates();
- CheckBlockIndex(params.GetConsensus());
+ CheckBlockIndex(params.GetConsensus());
+ }
}
return true;
}
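// From here on, free functions reach the unique chainstate through accessor
// functions instead of the bare globals. Their declarations (assumed to live
// in validation.h as part of this refactor) look like:
CChainState& ChainstateActive(); // replaces direct use of g_chainstate
CChain& ChainActive();           // replaces direct use of chainActive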
bool RewindBlockIndex(const CChainParams& params) {
- if (!g_chainstate.RewindBlockIndex(params)) {
+ if (!::ChainstateActive().RewindBlockIndex(params)) {
return false;
}
- if (chainActive.Tip() != nullptr) {
- // FlushStateToDisk can possibly read chainActive. Be conservative
+ LOCK(cs_main);
+ if (::ChainActive().Tip() != nullptr) {
+ // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
// and skip it here, we're about to -reindex-chainstate anyway, so
// it'll get called a bunch real soon.
CValidationState state;
- if (!FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
+ if (!::ChainstateActive().FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
return false;
}
@@ -4284,7 +4184,7 @@ void CChainState::UnloadBlockIndex() {
void UnloadBlockIndex()
{
LOCK(cs_main);
- chainActive.SetTip(nullptr);
+ ::ChainActive().SetTip(nullptr);
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
mempool.clear();
@@ -4304,7 +4204,7 @@ void UnloadBlockIndex()
mapBlockIndex.clear();
fHavePruned = false;
- g_chainstate.UnloadBlockIndex();
+ ::ChainstateActive().UnloadBlockIndex();
}
bool LoadBlockIndex(const CChainParams& chainparams)
@@ -4334,7 +4234,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
LOCK(cs_main);
// Check whether we're already initialized by checking for genesis in
- // mapBlockIndex. Note that we can't use chainActive here, since it is
+ // mapBlockIndex. Note that we can't use m_chain here, since it is
// set based on the coins db, not the block index db, which is the only
// thing loaded at this point.
if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
@@ -4342,7 +4242,7 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
try {
const CBlock& block = chainparams.GenesisBlock();
- CDiskBlockPos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
+ FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
if (blockPos.IsNull())
return error("%s: writing genesis block to disk failed", __func__);
CBlockIndex *pindex = AddToBlockIndex(block);
@@ -4356,13 +4256,13 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
bool LoadGenesisBlock(const CChainParams& chainparams)
{
- return g_chainstate.LoadGenesisBlock(chainparams);
+ return ::ChainstateActive().LoadGenesisBlock(chainparams);
}
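// FlatFilePos (from <flatfile.h>, included at the top of this diff) replaces
// CDiskBlockPos throughout these hunks. A minimal sketch of the type's shape,
// wrapped in a hypothetical helper so it stands alone; the offsets are an
// example, not taken from this diff:
static FlatFilePos ExampleBlockPos()
{
    FlatFilePos pos;   // a default-constructed position is "null"
    assert(pos.IsNull());
    pos.nFile = 0;     // e.g. blk00000.dat
    pos.nPos = 8;      // e.g. just past the 4-byte magic and 4-byte length prefix
    assert(!pos.IsNull());
    return pos;
}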
-bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
+bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos *dbp)
{
// Map of disk positions for blocks with unknown parent (only used for reindex)
- static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
+ static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
int nLoaded = 0;
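// The static multimap above parks blocks whose parent has not been seen yet,
// keyed by the parent's hash. The branch that does the parking falls outside
// the hunks shown; a paraphrased sketch of it (variable names assumed from the
// surrounding function) looks like:
//
//     if (hash != chainparams.GetConsensus().hashGenesisBlock && !LookupBlockIndex(block.hashPrevBlock)) {
//         // Parent unknown: remember where this block sits on disk so it can
//         // be revisited once the parent is accepted (see the queue below).
//         if (dbp) mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
//         continue;
//     }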
@@ -4421,7 +4321,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
CBlockIndex* pindex = LookupBlockIndex(hash);
if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
- if (g_chainstate.AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
+ if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
nLoaded++;
}
if (state.IsError()) {
@@ -4448,9 +4348,9 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
while (!queue.empty()) {
uint256 head = queue.front();
queue.pop_front();
- std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
+ std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
- std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
+ std::multimap<uint256, FlatFilePos>::iterator it = range.first;
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
{
@@ -4458,7 +4358,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
head.ToString());
LOCK(cs_main);
CValidationState dummy;
- if (g_chainstate.AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
+ if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
{
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
@@ -4491,8 +4391,8 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
// so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
- // iterating the block tree require that chainActive has been initialized.)
- if (chainActive.Height() < 0) {
+ // iterating the block tree require that m_chain has been initialized.)
+ if (m_chain.Height() < 0) {
assert(mapBlockIndex.size() <= 1);
return;
}
@@ -4536,7 +4436,7 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
if (pindex->pprev == nullptr) {
// Genesis block checks.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
- assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
+ assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
}
if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
@@ -4565,13 +4465,13 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) {
+ if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
if (pindexFirstInvalid == nullptr) {
// If this block sorts at least as well as the current tip and
// is valid and we have all data for its parents, it must be in
- // setBlockIndexCandidates. chainActive.Tip() must also be there
+ // setBlockIndexCandidates. m_chain.Tip() must also be there
// even if some data has been pruned.
- if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) {
+ if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block was in
@@ -4605,11 +4505,11 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
- // data for some intermediate block between chainActive and the
+ // data for some intermediate block between m_chain and the
// tip.
- // So if this block is itself better than chainActive.Tip() and it wasn't in
+ // So if this block is itself better than m_chain.Tip() and it wasn't in
// setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
+ if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
@@ -4680,24 +4580,24 @@ CBlockFileInfo* GetBlockFileInfo(size_t n)
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
+ return VersionBitsState(::ChainActive().Tip(), params, pos, versionbitscache);
}
BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsStatistics(chainActive.Tip(), params, pos);
+ return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
}
int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
+ return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos, versionbitscache);
}
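// Illustrative caller of the wrappers above; the deployment id is only an
// example and the snippet is not taken from this diff:
static bool TestDummyDeploymentActive()
{
    const ThresholdState state = VersionBitsTipState(Params().GetConsensus(), Consensus::DEPLOYMENT_TESTDUMMY);
    return state == ThresholdState::ACTIVE;
}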
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
-bool LoadMempool()
+bool LoadMempool(CTxMemPool& pool)
{
const CChainParams& chainparams = Params();
int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
@@ -4732,12 +4632,12 @@ bool LoadMempool()
CAmount amountdelta = nFeeDelta;
if (amountdelta) {
- mempool.PrioritiseTransaction(tx->GetHash(), amountdelta);
+ pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
}
CValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
- AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
+ AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nullptr /* pfMissingInputs */, nTime,
nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
false /* test_accept */);
if (state.IsValid()) {
@@ -4747,7 +4647,7 @@ bool LoadMempool()
// wallet(s) having loaded it while we were processing
// mempool transactions; consider these as valid, instead of
// failed, but mark them as 'already there'
- if (mempool.exists(tx->GetHash())) {
+ if (pool.exists(tx->GetHash())) {
++already_there;
} else {
++failed;
@@ -4763,7 +4663,7 @@ bool LoadMempool()
file >> mapDeltas;
for (const auto& i : mapDeltas) {
- mempool.PrioritiseTransaction(i.first, i.second);
+ pool.PrioritiseTransaction(i.first, i.second);
}
} catch (const std::exception& e) {
LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
@@ -4774,7 +4674,7 @@ bool LoadMempool()
return true;
}
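// With the pool passed explicitly, the init- and shutdown-time call sites pass
// the global instance instead of relying on it implicitly. A sketch of such a
// caller (the real call sites live outside this file and this diff):
static void SketchMempoolPersistence()
{
    if (gArgs.GetBoolArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
        LoadMempool(::mempool);  // on startup, before the node starts relaying
        // ... run ...
        DumpMempool(::mempool);  // on shutdown, persisting to mempool.dat
    }
}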
-bool DumpMempool()
+bool DumpMempool(const CTxMemPool& pool)
{
int64_t start = GetTimeMicros();
@@ -4785,11 +4685,11 @@ bool DumpMempool()
LOCK(dump_mutex);
{
- LOCK(mempool.cs);
- for (const auto &i : mempool.mapDeltas) {
+ LOCK(pool.cs);
+ for (const auto &i : pool.mapDeltas) {
mapDeltas[i.first] = i.second;
}
- vinfo = mempool.infoAll();
+ vinfo = pool.infoAll();
}
int64_t mid = GetTimeMicros();