Diffstat (limited to 'src/validation.cpp')
-rw-r--r--  src/validation.cpp | 1196
1 file changed, 620 insertions(+), 576 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index 563cd59382..4347e0b0cc 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -8,18 +8,20 @@
#include <arith_uint256.h>
#include <chain.h>
#include <chainparams.h>
-#include <checkpoints.h>
#include <checkqueue.h>
#include <consensus/consensus.h>
#include <consensus/merkle.h>
+#include <consensus/tx_check.h>
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
+#include <flatfile.h>
#include <hash.h>
#include <index/txindex.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
+#include <policy/settings.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
@@ -34,15 +36,19 @@
#include <txdb.h>
#include <txmempool.h>
#include <ui_interface.h>
+#include <uint256.h>
#include <undo.h>
-#include <util.h>
-#include <utilmoneystr.h>
-#include <utilstrencodings.h>
+#include <util/moneystr.h>
+#include <util/rbf.h>
+#include <util/strencodings.h>
+#include <util/system.h>
+#include <util/validation.h>
#include <validationinterface.h>
#include <warnings.h>
#include <future>
#include <sstream>
+#include <string>
#include <boost/algorithm/string/replace.hpp>
#include <boost/thread.hpp>
@@ -151,37 +157,39 @@ private:
CCriticalSection m_cs_chainstate;
public:
- CChain chainActive;
- BlockMap mapBlockIndex;
+ //! The current chain of blockheaders we consult and build on.
+ //! @see CChain, CBlockIndex.
+ CChain m_chain;
+ BlockMap mapBlockIndex GUARDED_BY(cs_main);
std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CBlockIndex *pindexBestInvalid = nullptr;
bool LoadBlockIndex(const Consensus::Params& consensus_params, CBlockTreeDB& blocktree) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock);
+ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) LOCKS_EXCLUDED(cs_main);
/**
* If a block header hasn't already been seen, call CheckBlockHeader on it, ensure
* that it doesn't descend from an invalid block, and then add it to mapBlockIndex.
*/
bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Block (dis)connection on a given view:
DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view);
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
- CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false);
+ CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck = false) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Block disconnection on our pcoinsTip:
- bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool);
+ bool DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions* disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
// Manual block validity manipulation:
bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
- bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool ReplayBlocks(const CChainParams& params, CCoinsView* view);
- bool RewindBlockIndex(const CChainParams& params);
+ bool RewindBlockIndex(const CChainParams& params) LOCKS_EXCLUDED(cs_main);
bool LoadGenesisBlock(const CChainParams& chainparams);
void PruneBlockIndexCandidates();
@@ -189,8 +197,8 @@ public:
void UnloadBlockIndex();
private:
- bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace);
- bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool);
+ bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ bool ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Create a new block index entry for a given block hash */
@@ -202,30 +210,40 @@ private:
*/
void CheckBlockIndex(const Consensus::Params& consensusParams);
- void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state);
+ void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
CBlockIndex* FindMostWorkChain() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
+ void ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-} g_chainstate;
+ //! Mark a block as not having block data
+ void EraseBlockData(CBlockIndex* index) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+} g_chainstate;
+CChain& ChainActive() { return g_chainstate.m_chain; }
-CCriticalSection cs_main;
+/**
+ * Mutex to guard access to validation specific variables, such as reading
+ * or changing the chainstate.
+ *
+ * This may also need to be locked when updating the transaction pool, e.g. on
+ * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
+ *
+ * The transaction pool has a separate lock to allow reading from it and the
+ * chainstate at the same time.
+ */
+RecursiveMutex cs_main;
BlockMap& mapBlockIndex = g_chainstate.mapBlockIndex;
-CChain& chainActive = g_chainstate.chainActive;
CBlockIndex *pindexBestHeader = nullptr;
-CWaitableCriticalSection g_best_block_mutex;
-CConditionVariable g_best_block_cv;
+Mutex g_best_block_mutex;
+std::condition_variable g_best_block_cv;
uint256 g_best_block;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
bool fHavePruned = false;
bool fPruneMode = false;
-bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
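Note on the lock annotations added above: GUARDED_BY, EXCLUSIVE_LOCKS_REQUIRED and LOCKS_EXCLUDED are checked by Clang's thread-safety analysis, and callers are expected to take cs_main through the RAII LOCK() macro before touching the guarded state. A minimal sketch of that calling pattern (the helper below is hypothetical and purely illustrative, not part of this change):

    #include <chain.h>      // CBlockIndex
    #include <sync.h>       // LOCK, RecursiveMutex, AssertLockHeld
    #include <validation.h> // cs_main, mapBlockIndex

    // Hypothetical helper: look up a header by hash while honoring the
    // GUARDED_BY(cs_main) annotation on mapBlockIndex.
    static const CBlockIndex* LookupHeader(const uint256& hash)
    {
        LOCK(cs_main);            // RAII lock satisfies the annotation
        AssertLockHeld(cs_main);  // runtime mirror of the static requirement
        auto it = mapBlockIndex.find(hash);
        return it == mapBlockIndex.end() ? nullptr : it->second;
    }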
@@ -238,17 +256,13 @@ uint256 hashAssumeValid;
arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
-CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CBlockPolicyEstimator feeEstimator;
CTxMemPool mempool(&feeEstimator);
-std::atomic_bool g_is_mempool_loaded{false};
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
-const std::string strMessageMagic = "Bitcoin Signed Message:\n";
-
// Internal stuff
namespace {
CBlockIndex *&pindexBestInvalid = g_chainstate.pindexBestInvalid;
@@ -309,7 +323,9 @@ static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
-static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
+static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
+static FlatFileSeq BlockFileSeq();
+static FlatFileSeq UndoFileSeq();
bool CheckFinalTx(const CTransaction &tx, int flags)
{
@@ -323,13 +339,13 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// scheduled, so no flags are set.
flags = std::max(flags, 0);
- // CheckFinalTx() uses chainActive.Height()+1 to evaluate
+ // CheckFinalTx() uses ::ChainActive().Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
- // IsFinalTx() with one more than chainActive.Height().
- const int nBlockHeight = chainActive.Height() + 1;
+ // IsFinalTx() with one more than ::ChainActive().Height().
+ const int nBlockHeight = ::ChainActive().Height() + 1;
// BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
@@ -337,7 +353,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// chain tip, so we use that to calculate the median time passed to
// IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
- ? chainActive.Tip()->GetMedianTimePast()
+ ? ::ChainActive().Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
@@ -350,9 +366,9 @@ bool TestLockPointValidity(const LockPoints* lp)
// If there are relative lock times then the maxInputBlock will be set
// If there are no relative lock times, the LockPoints don't depend on the chain
if (lp->maxInputBlock) {
- // Check whether chainActive is an extension of the block at which the LockPoints
+ // Check whether ::ChainActive() is an extension of the block at which the LockPoints
// calculation was valid. If not LockPoints are no longer valid
- if (!chainActive.Contains(lp->maxInputBlock)) {
+ if (!::ChainActive().Contains(lp->maxInputBlock)) {
return false;
}
}
@@ -361,22 +377,22 @@ bool TestLockPointValidity(const LockPoints* lp)
return true;
}
-bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool useExistingLockPoints)
+bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints)
{
AssertLockHeld(cs_main);
- AssertLockHeld(mempool.cs);
+ AssertLockHeld(pool.cs);
- CBlockIndex* tip = chainActive.Tip();
+ CBlockIndex* tip = ::ChainActive().Tip();
assert(tip != nullptr);
CBlockIndex index;
index.pprev = tip;
- // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
+ // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate
// height based locks because when SequenceLocks() is called within
// ConnectBlock(), the height of the block *being*
// evaluated is what is used.
// Thus if we want to know if a transaction can be part of the
- // *next* block, we need to use one more than chainActive.Height()
+ // *next* block, we need to use one more than ::ChainActive().Height()
index.nHeight = tip->nHeight + 1;
std::pair<int, int64_t> lockPair;
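The reworked CheckSequenceLocks() now takes the mempool explicitly and asserts that both cs_main and pool.cs are held. A condensed sketch of the new calling convention, assuming a CTxMemPool pool and a CTransaction tx are already in scope:

    LockPoints lp;
    {
        LOCK2(cs_main, pool.cs);  // both locks, matching the AssertLockHeld calls above
        if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
            // tx is not BIP68-final for the next block
        }
    }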
@@ -386,8 +402,8 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
lockPair.second = lp->time;
}
else {
- // pcoinsTip contains the UTXO set for chainActive.Tip()
- CCoinsViewMemPool viewMemPool(pcoinsTip.get(), mempool);
+ // pcoinsTip contains the UTXO set for ::ChainActive().Tip()
+ CCoinsViewMemPool viewMemPool(pcoinsTip.get(), pool);
std::vector<int> prevheights;
prevheights.resize(tx.vin.size());
for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
@@ -421,7 +437,7 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
// lock on a mempool input, so we can use the return value of
// CheckSequenceLocks to indicate the LockPoints validity
int maxInputHeight = 0;
- for (int height : prevheights) {
+ for (const int height : prevheights) {
// Can ignore mempool inputs since we'll fail if they had non-zero locks
if (height != tip->nHeight+1) {
maxInputHeight = std::max(maxInputHeight, height);
@@ -448,23 +464,14 @@ static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age)
pcoinsTip->Uncache(removed);
}
-/** Convert CValidationState to a human-readable message for logging */
-std::string FormatStateMessage(const CValidationState &state)
-{
- return strprintf("%s%s (code %i)",
- state.GetRejectReason(),
- state.GetDebugMessage().empty() ? "" : ", "+state.GetDebugMessage(),
- state.GetRejectCode());
-}
-
-static bool IsCurrentForFeeEstimation()
+static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
if (IsInitialBlockDownload())
return false;
- if (chainActive.Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
+ if (::ChainActive().Tip()->GetBlockTime() < (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE))
return false;
- if (chainActive.Height() < pindexBestHeader->nHeight - 1)
+ if (::ChainActive().Height() < pindexBestHeader->nHeight - 1)
return false;
return true;
}
@@ -482,7 +489,7 @@ static bool IsCurrentForFeeEstimation()
* and instead just erase from the mempool as needed.
*/
-static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool)
+static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
std::vector<uint256> vHashUpdate;
@@ -516,7 +523,7 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool,
mempool.UpdateTransactionsFromBlock(vHashUpdate);
// We also need to remove any now-immature transactions
- mempool.removeForReorg(pcoinsTip.get(), chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
+ mempool.removeForReorg(pcoinsTip.get(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
@@ -524,7 +531,7 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool,
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
- unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) {
+ unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
// pool.cs should be locked already, but go ahead and re-take the lock here
@@ -557,9 +564,16 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
+/**
+ * @param[out] coins_to_uncache Return any outpoints which were not previously present in the
+ * coins cache, but were added as a result of validating the tx
+ * for mempool acceptance. This allows the caller to optionally
+ * remove the cache additions if the associated transaction ends
+ * up being rejected by the mempool.
+ */
static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept)
+ bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
const CTransaction& tx = *ptx;
const uint256 hash = tx.GetHash();
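The new comment above documents the coins_to_uncache out-parameter. A condensed sketch of the caller-side contract (the real caller, AcceptToMemoryPoolWithTime, is updated later in this diff); chainparams, pool, state and ptx are assumed to be in scope:

    std::vector<COutPoint> coins_to_uncache;
    bool missing_inputs = false;
    const bool ok = AcceptToMemoryPoolWorker(chainparams, pool, state, ptx,
                                             &missing_inputs, GetTime(),
                                             nullptr /* plTxnReplaced */,
                                             false /* bypass_limits */,
                                             0 /* nAbsurdFee */,
                                             coins_to_uncache,
                                             true /* test_accept */);
    if (!ok) {
        // Drop any coins that validating this tx pulled into the cache.
        for (const COutPoint& outpoint : coins_to_uncache) {
            pcoinsTip->Uncache(outpoint);
        }
    }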
@@ -574,38 +588,36 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
if (fRequireStandard && !IsStandardTx(tx, reason))
- return state.DoS(0, false, REJECT_NONSTANDARD, reason);
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, reason);
// Do not work on transactions that are too small.
// A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
// Transactions smaller than this are not relayed to reduce unnecessary malloc overhead.
- if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
- return state.DoS(0, false, REJECT_NONSTANDARD, "tx-size-small");
+ if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "tx-size-small");
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
- return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
+ return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
if (pool.exists(hash)) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-already-in-mempool");
+ return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
std::set<uint256> setConflicts;
for (const CTxIn &txin : tx.vin)
{
- auto itConflicting = pool.mapNextTx.find(txin.prevout);
- if (itConflicting != pool.mapNextTx.end())
- {
- const CTransaction *ptxConflicting = itConflicting->second;
+ const CTransaction* ptxConflicting = pool.GetConflictTx(txin.prevout);
+ if (ptxConflicting) {
if (!setConflicts.count(ptxConflicting->GetHash()))
{
// Allow opt-out of transaction replacement by setting
@@ -633,7 +645,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
if (fReplacementOptOut) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-mempool-conflict");
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_DUPLICATE, "txn-mempool-conflict");
}
setConflicts.insert(ptxConflicting->GetHash());
@@ -654,12 +666,16 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
+
+ // Note: this call may add txin.prevout to the coins cache
+ // (pcoinsTip.cacheCoins) by way of FetchCoin(). It should be removed
+ // later (via coins_to_uncache) if this tx turns out to be invalid.
if (!view.HaveCoin(txin.prevout)) {
// Are inputs missing because we already have the tx?
for (size_t out = 0; out < tx.vout.size(); out++) {
// Optimistically just do efficient check of cache for outputs
if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
- return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
+ return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
}
}
// Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
@@ -681,8 +697,8 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// be mined yet.
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a
// CoinsViewCache instead of create its own
- if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
- return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
+ if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
+ return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
@@ -691,11 +707,11 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Check for non-standard pay-to-script-hash in inputs
if (fRequireStandard && !AreInputsStandard(tx, view))
- return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
- return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);
+ return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
@@ -714,31 +730,26 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
- CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, chainActive.Height(),
+ CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
fSpendsCoinbase, nSigOpsCost, lp);
unsigned int nSize = entry.GetTxSize();
- // Check that the transaction doesn't have an excessive number of
- // sigops, making it impossible to mine. Since the coinbase transaction
- // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
- // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
- // merely non-standard transaction.
if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
- return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
strprintf("%d", nSigOpsCost));
CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
}
// No transactions are allowed below minRelayTxFee except from disconnected blocks
if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", false, strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
}
if (nAbsurdFee && nFees > nAbsurdFee)
- return state.Invalid(false,
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
REJECT_HIGHFEE, "absurdly-high-fee",
strprintf("%d > %d", nFees, nAbsurdFee));
@@ -750,7 +761,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
- return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
}
// A transaction that spends outputs that would be replaced by it is invalid. Now
@@ -762,8 +773,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
if (setConflicts.count(hashAncestor))
{
- return state.DoS(10, false,
- REJECT_INVALID, "bad-txns-spends-conflicting-tx", false,
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
strprintf("%s spends conflicting transaction %s",
hash.ToString(),
hashAncestor.ToString()));
@@ -786,16 +796,8 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CFeeRate newFeeRate(nModifiedFees, nSize);
std::set<uint256> setConflictsParents;
const int maxDescendantsToVisit = 100;
- CTxMemPool::setEntries setIterConflicting;
- for (const uint256 &hashConflicting : setConflicts)
- {
- CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
- if (mi == pool.mapTx.end())
- continue;
-
- // Save these to avoid repeated lookups
- setIterConflicting.insert(mi);
-
+ const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts);
+ for (const auto& mi : setIterConflicting) {
// Don't allow the replacement to reduce the feerate of the
// mempool.
//
@@ -813,8 +815,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
if (newFeeRate <= oldFeeRate)
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
newFeeRate.ToString(),
@@ -842,8 +843,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
nConflictingSize += it->GetTxSize();
}
} else {
- return state.DoS(0, false,
- REJECT_NONSTANDARD, "too many potential replacements", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
hash.ToString(),
nConflictingCount,
@@ -861,11 +861,11 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Rather than check the UTXO set - potentially expensive -
// it's cheaper to just check if the new input refers to a
// tx that's in the mempool.
- if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
- return state.DoS(0, false,
- REJECT_NONSTANDARD, "replacement-adds-unconfirmed", false,
+ if (pool.exists(tx.vin[j].prevout.hash)) {
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
strprintf("replacement %s adds unconfirmed input, idx %d",
hash.ToString(), j));
+ }
}
}
@@ -874,8 +874,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// transactions would not be paid for.
if (nModifiedFees < nConflictingFees)
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
@@ -885,8 +884,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CAmount nDeltaFees = nModifiedFees - nConflictingFees;
if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
{
- return state.DoS(0, false,
- REJECT_INSUFFICIENTFEE, "insufficient fee", false,
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
hash.ToString(),
FormatMoney(nDeltaFees),
@@ -894,10 +892,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
- unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
- if (!chainparams.RequireStandard()) {
- scriptVerifyFlags = gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
- }
+ constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
@@ -910,8 +905,10 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
!CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
// Only the witness is missing, so the transaction itself may be fine.
- state.SetCorruptionPossible();
+ state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
}
+ assert(IsTransactionReason(state.GetReason()));
return false; // state filled in by CheckInputs
}
@@ -930,22 +927,10 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks (using TestBlockValidity), however allowing such
// transactions into the mempool can be exploited as a DoS attack.
- unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), Params().GetConsensus());
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
- // If we're using promiscuousmempoolflags, we may hit this normally
- // Check if current block has some flags that scriptVerifyFlags
- // does not before printing an ominous warning
- if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
- return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against latest-block but not STANDARD flags %s, %s",
+ return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
__func__, hash.ToString(), FormatStateMessage(state));
- } else {
- if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, false, txdata)) {
- return error("%s: ConnectInputs failed against MANDATORY but not STANDARD flags due to promiscuous mempool %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
- } else {
- LogPrintf("Warning: -promiscuousmempool flags set to not include currently enforced soft forks, this may break mining or otherwise cause instability!\n");
- }
- }
}
if (test_accept) {
@@ -974,13 +959,13 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
// Store transaction in memory
- pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation);
+ pool.addUnchecked(entry, setAncestors, validForFeeEstimation);
// trim mempool and check if tx was trimmed
if (!bypass_limits) {
LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
- return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
+ return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
}
@@ -992,11 +977,16 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
/** (try to) add transaction to memory pool with a specified acceptance time **/
static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
+ bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::vector<COutPoint> coins_to_uncache;
bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
if (!res) {
+ // Remove coins that were not present in the coins cache before calling ATMPW;
+ // this is to prevent memory DoS in case we receive a large number of
+ // invalid transactions that attempt to overrun the in-memory coins cache
+ // (`CCoinsViewCache::cacheCoins`).
+
for (const COutPoint& hashTx : coins_to_uncache)
pcoinsTip->Uncache(hashTx);
}
@@ -1018,13 +1008,11 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
* Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock.
* If blockIndex is provided, the transaction is fetched from the corresponding block.
*/
-bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, bool fAllowSlow, CBlockIndex* blockIndex)
+bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus::Params& consensusParams, uint256& hashBlock, const CBlockIndex* const block_index)
{
- CBlockIndex* pindexSlow = blockIndex;
-
LOCK(cs_main);
- if (!blockIndex) {
+ if (!block_index) {
CTransactionRef ptx = mempool.get(hash);
if (ptx) {
txOut = ptx;
@@ -1034,20 +1022,13 @@ bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus
if (g_txindex) {
return g_txindex->FindTx(hash, hashBlock, txOut);
}
-
- if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
- const Coin& coin = AccessByTxid(*pcoinsTip, hash);
- if (!coin.IsSpent()) pindexSlow = chainActive[coin.nHeight];
- }
- }
-
- if (pindexSlow) {
+ } else {
CBlock block;
- if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
+ if (ReadBlockFromDisk(block, block_index, consensusParams)) {
for (const auto& tx : block.vtx) {
if (tx->GetHash() == hash) {
txOut = tx;
- hashBlock = pindexSlow->GetBlockHash();
+ hashBlock = block_index->GetBlockHash();
return true;
}
}
@@ -1067,7 +1048,7 @@ bool GetTransaction(const uint256& hash, CTransactionRef& txOut, const Consensus
// CBlock and CBlockIndex
//
-static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
+static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1075,7 +1056,7 @@ static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMes
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
- unsigned int nSize = GetSerializeSize(fileout, block);
+ unsigned int nSize = GetSerializeSize(block, fileout.GetVersion());
fileout << messageStart << nSize;
// Write block
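The serializer change visible above drops the stream argument: GetSerializeSize() now takes the object plus a serialization version. An illustrative one-liner mirroring the replacement lines in this hunk:

    // Size of the block payload as it will be written to disk.
    unsigned int payload_size = GetSerializeSize(block, CLIENT_VERSION);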
@@ -1088,7 +1069,7 @@ static bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMes
return true;
}
-bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
+bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
@@ -1114,7 +1095,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus:
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
- CDiskBlockPos blockPos;
+ FlatFilePos blockPos;
{
LOCK(cs_main);
blockPos = pindex->GetBlockPos();
@@ -1128,9 +1109,9 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& message_start)
+bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
{
- CDiskBlockPos hpos = pos;
+ FlatFilePos hpos = pos;
hpos.nPos -= 8; // Seek back 8 bytes for meta header
CAutoFile filein(OpenBlockFile(hpos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull()) {
@@ -1165,7 +1146,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CDiskBlockPos& pos,
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
{
- CDiskBlockPos block_pos;
+ FlatFilePos block_pos;
{
LOCK(cs_main);
block_pos = pindex->GetBlockPos();
@@ -1200,11 +1181,11 @@ bool IsInitialBlockDownload()
return false;
if (fImporting || fReindex)
return true;
- if (chainActive.Tip() == nullptr)
+ if (::ChainActive().Tip() == nullptr)
return true;
- if (chainActive.Tip()->nChainWork < nMinimumChainWork)
+ if (::ChainActive().Tip()->nChainWork < nMinimumChainWork)
return true;
- if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
+ if (::ChainActive().Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
latchToFalse.store(true, std::memory_order_relaxed);
@@ -1231,7 +1212,7 @@ static void AlertNotify(const std::string& strMessage)
t.detach(); // thread runs free
}
-static void CheckForkWarningConditions()
+static void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
@@ -1241,10 +1222,10 @@ static void CheckForkWarningConditions()
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
- if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
+ if (pindexBestForkTip && ::ChainActive().Height() - pindexBestForkTip->nHeight >= 72)
pindexBestForkTip = nullptr;
- if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
+ if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > ::ChainActive().Tip()->nChainWork + (GetBlockProof(*::ChainActive().Tip()) * 6)))
{
if (!GetfLargeWorkForkFound() && pindexBestForkBase)
{
@@ -1272,12 +1253,12 @@ static void CheckForkWarningConditions()
}
}
-static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
+static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
- CBlockIndex* plonger = chainActive.Tip();
+ CBlockIndex* plonger = ::ChainActive().Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
@@ -1296,7 +1277,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
- chainActive.Height() - pindexNewForkTip->nHeight < 72)
+ ::ChainActive().Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
@@ -1305,7 +1286,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
CheckForkWarningConditions();
}
-void static InvalidChainFound(CBlockIndex* pindexNew)
+void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
@@ -1313,16 +1294,16 @@ void static InvalidChainFound(CBlockIndex* pindexNew)
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), FormatISO8601DateTime(pindexNew->GetBlockTime()));
- CBlockIndex *tip = chainActive.Tip();
+ CBlockIndex *tip = ::ChainActive().Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
- tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
+ tip->GetBlockHash().ToString(), ::ChainActive().Height(), log(tip->nChainWork.getdouble())/log(2.0),
FormatISO8601DateTime(tip->GetBlockTime()));
CheckForkWarningConditions();
}
void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
- if (!state.CorruptionPossible()) {
+ if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
m_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
@@ -1390,9 +1371,12 @@ void InitScriptExecutionCache() {
* which are matched. This is useful for checking blocks where we will likely never need the cache
* entry again.
*
+ * Note that we may set state.reason to NOT_STANDARD for extra soft-fork flags in flags, block-checking
+ * callers should probably reset it to CONSENSUS in such cases.
+ *
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
-bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
if (!tx.IsCoinBase())
{
@@ -1445,22 +1429,26 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
- // arguments; if so, don't trigger DoS protection to
- // avoid splitting the network between upgraded and
- // non-upgraded nodes.
+ // arguments; if so, ensure we return NOT_STANDARD
+ // instead of CONSENSUS to avoid downstream users
+ // splitting the network between upgraded and
+ // non-upgraded nodes by banning CONSENSUS-failing
+ // data providers.
CScriptCheck check2(coin.out, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
- return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
+ return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
- // Failures of other flags indicate a transaction that is
- // invalid in new blocks, e.g. an invalid P2SH. We DoS ban
- // such nodes as they are not following the protocol. That
- // said during an upgrade careful thought should be taken
- // as to the correct behavior - we may want to continue
- // peering with non-upgraded nodes even after soft-fork
- // super-majority signaling has occurred.
- return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
+ // MANDATORY flag failures correspond to
+ // ValidationInvalidReason::CONSENSUS. Because CONSENSUS
+ // failures are the most serious case of validation
+ // failures, we may need to consider using
+ // RECENT_CONSENSUS_CHANGE for any script failure that
+ // could be due to non-upgraded nodes which we may want to
+ // support, to avoid splitting the network (but this
+ // depends on the details of how net_processing handles
+ // such errors).
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
@@ -1475,9 +1463,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
return true;
}
-namespace {
-
-bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
+static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
@@ -1485,7 +1471,7 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint
return error("%s: OpenUndoFile failed", __func__);
// Write index header
- unsigned int nSize = GetSerializeSize(fileout, blockundo);
+ unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
fileout << messageStart << nSize;
// Write undo data
@@ -1504,9 +1490,9 @@ bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint
return true;
}
-static bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex *pindex)
+bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- CDiskBlockPos pos = pindex->GetUndoPos();
+ FlatFilePos pos = pindex->GetUndoPos();
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
@@ -1553,8 +1539,6 @@ static bool AbortNode(CValidationState& state, const std::string& strMessage, co
return state.Error(strMessage);
}
-} // namespace
-
/**
* Restore the UTXO in a Coin at a given COutPoint
* @param undo The Coin to be restored.
@@ -1652,38 +1636,25 @@ void static FlushBlockFile(bool fFinalize = false)
{
LOCK(cs_LastBlockFile);
- CDiskBlockPos posOld(nLastBlockFile, 0);
- bool status = true;
-
- FILE *fileOld = OpenBlockFile(posOld);
- if (fileOld) {
- if (fFinalize)
- status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
- status &= FileCommit(fileOld);
- fclose(fileOld);
- }
-
- fileOld = OpenUndoFile(posOld);
- if (fileOld) {
- if (fFinalize)
- status &= TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
- status &= FileCommit(fileOld);
- fclose(fileOld);
- }
+ FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
+ FlatFilePos undo_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nUndoSize);
+ bool status = true;
+ status &= BlockFileSeq().Flush(block_pos_old, fFinalize);
+ status &= UndoFileSeq().Flush(undo_pos_old, fFinalize);
if (!status) {
AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
}
}
-static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
+static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
- CDiskBlockPos _pos;
- if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
+ FlatFilePos _pos;
+ if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
return error("ConnectBlock(): FindUndoPos failed");
if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
return AbortNode(state, "Failed to write undo data");
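The hand-rolled TruncateFile()/FileCommit() sequence above is replaced by FlatFileSeq::Flush(). The BlockFileSeq()/UndoFileSeq() definitions fall outside this excerpt; as an assumption, they presumably wrap a FlatFileSeq over the blocks directory with the "blk"/"rev" prefixes, along these lines:

    // Sketch only; the real definitions live elsewhere in this diff.
    static FlatFileSeq BlockFileSeq()
    {
        return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
    }

    // Flushing (and optionally finalizing) the current block file then becomes:
    FlatFilePos pos(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
    if (!BlockFileSeq().Flush(pos, /* fFinalize= */ true)) {
        AbortNode("Flushing block file to disk failed.");
    }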
@@ -1699,13 +1670,12 @@ static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState&
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
-void ThreadScriptCheck() {
- RenameThread("bitcoin-scriptch");
+void ThreadScriptCheck(int worker_num) {
+ util::ThreadRename(strprintf("scriptch.%i", worker_num));
scriptcheckqueue.Thread();
}
-// Protected by cs_main
-VersionBitsCache versionbitscache;
+VersionBitsCache versionbitscache GUARDED_BY(cs_main);
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
@@ -1746,8 +1716,7 @@ public:
}
};
-// Protected by cs_main
-static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
+static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS] GUARDED_BY(cs_main);
// 0.13.0 was shipped with a segwit deployment defined for testnet, but not for
// mainnet. We no longer need to support disabling the segwit deployment
@@ -1758,7 +1727,7 @@ static bool IsScriptWitnessEnabled(const Consensus::Params& params)
return params.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0;
}
-static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) {
+static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
unsigned int flags = SCRIPT_VERIFY_NONE;
@@ -1839,7 +1808,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// re-enforce that rule here (at least until we make it impossible for
// GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
- if (state.CorruptionPossible()) {
+ if (state.GetReason() == ValidationInvalidReason::BLOCK_MUTATED) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
// problems.
@@ -1974,7 +1943,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
for (const auto& tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
- return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
}
@@ -2014,11 +1983,19 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
{
CAmount txfee = 0;
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
+ if (!IsBlockReason(state.GetReason())) {
+ // CheckTxInputs may return MISSING_INPUTS or
+ // PREMATURE_SPEND but we can't return that, as it's not
+ // defined for a block, so we reset the reason flag to
+ // CONSENSUS here.
+ state.Invalid(ValidationInvalidReason::CONSENSUS, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ }
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
nFees += txfee;
if (!MoneyRange(nFees)) {
- return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: accumulated fee in the block out of range.", __func__),
REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
}
@@ -2031,7 +2008,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
- return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
}
@@ -2042,7 +2019,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// * witness (when witness enabled in flags and excludes coinbase)
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
- return state.DoS(100, error("ConnectBlock(): too many sigops"),
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
txdata.emplace_back(tx);
@@ -2050,9 +2027,20 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
{
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
- if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
+ if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) {
+ if (state.GetReason() == ValidationInvalidReason::TX_NOT_STANDARD) {
+ // CheckInputs may return NOT_STANDARD for extra flags we passed,
+ // but we can't return that, as it's not defined for a block, so
+ // we reset the reason flag to CONSENSUS here.
+ // In the event of a future soft-fork, we may need to
+ // consider whether rewriting to CONSENSUS or
+ // RECENT_CONSENSUS_CHANGE would be more appropriate.
+ state.Invalid(ValidationInvalidReason::CONSENSUS, false,
+ state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ }
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
+ }
control.Add(vChecks);
}
@@ -2067,13 +2055,13 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
- return state.DoS(100,
+ return state.Invalid(ValidationInvalidReason::CONSENSUS,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!control.Wait())
- return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
@@ -2161,8 +2149,9 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Depend on nMinDiskSpace to ensure we can write block index
- if (!CheckDiskSpace(0, true))
- return state.Error("out of disk space");
+ if (!CheckDiskSpace(GetBlocksDir())) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
// First make sure all block and undo data is flushed to disk.
FlushBlockFile();
// Then update all block file information (which may refer to block and undo files).
@@ -2195,8 +2184,9 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
- if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
- return state.Error("out of disk space");
+ if (!CheckDiskSpace(GetDataDir(), 48 * 2 * 2 * pcoinsTip->GetCacheSize())) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
// Flush the chainstate (which may refer to block index entries).
if (!pcoinsTip->Flush())
return AbortNode(state, "Failed to write to coin database");
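As a worked example of the bound checked above (illustrative numbers, not from the diff): each dirty coin entry is estimated at 48 bytes, written twice (log plus table), with a further 2x safety factor, so a cache of 2,000,000 dirty entries requires roughly 48 * 2 * 2 * 2,000,000 ≈ 384 MB of free space before the flush proceeds.

// Illustrative restatement of the estimate passed to CheckDiskSpace above.
static uint64_t EstimateCoinsFlushBytes(uint64_t dirty_cache_entries)
{
    return 48ull * 2 * 2 * dirty_cache_entries; // ~384 MB for 2M entries
}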
@@ -2206,7 +2196,7 @@ bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &
}
if (full_flush_completed) {
// Update best block in wallet (so we can detect restored wallets).
- GetMainSignals().ChainStateFlushed(chainActive.GetLocator());
+ GetMainSignals().ChainStateFlushed(::ChainActive().GetLocator());
}
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error while flushing: ") + e.what());
@@ -2254,7 +2244,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
mempool.AddTransactionsUpdated(1);
{
- WaitableLock lock(g_best_block_mutex);
+ LOCK(g_best_block_mutex);
g_best_block = pindexNew->GetBlockHash();
g_best_block_cv.notify_all();
}
@@ -2286,12 +2276,6 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
}
if (nUpgraded > 0)
AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version"), nUpgraded));
- if (nUpgraded > 100/2)
- {
- std::string strWarning = _("Warning: Unknown block versions being mined! It's possible unknown rules are in effect");
- // notify GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
- DoWarning(strWarning);
- }
}
LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, /* Continued */
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
@@ -2304,7 +2288,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
}
-/** Disconnect chainActive's tip.
+/** Disconnect m_chain's tip.
* After calling, the mempool will be in an inconsistent state, with
* transactions from disconnected blocks being added to disconnectpool. You
* should make the mempool consistent again by calling UpdateMempoolForReorg.
@@ -2316,7 +2300,7 @@ void static UpdateTip(const CBlockIndex *pindexNew, const CChainParams& chainPar
*/
bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
{
- CBlockIndex *pindexDelete = chainActive.Tip();
+ CBlockIndex *pindexDelete = m_chain.Tip();
assert(pindexDelete);
// Read block from disk.
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
@@ -2351,7 +2335,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
}
}
- chainActive.SetTip(pindexDelete->pprev);
+ m_chain.SetTip(pindexDelete->pprev);
UpdateTip(pindexDelete->pprev, chainparams);
// Let wallets know transactions went from 1-confirmed to
@@ -2392,14 +2376,11 @@ class ConnectTrace {
private:
std::vector<PerBlockConnectTrace> blocksConnected;
CTxMemPool &pool;
+ boost::signals2::scoped_connection m_connNotifyEntryRemoved;
public:
explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
- pool.NotifyEntryRemoved.connect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
- }
-
- ~ConnectTrace() {
- pool.NotifyEntryRemoved.disconnect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
+ m_connNotifyEntryRemoved = pool.NotifyEntryRemoved.connect(std::bind(&ConnectTrace::NotifyEntryRemoved, this, std::placeholders::_1, std::placeholders::_2));
}
void BlockConnected(CBlockIndex* pindex, std::shared_ptr<const CBlock> pblock) {
@@ -2432,14 +2413,14 @@ public:
};
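The switch to boost::signals2::scoped_connection above replaces the manual connect/disconnect pair with RAII: the connection is dropped automatically when the ConnectTrace member is destroyed. A minimal standalone sketch of the idiom (the signal and handler here are invented for illustration, not taken from this diff):

#include <boost/signals2.hpp>
#include <cstdio>

struct Listener {
    boost::signals2::scoped_connection m_conn;
    explicit Listener(boost::signals2::signal<void(int)>& sig)
        // Connected here; disconnected automatically when Listener is destroyed,
        // so no matching disconnect call (and no destructor) is needed.
        : m_conn(sig.connect([](int n) { std::printf("got %d\n", n); }))
    {
    }
};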
/**
- * Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock
+ * Connect a new block to m_chain. pblock is either nullptr or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*
* The block is added to connectTrace if connection succeeds.
*/
bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
- assert(pindexNew->pprev == chainActive.Tip());
+ assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
std::shared_ptr<const CBlock> pthisBlock;
@@ -2463,7 +2444,7 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
- return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
+ return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), FormatStateMessage(state));
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
@@ -2480,8 +2461,8 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp
// Remove conflicting transactions from the mempool.;
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
disconnectpool.removeForBlock(blockConnecting.vtx);
- // Update chainActive & related variables.
- chainActive.SetTip(pindexNew);
+ // Update m_chain & related variables.
+ m_chain.SetTip(pindexNew);
UpdateTip(pindexNew, chainparams);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
@@ -2512,8 +2493,8 @@ CBlockIndex* CChainState::FindMostWorkChain() {
// Just going until the active chain is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
- while (pindexTest && !chainActive.Contains(pindexTest)) {
- assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
+ while (pindexTest && !m_chain.Contains(pindexTest)) {
+ assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
@@ -2555,7 +2536,7 @@ void CChainState::PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
- while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
+ while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, m_chain.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
@@ -2570,13 +2551,13 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
{
AssertLockHeld(cs_main);
- const CBlockIndex *pindexOldTip = chainActive.Tip();
- const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
+ const CBlockIndex *pindexOldTip = m_chain.Tip();
+ const CBlockIndex *pindexFork = m_chain.FindFork(pindexMostWork);
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
DisconnectedBlockTransactions disconnectpool;
- while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
+ while (m_chain.Tip() && m_chain.Tip() != pindexFork) {
if (!DisconnectTip(state, chainparams, &disconnectpool)) {
// This is likely a fatal error, but keep the mempool consistent,
// just in case. Only remove from the mempool in this case.
@@ -2608,7 +2589,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
- if (!state.CorruptionPossible()) {
+ if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
InvalidChainFound(vpindexToConnect.front());
}
state = CValidationState();
@@ -2624,7 +2605,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
}
} else {
PruneBlockIndexCandidates();
- if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
+ if (!pindexOldTip || m_chain.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return temporarily to release the lock.
fContinue = false;
break;
@@ -2670,6 +2651,14 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
}
}
+static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
+ AssertLockNotHeld(cs_main);
+
+ if (GetMainSignals().CallbacksPending() > 10) {
+ SyncWithValidationInterfaceQueue();
+ }
+}
+
/**
* Make the best chain active, in multiple steps. The result is either failure
* or an activated best chain. pblock is either nullptr or a pointer to a block
@@ -2698,19 +2687,17 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
do {
boost::this_thread::interruption_point();
- if (GetMainSignals().CallbacksPending() > 10) {
- // Block until the validation queue drains. This should largely
- // never happen in normal operation, however may happen during
- // reindex, causing memory blowup if we run too far ahead.
- // Note that if a validationinterface callback ends up calling
- // ActivateBestChain this may lead to a deadlock! We should
- // probably have a DEBUG_LOCKORDER test for this in the future.
- SyncWithValidationInterfaceQueue();
- }
+ // Block until the validation queue drains. This should largely
+ // never happen in normal operation; however, it may happen during
+ // reindex, causing memory blowup if we run too far ahead.

+ // Note that if a validationinterface callback ends up calling
+ // ActivateBestChain this may lead to a deadlock! We should
+ // probably have a DEBUG_LOCKORDER test for this in the future.
+ LimitValidationInterfaceQueue();
{
LOCK(cs_main);
- CBlockIndex* starting_tip = chainActive.Tip();
+ CBlockIndex* starting_tip = m_chain.Tip();
bool blocks_connected = false;
do {
// We absolutely may not unlock cs_main until we've made forward progress
@@ -2722,7 +2709,7 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
}
// Whether we have anything to do at all.
- if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip()) {
+ if (pindexMostWork == nullptr || pindexMostWork == m_chain.Tip()) {
break;
}
@@ -2736,16 +2723,16 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
// Wipe cache, we may need another branch now.
pindexMostWork = nullptr;
}
- pindexNewTip = chainActive.Tip();
+ pindexNewTip = m_chain.Tip();
for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) {
assert(trace.pblock && trace.pindex);
GetMainSignals().BlockConnected(trace.pblock, trace.pindex, trace.conflictedTxs);
}
- } while (!chainActive.Tip() || (starting_tip && CBlockIndexWorkComparator()(chainActive.Tip(), starting_tip)));
+ } while (!m_chain.Tip() || (starting_tip && CBlockIndexWorkComparator()(m_chain.Tip(), starting_tip)));
if (!blocks_connected) return true;
- const CBlockIndex* pindexFork = chainActive.FindFork(starting_tip);
+ const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip);
bool fInitialDownload = IsInitialBlockDownload();
// Notify external listeners about the new tip.
@@ -2787,15 +2774,15 @@ bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& par
{
{
LOCK(cs_main);
- if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
+ if (pindex->nChainWork < m_chain.Tip()->nChainWork) {
// Nothing to do, this block is not at the tip.
return true;
}
- if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
+ if (m_chain.Tip()->nChainWork > nLastPreciousChainwork) {
// The chain has been extended since the last call, reset the counter.
nBlockReverseSequenceId = -1;
}
- nLastPreciousChainwork = chainActive.Tip()->nChainWork;
+ nLastPreciousChainwork = m_chain.Tip()->nChainWork;
setBlockIndexCandidates.erase(pindex);
pindex->nSequenceId = nBlockReverseSequenceId;
if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
@@ -2803,7 +2790,7 @@ bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& par
// call preciousblock 2**31-1 times on the same set of tips...
nBlockReverseSequenceId--;
}
- if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
setBlockIndexCandidates.insert(pindex);
PruneBlockIndexCandidates();
}
@@ -2817,67 +2804,89 @@ bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIn
bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
- AssertLockHeld(cs_main);
+ CBlockIndex* to_mark_failed = pindex;
+ bool pindex_was_in_chain = false;
+ int disconnected = 0;
- // We first disconnect backwards and then mark the blocks as invalid.
- // This prevents a case where pruned nodes may fail to invalidateblock
- // and be left unable to start as they have no tip candidates (as there
- // are no blocks that meet the "have data and are not invalid per
- // nStatus" criteria for inclusion in setBlockIndexCandidates).
+ // Disconnect (descendants of) pindex, and mark them invalid.
+ while (true) {
+ if (ShutdownRequested()) break;
- bool pindex_was_in_chain = false;
- CBlockIndex *invalid_walk_tip = chainActive.Tip();
+ // Make sure the queue of validation callbacks doesn't grow unboundedly.
+ LimitValidationInterfaceQueue();
- DisconnectedBlockTransactions disconnectpool;
- while (chainActive.Contains(pindex)) {
+ LOCK(cs_main);
+ if (!m_chain.Contains(pindex)) break;
pindex_was_in_chain = true;
- // ActivateBestChain considers blocks already in chainActive
- // unconditionally valid already, so force disconnect away from it.
- if (!DisconnectTip(state, chainparams, &disconnectpool)) {
- // It's probably hopeless to try to make the mempool consistent
- // here if DisconnectTip failed, but we can try.
- UpdateMempoolForReorg(disconnectpool, false);
- return false;
- }
- }
+ CBlockIndex *invalid_walk_tip = m_chain.Tip();
- // Now mark the blocks we just disconnected as descendants invalid
- // (note this may not be all descendants).
- while (pindex_was_in_chain && invalid_walk_tip != pindex) {
- invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD;
+ // ActivateBestChain considers blocks already in m_chain
+ // unconditionally valid already, so force disconnect away from it.
+ DisconnectedBlockTransactions disconnectpool;
+ bool ret = DisconnectTip(state, chainparams, &disconnectpool);
+ // DisconnectTip will add transactions to disconnectpool.
+ // Adjust the mempool to be consistent with the new tip, adding
+ // transactions back to the mempool if disconnecting was successful,
+ // and we're not doing a very deep invalidation (in which case
+ // keeping the mempool up to date is probably futile anyway).
+ UpdateMempoolForReorg(disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret);
+ if (!ret) return false;
+ assert(invalid_walk_tip->pprev == m_chain.Tip());
+
+ // We immediately mark the disconnected blocks as invalid.
+ // This prevents a case where pruned nodes may fail to invalidateblock
+ // and be left unable to start as they have no tip candidates (as there
+ // are no blocks that meet the "have data and are not invalid per
+ // nStatus" criteria for inclusion in setBlockIndexCandidates).
+ invalid_walk_tip->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(invalid_walk_tip);
setBlockIndexCandidates.erase(invalid_walk_tip);
- invalid_walk_tip = invalid_walk_tip->pprev;
+ setBlockIndexCandidates.insert(invalid_walk_tip->pprev);
+ if (invalid_walk_tip->pprev == to_mark_failed && (to_mark_failed->nStatus & BLOCK_FAILED_VALID)) {
+ // We only want to mark the last disconnected block as BLOCK_FAILED_VALID; its children
+ // need to be BLOCK_FAILED_CHILD instead.
+ to_mark_failed->nStatus = (to_mark_failed->nStatus ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(to_mark_failed);
+ }
+
+ // Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
+ // iterations, or, if it's the last one, call InvalidChainFound on it.
+ to_mark_failed = invalid_walk_tip;
}
- // Mark the block itself as invalid.
- pindex->nStatus |= BLOCK_FAILED_VALID;
- setDirtyBlockIndex.insert(pindex);
- setBlockIndexCandidates.erase(pindex);
- m_failed_blocks.insert(pindex);
+ {
+ LOCK(cs_main);
+ if (m_chain.Contains(to_mark_failed)) {
+ // If the to-be-marked invalid block is in the active chain, something is interfering and we can't proceed.
+ return false;
+ }
- // DisconnectTip will add transactions to disconnectpool; try to add these
- // back to the mempool.
- UpdateMempoolForReorg(disconnectpool, true);
+ // Mark pindex (or the last disconnected block) as invalid, even when it never was in the main chain
+ to_mark_failed->nStatus |= BLOCK_FAILED_VALID;
+ setDirtyBlockIndex.insert(to_mark_failed);
+ setBlockIndexCandidates.erase(to_mark_failed);
+ m_failed_blocks.insert(to_mark_failed);
- // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
- // add it again.
- BlockMap::iterator it = mapBlockIndex.begin();
- while (it != mapBlockIndex.end()) {
- if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
- setBlockIndexCandidates.insert(it->second);
+ // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
+ // add it again.
+ BlockMap::iterator it = mapBlockIndex.begin();
+ while (it != mapBlockIndex.end()) {
+ if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
+ setBlockIndexCandidates.insert(it->second);
+ }
+ it++;
}
- it++;
- }
- InvalidChainFound(pindex);
+ InvalidChainFound(to_mark_failed);
+ }
// Only notify about a new block tip if the active chain was modified.
if (pindex_was_in_chain) {
- uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
+ uiInterface.NotifyBlockTip(IsInitialBlockDownload(), to_mark_failed->pprev);
}
return true;
}
+
bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
return g_chainstate.InvalidateBlock(state, chainparams, pindex);
}
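The status rewrite in the disconnect loop above swaps BLOCK_FAILED_VALID for BLOCK_FAILED_CHILD on blocks that turn out to be descendants rather than the invalidated block itself. A small restatement of that bit manipulation (flag names as used in this diff; the helper itself is illustrative):

// Clears BLOCK_FAILED_VALID (known to be set by the caller's check) and sets
// BLOCK_FAILED_CHILD, leaving all other status bits untouched.
static unsigned int DemoteToFailedChild(unsigned int status)
{
    return (status ^ BLOCK_FAILED_VALID) | BLOCK_FAILED_CHILD;
}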
@@ -2893,7 +2902,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
- if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
+ if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), it->second)) {
setBlockIndexCandidates.insert(it->second);
}
if (it->second == pindexBestInvalid) {
@@ -2957,7 +2966,7 @@ CBlockIndex* CChainState::AddToBlockIndex(const CBlockHeader& block)
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
-void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
+void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
@@ -2971,7 +2980,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
- if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) {
+ if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
std::deque<CBlockIndex*> queue;
queue.push_back(pindexNew);
@@ -2985,7 +2994,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
- if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
+ if (m_chain.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
@@ -3003,7 +3012,7 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
}
}
-static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
+static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
LOCK(cs_LastBlockFile);
@@ -3038,21 +3047,13 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int
vinfoBlockFile[nFile].nSize += nAddSize;
if (!fKnown) {
- unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
- if (nNewChunks > nOldChunks) {
- if (fPruneMode)
- fCheckForPruning = true;
- if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos, true)) {
- FILE *file = OpenBlockFile(pos);
- if (file) {
- LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
- AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
- fclose(file);
- }
- }
- else
- return error("out of disk space");
+ bool out_of_space;
+ size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
}
}
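The pre-allocation logic removed above now lives behind FlatFileSeq::Allocate, which grows the underlying blk*.dat file in BLOCKFILE_CHUNK_SIZE increments and reports running out of space through an out-parameter. A rough usage sketch mirroring the call pattern in FindBlockPos (the helper name is invented):

// Illustrative only: same sequence of steps as the new code above.
static bool EnsureBlockFileSpace(FlatFilePos& pos, size_t add_size)
{
    bool out_of_space = false;
    size_t bytes_allocated =
        FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE).Allocate(pos, add_size, out_of_space);
    if (out_of_space) return false;           // caller aborts, as above
    if (bytes_allocated != 0 && fPruneMode) { // a new chunk was added
        fCheckForPruning = true;
    }
    return true;
}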
@@ -3060,32 +3061,23 @@ static bool FindBlockPos(CDiskBlockPos &pos, unsigned int nAddSize, unsigned int
return true;
}
-static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
+static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
- unsigned int nNewSize;
pos.nPos = vinfoBlockFile[nFile].nUndoSize;
- nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ vinfoBlockFile[nFile].nUndoSize += nAddSize;
setDirtyFileInfo.insert(nFile);
- unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
- unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
- if (nNewChunks > nOldChunks) {
- if (fPruneMode)
- fCheckForPruning = true;
- if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos, true)) {
- FILE *file = OpenUndoFile(pos);
- if (file) {
- LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
- AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
- fclose(file);
- }
- }
- else
- return state.Error("out of disk space");
+ bool out_of_space;
+ size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode(state, "Disk space is low!", _("Error: Disk space is low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
}
return true;
@@ -3095,7 +3087,7 @@ static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state,
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
- return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "high-hash", "proof of work failed");
return true;
}
@@ -3117,13 +3109,13 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2)
- return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot", true, "hashMerkleRoot mismatch");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txnmrklroot", "hashMerkleRoot mismatch");
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
// of transactions in a block without affecting the merkle root of a block,
// while still invalidating it.
if (mutated)
- return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate", true, "duplicate transaction");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txns-duplicate", "duplicate transaction");
}
// All potential-corruption validation must be done before we do any
@@ -3133,20 +3125,20 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// checks that use witness data may be performed here.
// Size limits
- if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
+ if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-length", "size limits failed");
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-missing", "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i]->IsCoinBase())
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-multiple", "more than one coinbase");
// Check transactions
for (const auto& tx : block.vtx)
- if (!CheckTransaction(*tx, state, false))
- return state.Invalid(false, state.GetRejectCode(), state.GetRejectReason(),
+ if (!CheckTransaction(*tx, state, true))
+ return state.Invalid(state.GetReason(), false, state.GetRejectCode(), state.GetRejectReason(),
strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));
unsigned int nSigOps = 0;
@@ -3155,7 +3147,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
nSigOps += GetLegacySigOpCount(*tx);
}
if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-sigops", "out-of-bounds SigOpCount");
if (fCheckPOW && fCheckMerkleRoot)
block.fChecked = true;
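To illustrate the CVE-2012-2459 malleability noted in CheckBlock above: in a Bitcoin-style merkle tree, an odd node at any level is paired with itself, so duplicating the final transaction(s) leaves the root unchanged. The sketch below uses std::hash as a stand-in for double-SHA256 purely for demonstration.

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

static uint64_t HashPair(uint64_t a, uint64_t b)
{
    return std::hash<std::string>{}(std::to_string(a) + ":" + std::to_string(b));
}

static uint64_t ToyMerkleRoot(std::vector<uint64_t> level)
{
    while (level.size() > 1) {
        if (level.size() % 2 == 1) level.push_back(level.back()); // pair odd node with itself
        std::vector<uint64_t> next;
        for (size_t i = 0; i < level.size(); i += 2) {
            next.push_back(HashPair(level[i], level[i + 1]));
        }
        level = std::move(next);
    }
    return level.empty() ? 0 : level.front();
}

// ToyMerkleRoot({a, b, c}) == ToyMerkleRoot({a, b, c, c}): the mutated block
// has the same root, which is why such a block is flagged as BLOCK_MUTATED
// rather than being cached as permanently invalid.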
@@ -3231,6 +3223,22 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
return commitment;
}
+//! Returns last CBlockIndex* that is a checkpoint
+static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+ const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+ for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
+ {
+ const uint256& hash = i.second;
+ CBlockIndex* pindex = LookupBlockIndex(hash);
+ if (pindex) {
+ return pindex;
+ }
+ }
+ return nullptr;
+}
+
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock().
@@ -3240,7 +3248,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
@@ -3248,32 +3256,32 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
// Check proof of work
const Consensus::Params& consensusParams = params.GetConsensus();
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
- return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "bad-diffbits", "incorrect proof of work");
// Check against checkpoints
if (fCheckpointsEnabled) {
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
// MapBlockIndex.
- CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints());
+ CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
- return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
+ return state.Invalid(ValidationInvalidReason::BLOCK_CHECKPOINT, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
- return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
- return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
+ return state.Invalid(ValidationInvalidReason::BLOCK_TIME_FUTURE, false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
- return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
return true;
@@ -3292,6 +3300,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// Start enforcing BIP113 (Median Time Past) using versionbits logic.
int nLockTimeFlags = 0;
if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) {
+ assert(pindexPrev != nullptr);
nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
}
@@ -3302,7 +3311,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// Check that all transactions are finalized
for (const auto& tx : block.vtx) {
if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
- return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false, "non-final transaction");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-nonfinal", "non-final transaction");
}
}
@@ -3312,7 +3321,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
CScript expect = CScript() << nHeight;
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
- return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false, "block height mismatch in coinbase");
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-height", "block height mismatch in coinbase");
}
}
@@ -3334,11 +3343,11 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// already does not permit it, it is impossible to trigger in the
// witness tree.
if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
- return state.DoS(100, false, REJECT_INVALID, "bad-witness-nonce-size", true, strprintf("%s : invalid witness reserved value size", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
}
CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
- return state.DoS(100, false, REJECT_INVALID, "bad-witness-merkle-match", true, strprintf("%s : witness merkle commitment mismatch", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
}
fHaveWitness = true;
}
@@ -3348,7 +3357,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
if (!fHaveWitness) {
for (const auto& tx : block.vtx) {
if (tx->HasWitness()) {
- return state.DoS(100, false, REJECT_INVALID, "unexpected-witness", true, strprintf("%s : unexpected witness data found", __func__));
+ return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
}
}
}
@@ -3360,7 +3369,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// the block hash, so we couldn't mark the block as permanently
// failed).
if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
- return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
+ return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
}
return true;
@@ -3380,7 +3389,7 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK)
- return state.Invalid(error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
+ return state.Invalid(ValidationInvalidReason::CACHED_INVALID, error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
return true;
}
@@ -3391,17 +3400,37 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
- return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
+ return state.Invalid(ValidationInvalidReason::BLOCK_MISSING_PREV, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
- return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
- // If the previous block index isn't valid, determine if it descends from any block which
- // has been found invalid (m_failed_blocks), then mark pindexPrev and any blocks
- // between them as failed.
+ /* Determine if this block descends from any block which has been found
+ * invalid (m_failed_blocks), then mark pindexPrev and any blocks between
+ * them as failed. For example:
+ *
+ * D3
+ * /
+ * B2 - C2
+ * / \
+ * A D2 - E2 - F2
+ * \
+ * B1 - C1 - D1 - E1
+ *
+ * In the case that we attempted to reorg from E1 to F2, only to find
+ * C2 to be invalid, we would mark D2, E2, and F2 as BLOCK_FAILED_CHILD
+ * but NOT D3 (it was not in any of our candidate sets at the time).
+ *
+ * In any case D3 will also be marked as BLOCK_FAILED_CHILD at restart
+ * in LoadBlockIndex.
+ */
if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
+ // The above does not mean "invalid": it checks whether the previous block
+ // hasn't yet been validated up to BLOCK_VALID_SCRIPTS. This is a performance
+ // optimization: in the common case of adding a new block to the tip,
+ // we don't need to iterate over the failed blocks list.
for (const CBlockIndex* failedit : m_failed_blocks) {
if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
assert(failedit->nStatus & BLOCK_FAILED_VALID);
@@ -3411,7 +3440,7 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
setDirtyBlockIndex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
}
}
}
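The m_failed_blocks walk above relies on an ancestor-at-height test to decide descent. A minimal restatement (the helper name is invented; CBlockIndex::GetAncestor is the method already used in the loop above):

// True iff `block` has `ancestor` on its chain of pprev links. GetAncestor
// follows CBlockIndex's skip pointers, so this is roughly logarithmic in depth
// rather than a linear walk.
static bool DescendsFrom(const CBlockIndex* block, const CBlockIndex* ancestor)
{
    return block->GetAncestor(ancestor->nHeight) == ancestor;
}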
@@ -3449,26 +3478,26 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-static CDiskBlockPos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const CDiskBlockPos* dbp) {
- unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
- CDiskBlockPos blockPos;
+static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChainParams& chainparams, const FlatFilePos* dbp) {
+ unsigned int nBlockSize = ::GetSerializeSize(block, CLIENT_VERSION);
+ FlatFilePos blockPos;
if (dbp != nullptr)
blockPos = *dbp;
if (!FindBlockPos(blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr)) {
error("%s: FindBlockPos failed", __func__);
- return CDiskBlockPos();
+ return FlatFilePos();
}
if (dbp == nullptr) {
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) {
AbortNode("Failed to write block");
- return CDiskBlockPos();
+ return FlatFilePos();
}
}
return blockPos;
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
+bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
const CBlock& block = *pblock;
@@ -3485,13 +3514,13 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
- bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
+ bool fHasMoreOrSameWork = (m_chain.Tip() ? pindex->nChainWork >= m_chain.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
- bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
+ bool fTooFarAhead = (pindex->nHeight > int(m_chain.Height() + MIN_BLOCKS_TO_KEEP));
// TODO: Decouple this function from the block download logic by removing fRequested
// This requires some new chain data structure to efficiently look up if a
@@ -3515,7 +3544,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
- if (state.IsInvalid() && !state.CorruptionPossible()) {
+ assert(IsBlockReason(state.GetReason()));
+ if (state.IsInvalid() && state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
@@ -3524,13 +3554,13 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
// Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
// (but if it does not build on our best tip, let the SendMessages loop relay it)
- if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev)
+ if (!IsInitialBlockDownload() && m_chain.Tip() == pindex->pprev)
GetMainSignals().NewPoWValidBlock(pindex, pblock);
// Write block to history file
if (fNewBlock) *fNewBlock = true;
try {
- CDiskBlockPos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
+ FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, chainparams, dbp);
if (blockPos.IsNull()) {
state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
return false;
@@ -3555,12 +3585,14 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
CBlockIndex *pindex = nullptr;
if (fNewBlock) *fNewBlock = false;
CValidationState state;
- // Ensure that CheckBlock() passes before calling AcceptBlock, as
- // belt-and-suspenders.
- bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
+ // CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause a data race.
+ // Therefore, the following critical section must include the CheckBlock() call as well.
LOCK(cs_main);
+ // Ensure that CheckBlock() passes before calling AcceptBlock, as
+ // belt-and-suspenders.
+ bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
if (ret) {
// Store to disk
ret = g_chainstate.AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
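The locking note above is about CBlock::fChecked being a plain, non-atomic member: concurrent CheckBlock() calls on the same CBlock object would race on it, which is why the call is now made under cs_main. A generic sketch of the pattern (hypothetical type, not this codebase's CBlock):

struct CachedCheck {
    bool fChecked = false;           // plain bool, not std::atomic<bool>
    bool Check()
    {
        if (fChecked) return true;   // unsynchronized read
        // ... expensive validation would run here ...
        fChecked = true;             // unsynchronized write: racy if Check()
        return true;                 // is ever called from two threads
    }
};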
@@ -3583,7 +3615,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
AssertLockHeld(cs_main);
- assert(pindexPrev && pindexPrev == chainActive.Tip());
+ assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
CCoinsViewCache viewNew(pcoinsTip.get());
uint256 block_hash(block.GetHash());
CBlockIndex indexDummy(block);
@@ -3659,9 +3691,9 @@ void PruneOneBlockFile(const int fileNumber)
void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
{
for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
- CDiskBlockPos pos(*it, 0);
- fs::remove(GetBlockPosFilename(pos, "blk"));
- fs::remove(GetBlockPosFilename(pos, "rev"));
+ FlatFilePos pos(*it, 0);
+ fs::remove(BlockFileSeq().FileName(pos));
+ fs::remove(UndoFileSeq().FileName(pos));
LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
@@ -3672,11 +3704,11 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr
assert(fPruneMode && nManualPruneHeight > 0);
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == nullptr)
+ if (::ChainActive().Tip() == nullptr)
return;
// last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
- unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
+ unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
int count=0;
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
@@ -3716,14 +3748,14 @@ void PruneBlockFilesManual(int nManualPruneHeight)
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
+ if (::ChainActive().Tip() == nullptr || nPruneTarget == 0) {
return;
}
- if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
+ if ((uint64_t)::ChainActive().Tip()->nHeight <= nPruneAfterHeight) {
return;
}
- unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
+ unsigned int nLastBlockWeCanPrune = ::ChainActive().Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files
// So we should leave a buffer under our target to account for another allocation
@@ -3769,52 +3801,28 @@ static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfte
nLastBlockWeCanPrune, count);
}
-bool CheckDiskSpace(uint64_t nAdditionalBytes, bool blocks_dir)
+static FlatFileSeq BlockFileSeq()
{
- uint64_t nFreeBytesAvailable = fs::space(blocks_dir ? GetBlocksDir() : GetDataDir()).available;
-
- // Check for nMinDiskSpace bytes (currently 50MB)
- if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
- return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
-
- return true;
+ return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE);
}
-static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
+static FlatFileSeq UndoFileSeq()
{
- if (pos.IsNull())
- return nullptr;
- fs::path path = GetBlockPosFilename(pos, prefix);
- fs::create_directories(path.parent_path());
- FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+");
- if (!file && !fReadOnly)
- file = fsbridge::fopen(path, "wb+");
- if (!file) {
- LogPrintf("Unable to open file %s\n", path.string());
- return nullptr;
- }
- if (pos.nPos) {
- if (fseek(file, pos.nPos, SEEK_SET)) {
- LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
- fclose(file);
- return nullptr;
- }
- }
- return file;
+ return FlatFileSeq(GetBlocksDir(), "rev", UNDOFILE_CHUNK_SIZE);
}
-FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
- return OpenDiskFile(pos, "blk", fReadOnly);
+FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
+ return BlockFileSeq().Open(pos, fReadOnly);
}
/** Open an undo file (rev?????.dat) */
-static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
- return OpenDiskFile(pos, "rev", fReadOnly);
+static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
+ return UndoFileSeq().Open(pos, fReadOnly);
}
-fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
+fs::path GetBlockPosFilename(const FlatFilePos &pos)
{
- return GetBlocksDir() / strprintf("%s%05u.dat", prefix, pos.nFile);
+ return BlockFileSeq().FileName(pos);
}
CBlockIndex * CChainState::InsertBlockIndex(const uint256& hash)
@@ -3842,8 +3850,6 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }))
return false;
- boost::this_thread::interruption_point();
-
// Calculate nChainWork
std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
@@ -3863,7 +3869,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
// Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
if (pindex->pprev) {
- if (pindex->pprev->nChainTx) {
+ if (pindex->pprev->HaveTxsDownloaded()) {
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
} else {
pindex->nChainTx = 0;
@@ -3877,7 +3883,7 @@ bool CChainState::LoadBlockIndex(const Consensus::Params& consensus_params, CBlo
pindex->nStatus |= BLOCK_FAILED_CHILD;
setDirtyBlockIndex.insert(pindex);
}
- if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr))
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
@@ -3924,7 +3930,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
- CDiskBlockPos pos(*it, 0);
+ FlatFilePos pos(*it, 0);
if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
@@ -3946,33 +3952,23 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
bool LoadChainTip(const CChainParams& chainparams)
{
AssertLockHeld(cs_main);
+ assert(!pcoinsTip->GetBestBlock().IsNull()); // Never called when the coins view is empty
- if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
-
- if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) {
- // In case we just added the genesis block, connect it now, so
- // that we always have a chainActive.Tip() when we return.
- LogPrintf("%s: Connecting genesis block...\n", __func__);
- CValidationState state;
- if (!ActivateBestChain(state, chainparams)) {
- LogPrintf("%s: failed to activate chain (%s)\n", __func__, FormatStateMessage(state));
- return false;
- }
- }
+ if (::ChainActive().Tip() && ::ChainActive().Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
// Load pointer to end of best chain
CBlockIndex* pindex = LookupBlockIndex(pcoinsTip->GetBestBlock());
if (!pindex) {
return false;
}
- chainActive.SetTip(pindex);
+ ::ChainActive().SetTip(pindex);
g_chainstate.PruneBlockIndexCandidates();
LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
- chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
- FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()),
- GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
+ ::ChainActive().Tip()->GetBlockHash().ToString(), ::ChainActive().Height(),
+ FormatISO8601DateTime(::ChainActive().Tip()->GetBlockTime()),
+ GuessVerificationProgress(chainparams.TxData(), ::ChainActive().Tip()));
return true;
}
@@ -3989,12 +3985,12 @@ CVerifyDB::~CVerifyDB()
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
- if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
+ if (::ChainActive().Tip() == nullptr || ::ChainActive().Tip()->pprev == nullptr)
return true;
// Verify blocks in the best chain
- if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
- nCheckDepth = chainActive.Height();
+ if (nCheckDepth <= 0 || nCheckDepth > ::ChainActive().Height())
+ nCheckDepth = ::ChainActive().Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(coinsview);
@@ -4004,16 +4000,16 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
CValidationState state;
int reportDone = 0;
LogPrintf("[0%%]..."); /* Continued */
- for (pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
+ for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
boost::this_thread::interruption_point();
- int percentageDone = std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
+ const int percentageDone = std::max(1, std::min(99, (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone/10) {
// report every 10% step
LogPrintf("[%d%%]...", percentageDone); /* Continued */
reportDone = percentageDone/10;
}
uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
- if (pindex->nHeight <= chainActive.Height()-nCheckDepth)
+ if (pindex->nHeight <= ::ChainActive().Height()-nCheckDepth)
break;
if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
// If pruning, only go back as far as we have data.
@@ -4055,17 +4051,23 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
if (pindexFailure)
- return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
+ return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", ::ChainActive().Height() - pindexFailure->nHeight + 1, nGoodTransactions);
// store block count as we move pindex at check level >= 4
- int block_count = chainActive.Height() - pindex->nHeight;
+ int block_count = ::ChainActive().Height() - pindex->nHeight;
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
- while (pindex != chainActive.Tip()) {
+ while (pindex != ::ChainActive().Tip()) {
boost::this_thread::interruption_point();
- uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))), false);
- pindex = chainActive.Next(pindex);
+ const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(::ChainActive().Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
+ if (reportDone < percentageDone/10) {
+ // report every 10% step
+ LogPrintf("[%d%%]...", percentageDone); /* Continued */
+ reportDone = percentageDone/10;
+ }
+ uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
+ pindex = ::ChainActive().Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
@@ -4157,6 +4159,7 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
+ uiInterface.ShowProgress(_("Replaying blocks..."), (int) ((nHeight - nForkHeight) * 100.0 / (pindexNew->nHeight - nForkHeight)) , false);
if (!RollforwardBlock(pindex, cache, params)) return false;
}
@@ -4170,38 +4173,114 @@ bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
return g_chainstate.ReplayBlocks(params, view);
}
+//! Helper for CChainState::RewindBlockIndex
+void CChainState::EraseBlockData(CBlockIndex* index)
+{
+ AssertLockHeld(cs_main);
+ assert(!m_chain.Contains(index)); // Make sure this block isn't active
+
+ // Reduce validity
+ index->nStatus = std::min<unsigned int>(index->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (index->nStatus & ~BLOCK_VALID_MASK);
+ // Remove have-data flags.
+ index->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
+ // Remove storage location.
+ index->nFile = 0;
+ index->nDataPos = 0;
+ index->nUndoPos = 0;
+ // Remove various other things
+ index->nTx = 0;
+ index->nChainTx = 0;
+ index->nSequenceId = 0;
+ // Make sure it gets written.
+ setDirtyBlockIndex.insert(index);
+ // Update indexes
+ setBlockIndexCandidates.erase(index);
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(index->pprev);
+ while (ret.first != ret.second) {
+ if (ret.first->second == index) {
+ mapBlocksUnlinked.erase(ret.first++);
+ } else {
+ ++ret.first;
+ }
+ }
+ // Mark parent as eligible for main chain again
+ if (index->pprev && index->pprev->IsValid(BLOCK_VALID_TRANSACTIONS) && index->pprev->HaveTxsDownloaded()) {
+ setBlockIndexCandidates.insert(index->pprev);
+ }
+}
+
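The nStatus manipulation at the top of EraseBlockData works because the block-status word packs a small validity level into its low bits and independent flag bits above them; taking min() against BLOCK_VALID_TREE lowers only the level while every other flag survives, and the have-data bits are then cleared separately. A self-contained sketch of that masking follows; the constants are assumptions chosen only to roughly mirror the layout of the real BlockStatus enum.

// Illustrative sketch of the nStatus downgrade performed by EraseBlockData.
// The constant values below are assumptions, not the real enum.
#include <algorithm>
#include <cassert>

enum : unsigned int {
    VALID_TREE    = 2,    // headers-only validity level
    VALID_SCRIPTS = 5,    // fully validated level
    VALID_MASK    = 0x07, // low bits hold the validity level
    HAVE_DATA     = 0x08, // block data on disk
    HAVE_UNDO     = 0x10, // undo data on disk
    FAILED_CHILD  = 0x40, // descends from an invalid block
};

static unsigned int Downgrade(unsigned int status)
{
    // Lower the validity level to at most VALID_TREE, keep every other flag bit...
    status = std::min<unsigned int>(status & VALID_MASK, VALID_TREE) | (status & ~VALID_MASK);
    // ...then drop the have-data flags, since the block will be re-downloaded.
    return status & ~(HAVE_DATA | HAVE_UNDO);
}

int main()
{
    unsigned int s = VALID_SCRIPTS | HAVE_DATA | HAVE_UNDO | FAILED_CHILD;
    s = Downgrade(s);
    assert((s & VALID_MASK) == VALID_TREE); // level reduced
    assert(s & FAILED_CHILD);               // unrelated flag untouched
    assert(!(s & HAVE_DATA));               // data flags cleared
}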
bool CChainState::RewindBlockIndex(const CChainParams& params)
{
- LOCK(cs_main);
+ // Note that during -reindex-chainstate we are called with an empty m_chain!
- // Note that during -reindex-chainstate we are called with an empty chainActive!
+ // First erase all post-segwit blocks without witness not in the main chain,
+    // as this can be done without costly DisconnectTip calls. Active
+ // blocks will be dealt with below (releasing cs_main in between).
+ {
+ LOCK(cs_main);
+ for (const auto& entry : mapBlockIndex) {
+ if (IsWitnessEnabled(entry.second->pprev, params.GetConsensus()) && !(entry.second->nStatus & BLOCK_OPT_WITNESS) && !m_chain.Contains(entry.second)) {
+ EraseBlockData(entry.second);
+ }
+ }
+ }
+ // Find what height we need to reorganize to.
+ CBlockIndex *tip;
int nHeight = 1;
- while (nHeight <= chainActive.Height()) {
- // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
- // blocks in ConnectBlock, we don't need to go back and
- // re-download/re-verify blocks from before segwit actually activated.
- if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
- break;
+ {
+ LOCK(cs_main);
+ while (nHeight <= m_chain.Height()) {
+ // Although SCRIPT_VERIFY_WITNESS is now generally enforced on all
+ // blocks in ConnectBlock, we don't need to go back and
+ // re-download/re-verify blocks from before segwit actually activated.
+ if (IsWitnessEnabled(m_chain[nHeight - 1], params.GetConsensus()) && !(m_chain[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
+ break;
+ }
+ nHeight++;
}
- nHeight++;
- }
+ tip = m_chain.Tip();
+ }
// nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
+
CValidationState state;
- CBlockIndex* pindex = chainActive.Tip();
- while (chainActive.Height() >= nHeight) {
- if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
- // If pruning, don't try rewinding past the HAVE_DATA point;
- // since older blocks can't be served anyway, there's
- // no need to walk further, and trying to DisconnectTip()
- // will fail (and require a needless reindex/redownload
- // of the blockchain).
- break;
- }
- if (!DisconnectTip(state, params, nullptr)) {
- return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", pindex->nHeight, FormatStateMessage(state));
+ // Loop until the tip is below nHeight, or we reach a pruned block.
+ while (!ShutdownRequested()) {
+ {
+ LOCK(cs_main);
+            // Make sure nothing changed out from under us (this won't happen because RewindBlockIndex runs before importing/network are active)
+ assert(tip == m_chain.Tip());
+ if (tip == nullptr || tip->nHeight < nHeight) break;
+ if (fPruneMode && !(tip->nStatus & BLOCK_HAVE_DATA)) {
+ // If pruning, don't try rewinding past the HAVE_DATA point;
+ // since older blocks can't be served anyway, there's
+ // no need to walk further, and trying to DisconnectTip()
+ // will fail (and require a needless reindex/redownload
+ // of the blockchain).
+ break;
+ }
+
+ // Disconnect block
+ if (!DisconnectTip(state, params, nullptr)) {
+ return error("RewindBlockIndex: unable to disconnect block at height %i (%s)", tip->nHeight, FormatStateMessage(state));
+ }
+
+ // Reduce validity flag and have-data flags.
+ // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
+ // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
+ // Note: If we encounter an insufficiently validated block that
+ // is on m_chain, it must be because we are a pruning node, and
+ // this block or some successor doesn't HAVE_DATA, so we were unable to
+ // rewind all the way. Blocks remaining on m_chain at this point
+ // must not have their validity reduced.
+ EraseBlockData(tip);
+
+ tip = tip->pprev;
}
+        // Make sure the queue of validation callbacks doesn't grow without bound.
+ LimitValidationInterfaceQueue();
+
// Occasionally flush state to disk.
if (!FlushStateToDisk(params, state, FlushStateMode::PERIODIC)) {
LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
@@ -4209,53 +4288,15 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
}
}
- // Reduce validity flag and have-data flags.
- // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
- // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
- for (const auto& entry : mapBlockIndex) {
- CBlockIndex* pindexIter = entry.second;
-
- // Note: If we encounter an insufficiently validated block that
- // is on chainActive, it must be because we are a pruning node, and
- // this block or some successor doesn't HAVE_DATA, so we were unable to
- // rewind all the way. Blocks remaining on chainActive at this point
- // must not have their validity reduced.
- if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
- // Reduce validity
- pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
- // Remove have-data flags.
- pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
- // Remove storage location.
- pindexIter->nFile = 0;
- pindexIter->nDataPos = 0;
- pindexIter->nUndoPos = 0;
- // Remove various other things
- pindexIter->nTx = 0;
- pindexIter->nChainTx = 0;
- pindexIter->nSequenceId = 0;
- // Make sure it gets written.
- setDirtyBlockIndex.insert(pindexIter);
- // Update indexes
- setBlockIndexCandidates.erase(pindexIter);
- std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
- while (ret.first != ret.second) {
- if (ret.first->second == pindexIter) {
- mapBlocksUnlinked.erase(ret.first++);
- } else {
- ++ret.first;
- }
- }
- } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
- setBlockIndexCandidates.insert(pindexIter);
- }
- }
-
- if (chainActive.Tip() != nullptr) {
- // We can't prune block index candidates based on our tip if we have
- // no tip due to chainActive being empty!
- PruneBlockIndexCandidates();
+ {
+ LOCK(cs_main);
+ if (m_chain.Tip() != nullptr) {
+ // We can't prune block index candidates based on our tip if we have
+ // no tip due to m_chain being empty!
+ PruneBlockIndexCandidates();
- CheckBlockIndex(params.GetConsensus());
+ CheckBlockIndex(params.GetConsensus());
+ }
}
return true;
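The rewritten loop above deliberately takes and releases cs_main once per disconnected block, checks for shutdown between iterations, and drains the validation-callback queue outside the lock. A compressed sketch of that pattern is below; the names g_shutdown, DisconnectOne and DrainCallbacks are placeholders for ShutdownRequested(), DisconnectTip()/EraseBlockData() and LimitValidationInterfaceQueue(), not the real APIs.

// Sketch of the "lock per iteration" rewind pattern with placeholder names.
#include <atomic>
#include <mutex>

std::atomic<bool> g_shutdown{false};  // stand-in for ShutdownRequested()
std::mutex g_main_mutex;              // stand-in for cs_main

static bool DisconnectOne(int& tip_height) // stand-in for DisconnectTip + EraseBlockData
{
    --tip_height;
    return true;
}

static void DrainCallbacks() {}            // stand-in for LimitValidationInterfaceQueue()

static bool Rewind(int& tip_height, int target_height)
{
    while (!g_shutdown) {
        {
            std::lock_guard<std::mutex> lock(g_main_mutex);
            if (tip_height < target_height) break;      // done rewinding
            if (!DisconnectOne(tip_height)) return false;
        }
        // The lock is released here, once per block, so queued callbacks and
        // periodic flushes never have to wait for the whole rewind to finish.
        DrainCallbacks();
    }
    return true;
}

int main()
{
    int tip = 120;
    return Rewind(tip, 100) ? 0 : 1;
}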
@@ -4266,8 +4307,8 @@ bool RewindBlockIndex(const CChainParams& params) {
return false;
}
- if (chainActive.Tip() != nullptr) {
- // FlushStateToDisk can possibly read chainActive. Be conservative
+ if (::ChainActive().Tip() != nullptr) {
+ // FlushStateToDisk can possibly read ::ChainActive(). Be conservative
// and skip it here, we're about to -reindex-chainstate anyway, so
// it'll get called a bunch real soon.
CValidationState state;
@@ -4292,7 +4333,7 @@ void CChainState::UnloadBlockIndex() {
void UnloadBlockIndex()
{
LOCK(cs_main);
- chainActive.SetTip(nullptr);
+ ::ChainActive().SetTip(nullptr);
pindexBestInvalid = nullptr;
pindexBestHeader = nullptr;
mempool.clear();
@@ -4306,7 +4347,7 @@ void UnloadBlockIndex()
warningcache[b].clear();
}
- for (BlockMap::value_type& entry : mapBlockIndex) {
+ for (const BlockMap::value_type& entry : mapBlockIndex) {
delete entry.second;
}
mapBlockIndex.clear();
@@ -4342,15 +4383,15 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams)
LOCK(cs_main);
// Check whether we're already initialized by checking for genesis in
- // mapBlockIndex. Note that we can't use chainActive here, since it is
+ // mapBlockIndex. Note that we can't use m_chain here, since it is
// set based on the coins db, not the block index db, which is the only
// thing loaded at this point.
if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
return true;
try {
- CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
- CDiskBlockPos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
+ const CBlock& block = chainparams.GenesisBlock();
+ FlatFilePos blockPos = SaveBlockToDisk(block, 0, chainparams, nullptr);
if (blockPos.IsNull())
return error("%s: writing genesis block to disk failed", __func__);
CBlockIndex *pindex = AddToBlockIndex(block);
@@ -4367,10 +4408,10 @@ bool LoadGenesisBlock(const CChainParams& chainparams)
return g_chainstate.LoadGenesisBlock(chainparams);
}
-bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
+bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos *dbp)
{
// Map of disk positions for blocks with unknown parent (only used for reindex)
- static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
+ static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
int nLoaded = 0;
@@ -4456,9 +4497,9 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
while (!queue.empty()) {
uint256 head = queue.front();
queue.pop_front();
- std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
+ std::pair<std::multimap<uint256, FlatFilePos>::iterator, std::multimap<uint256, FlatFilePos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
- std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
+ std::multimap<uint256, FlatFilePos>::iterator it = range.first;
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
if (ReadBlockFromDisk(*pblockrecursive, it->second, chainparams.GetConsensus()))
{
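This is the out-of-order import path: a block whose parent has not been seen yet is parked in mapBlocksUnknownParent keyed by the missing parent's hash, and once that parent is connected its descendants are pulled back out breadth-first with equal_range. A toy sketch of the bookkeeping follows, with strings standing in for block hashes and for the FlatFilePos values the real code re-reads from disk.

// Sketch of the "blocks with unknown parent" bookkeeping used during reindex.
#include <cstdio>
#include <deque>
#include <map>
#include <string>

int main()
{
    // parent hash -> parked child (possibly several children per parent)
    std::multimap<std::string, std::string> unknown_parent{
        {"A", "B"}, {"A", "C"}, {"B", "D"},
    };

    // Block "A" has just been connected; process every descendant we had parked.
    std::deque<std::string> queue{"A"};
    while (!queue.empty()) {
        const std::string head = queue.front();
        queue.pop_front();
        auto range = unknown_parent.equal_range(head);
        while (range.first != range.second) {
            auto it = range.first++;
            std::printf("processing parked block %s (parent %s)\n",
                        it->second.c_str(), head.c_str());
            queue.push_back(it->second); // its own children may be parked too
            unknown_parent.erase(it);
        }
    }
}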
@@ -4499,15 +4540,15 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
// so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
- // iterating the block tree require that chainActive has been initialized.)
- if (chainActive.Height() < 0) {
+ // iterating the block tree require that m_chain has been initialized.)
+ if (m_chain.Height() < 0) {
assert(mapBlockIndex.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex*,CBlockIndex*> forward;
- for (auto& entry : mapBlockIndex) {
+ for (const std::pair<const uint256, CBlockIndex*>& entry : mapBlockIndex) {
forward.insert(std::make_pair(entry.second->pprev, entry.second));
}
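CheckBlockIndex wants to walk the block tree from genesis downward, but CBlockIndex only stores parent pointers, hence the forward (parent -> children) multimap built here. A small sketch of inverting parent pointers and traversing the resulting tree, using a toy Node type in place of CBlockIndex:

// Sketch of building a forward multimap from parent pointers and walking it.
#include <cstdio>
#include <map>
#include <vector>

struct Node { const char* name; Node* parent; };

int main()
{
    Node genesis{"genesis", nullptr};
    Node a{"a", &genesis}, b{"b", &a}, fork{"fork", &a};
    std::vector<Node*> index{&genesis, &a, &b, &fork};

    // Build the forward map; the root hangs off the nullptr key.
    std::multimap<Node*, Node*> forward;
    for (Node* n : index) forward.insert({n->parent, n});

    // Depth-first walk: visit a node, then push its children found via equal_range.
    std::vector<Node*> stack{forward.equal_range(nullptr).first->second};
    while (!stack.empty()) {
        Node* n = stack.back();
        stack.pop_back();
        std::printf("visiting %s\n", n->name);
        auto range = forward.equal_range(n);
        for (auto it = range.first; it != range.second; ++it) stack.push_back(it->second);
    }
}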
@@ -4544,9 +4585,9 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
if (pindex->pprev == nullptr) {
// Genesis block checks.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
- assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
+ assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
}
- if (pindex->nChainTx == 0) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
+ if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
// HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
if (!fHavePruned) {
@@ -4559,9 +4600,9 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
}
if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
- // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
- assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
- assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0));
+ // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
+ assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
+ assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
@@ -4573,13 +4614,13 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) {
+ if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) {
if (pindexFirstInvalid == nullptr) {
// If this block sorts at least as good as the current tip and
// is valid and we have all data for its parents, it must be in
- // setBlockIndexCandidates. chainActive.Tip() must also be there
+ // setBlockIndexCandidates. m_chain.Tip() must also be there
// even if some data has been pruned.
- if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) {
+ if (pindexFirstMissing == nullptr || pindex == m_chain.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block was in
@@ -4613,11 +4654,11 @@ void CChainState::CheckBlockIndex(const Consensus::Params& consensusParams)
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
- // data for some intermediate block between chainActive and the
+ // data for some intermediate block between m_chain and the
// tip.
- // So if this block is itself better than chainActive.Tip() and it wasn't in
+ // So if this block is itself better than m_chain.Tip() and it wasn't in
// setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
+ if (!CBlockIndexWorkComparator()(pindex, m_chain.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
@@ -4688,24 +4729,24 @@ CBlockFileInfo* GetBlockFileInfo(size_t n)
ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
+ return VersionBitsState(::ChainActive().Tip(), params, pos, versionbitscache);
}
BIP9Stats VersionBitsTipStatistics(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsStatistics(chainActive.Tip(), params, pos);
+ return VersionBitsStatistics(::ChainActive().Tip(), params, pos);
}
int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::DeploymentPos pos)
{
LOCK(cs_main);
- return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos, versionbitscache);
+ return VersionBitsStateSinceHeight(::ChainActive().Tip(), params, pos, versionbitscache);
}
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
-bool LoadMempool(void)
+bool LoadMempool(CTxMemPool& pool)
{
const CChainParams& chainparams = Params();
int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
@@ -4740,12 +4781,12 @@ bool LoadMempool(void)
CAmount amountdelta = nFeeDelta;
if (amountdelta) {
- mempool.PrioritiseTransaction(tx->GetHash(), amountdelta);
+ pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
}
CValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
- AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
+ AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nullptr /* pfMissingInputs */, nTime,
nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
false /* test_accept */);
if (state.IsValid()) {
@@ -4755,7 +4796,7 @@ bool LoadMempool(void)
// wallet(s) having loaded it while we were processing
// mempool transactions; consider these as valid, instead of
// failed, but mark them as 'already there'
- if (mempool.exists(tx->GetHash())) {
+ if (pool.exists(tx->GetHash())) {
++already_there;
} else {
++failed;
@@ -4771,7 +4812,7 @@ bool LoadMempool(void)
file >> mapDeltas;
for (const auto& i : mapDeltas) {
- mempool.PrioritiseTransaction(i.first, i.second);
+ pool.PrioritiseTransaction(i.first, i.second);
}
} catch (const std::exception& e) {
LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing anyway.\n", e.what());
@@ -4782,19 +4823,22 @@ bool LoadMempool(void)
return true;
}
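Each persisted mempool entry carries the time it was originally accepted, and on reload anything older than -mempoolexpiry is simply counted as expired rather than resubmitted; that is the nTime + nExpiryTimeout > nNow check above. A minimal sketch of that filter with made-up transaction ids and timestamps:

// Sketch of the expiry filter applied while reloading persisted mempool entries.
// Times are plain unix timestamps; nothing here touches a real mempool.
#include <cstdint>
#include <cstdio>
#include <vector>

struct SavedTx { const char* txid; int64_t accept_time; };

int main()
{
    const int64_t now = 1000000000;
    const int64_t expiry = 2 * 60 * 60; // pretend -mempoolexpiry is two hours

    const std::vector<SavedTx> saved{
        {"tx_recent", now - 30 * 60},     // 30 minutes old: re-accept
        {"tx_stale",  now - 3 * 60 * 60}, // 3 hours old: expired
    };

    int expired = 0;
    for (const SavedTx& tx : saved) {
        if (tx.accept_time + expiry > now) {
            std::printf("re-accepting %s\n", tx.txid);
        } else {
            ++expired; // counted, not resubmitted
        }
    }
    std::printf("expired: %d\n", expired);
}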
-bool DumpMempool(void)
+bool DumpMempool(const CTxMemPool& pool)
{
int64_t start = GetTimeMicros();
std::map<uint256, CAmount> mapDeltas;
std::vector<TxMempoolInfo> vinfo;
+ static Mutex dump_mutex;
+ LOCK(dump_mutex);
+
{
- LOCK(mempool.cs);
- for (const auto &i : mempool.mapDeltas) {
+ LOCK(pool.cs);
+ for (const auto &i : pool.mapDeltas) {
mapDeltas[i.first] = i.second;
}
- vinfo = mempool.infoAll();
+ vinfo = pool.infoAll();
}
int64_t mid = GetTimeMicros();
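The shape of DumpMempool that emerges here is: a static mutex so only one dump can run at a time, a short critical section under pool.cs that merely copies the deltas and entry list into locals, and then (further down, outside this excerpt) the slow file write with no locks held. A sketch of that snapshot-then-write pattern with toy Pool and entry types standing in for CTxMemPool and TxMempoolInfo:

// Sketch of the snapshot pattern used by DumpMempool: serialize whole dumps,
// copy data out under the pool's lock, then do the slow write lock-free.
#include <map>
#include <mutex>
#include <string>
#include <vector>

struct Pool {
    mutable std::mutex cs;
    std::map<std::string, long long> deltas; // txid -> fee delta
    std::vector<std::string> entries;        // stand-in for TxMempoolInfo records
};

static void WriteToDisk(const std::map<std::string, long long>&,
                        const std::vector<std::string>&) {}

static void Dump(const Pool& pool)
{
    static std::mutex dump_mutex;                  // only one dump at a time
    std::lock_guard<std::mutex> dump_lock(dump_mutex);

    std::map<std::string, long long> deltas;
    std::vector<std::string> snapshot;
    {
        std::lock_guard<std::mutex> lock(pool.cs); // hold the pool lock only to copy
        deltas = pool.deltas;
        snapshot = pool.entries;
    }
    WriteToDisk(deltas, snapshot);                 // slow I/O happens without locks
}

int main()
{
    Pool pool;
    pool.entries.push_back("tx1");
    Dump(pool);
}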