Diffstat (limited to 'src/validation.cpp')
 src/validation.cpp | 1224 ++++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 726 insertions(+), 498 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index 0135521653..01803223d1 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -30,7 +30,6 @@
#include <reverse_iterator.h>
#include <script/script.h>
#include <script/sigcache.h>
-#include <script/standard.h>
#include <shutdown.h>
#include <timedata.h>
#include <tinyformat.h>
@@ -48,8 +47,6 @@
#include <validationinterface.h>
#include <warnings.h>
-#include <future>
-#include <sstream>
#include <string>
#include <boost/algorithm/string/replace.hpp>
@@ -186,7 +183,7 @@ std::unique_ptr<CBlockTreeDB> pblocktree;
// See definition for documentation
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
-bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
+bool CheckInputs(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
@@ -316,10 +313,10 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag
// Returns the script flags which should be checked for a given block
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
-static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age)
+static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age)
EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main)
{
- int expired = pool.Expire(GetTime() - age);
+ int expired = pool.Expire(GetTime<std::chrono::seconds>() - age);
if (expired != 0) {
LogPrint(BCLog::MEMPOOL, "Expired %i transactions from the memory pool\n", expired);
}
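
The hunk above replaces the raw unsigned-long age (in seconds) with a std::chrono::seconds parameter, so the expiry cutoff is computed with typed durations instead of hand-rolled second arithmetic. A minimal standalone sketch of the same arithmetic (not Bitcoin Core code; the real call site uses GetTime<std::chrono::seconds>() and CTxMemPool::Expire(), and 336 hours is assumed here as the -mempoolexpiry default):

#include <chrono>
#include <cstdio>

int main()
{
    using namespace std::chrono;
    const hours expiry{336};  // assumed -mempoolexpiry default (two weeks)
    const seconds now = duration_cast<seconds>(system_clock::now().time_since_epoch());
    const seconds cutoff = now - expiry;  // entries last touched before this would be expired
    std::printf("expire mempool entries older than %lld (unix seconds)\n",
                static_cast<long long>(cutoff.count()));
}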
@@ -368,9 +365,9 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions& disconnectpool,
auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin();
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
- CValidationState stateDummy;
+ TxValidationState stateDummy;
if (!fAddToMempool || (*it)->IsCoinBase() ||
- !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
+ !AcceptToMemoryPool(mempool, stateDummy, *it,
nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
@@ -391,13 +388,13 @@ static void UpdateMempoolForReorg(DisconnectedBlockTransactions& disconnectpool,
// We also need to remove any now-immature transactions
mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
- LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
+ LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
}
// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
// were somehow broken and returning the wrong scriptPubKeys
-static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
- unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool,
+ unsigned int flags, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
// pool.cs should be locked already, but go ahead and re-take the lock here
@@ -427,62 +424,170 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
}
}
- return CheckInputs(tx, state, view, flags, cacheSigStore, true, txdata);
+ // Call CheckInputs() to cache signature and script validity against current tip consensus rules.
+ return CheckInputs(tx, state, view, flags, /* cacheSigStore = */ true, /* cacheFullScriptStore = */ true, txdata);
}
-/**
- * @param[out] coins_to_uncache Return any outpoints which were not previously present in the
- * coins cache, but were added as a result of validating the tx
- * for mempool acceptance. This allows the caller to optionally
- * remove the cache additions if the associated transaction ends
- * up being rejected by the mempool.
- */
-static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
- bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+namespace {
+
+class MemPoolAccept
{
- const CTransaction& tx = *ptx;
- const uint256 hash = tx.GetHash();
- AssertLockHeld(cs_main);
- LOCK(pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
- if (pfMissingInputs) {
- *pfMissingInputs = false;
+public:
+ MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool),
+ m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)),
+ m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000),
+ m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)),
+ m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {}
+
+ // We put the arguments we're handed into a struct, so we can pass them
+ // around easier.
+ struct ATMPArgs {
+ const CChainParams& m_chainparams;
+ TxValidationState &m_state;
+ const int64_t m_accept_time;
+ std::list<CTransactionRef>* m_replaced_transactions;
+ const bool m_bypass_limits;
+ const CAmount& m_absurd_fee;
+ /*
+ * Return any outpoints which were not previously present in the coins
+ * cache, but were added as a result of validating the tx for mempool
+ * acceptance. This allows the caller to optionally remove the cache
+ * additions if the associated transaction ends up being rejected by
+ * the mempool.
+ */
+ std::vector<COutPoint>& m_coins_to_uncache;
+ const bool m_test_accept;
+ };
+
+ // Single transaction acceptance
+ bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+private:
+ // All the intermediate state that gets passed between the various levels
+ // of checking a given transaction.
+ struct Workspace {
+ Workspace(const CTransactionRef& ptx) : m_ptx(ptx), m_hash(ptx->GetHash()) {}
+ std::set<uint256> m_conflicts;
+ CTxMemPool::setEntries m_all_conflicting;
+ CTxMemPool::setEntries m_ancestors;
+ std::unique_ptr<CTxMemPoolEntry> m_entry;
+
+ bool m_replacement_transaction;
+ CAmount m_modified_fees;
+ CAmount m_conflicting_fees;
+ size_t m_conflicting_size;
+
+ const CTransactionRef& m_ptx;
+ const uint256& m_hash;
+ };
+
+ // Run the policy checks on a given transaction, excluding any script checks.
+ // Looks up inputs, calculates feerate, considers replacement, evaluates
+ // package limits, etc. As this function can be invoked for "free" by a peer,
+ // only tests that are fast should be done here (to avoid CPU DoS).
+ bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
+
+ // Run the script checks using our policy flags. As this can be slow, we should
+ // only invoke this on transactions that have otherwise passed policy checks.
+ bool PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ // Re-run the script checks, using consensus flags, and try to cache the
+ // result in the scriptcache. This should be done after
+ // PolicyScriptChecks(). This requires that all inputs either be in our
+ // utxo set or in the mempool.
+ bool ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ // Try to add the transaction to the mempool, removing any conflicts first.
+ // Returns true if the transaction is in the mempool after any size
+ // limiting is performed, false otherwise.
+ bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
+
+ // Compare a package's feerate against minimum allowed.
+ bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state)
+ {
+ CAmount mempoolRejectFee = m_pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(package_size);
+ if (mempoolRejectFee > 0 && package_fee < mempoolRejectFee) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool min fee not met", strprintf("%d < %d", package_fee, mempoolRejectFee));
+ }
+
+ if (package_fee < ::minRelayTxFee.GetFee(package_size)) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "min relay fee not met", strprintf("%d < %d", package_fee, ::minRelayTxFee.GetFee(package_size)));
+ }
+ return true;
}
+private:
+ CTxMemPool& m_pool;
+ CCoinsViewCache m_view;
+ CCoinsViewMemPool m_viewmempool;
+ CCoinsView m_dummy;
+
+ // The package limits in effect at the time of invocation.
+ const size_t m_limit_ancestors;
+ const size_t m_limit_ancestor_size;
+ // These may be modified while evaluating a transaction (eg to account for
+ // in-mempool conflicts; see below).
+ size_t m_limit_descendants;
+ size_t m_limit_descendant_size;
+};
+
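+
+ // CheckFeeRate() in the class above enforces two independent floors on a package: the
+ // dynamic mempool minimum fee (from GetMinFee()) and the static minimum relay feerate.
+ // A standalone sketch with assumed numbers follows (not Bitcoin Core code; the real code
+ // uses CFeeRate and CTxMemPool::GetMinFee(), and 1000 sat/kvB is assumed as the
+ // -minrelaytxfee default):
+ //
+ //   #include <cstdint>
+ //   #include <cstdio>
+ //
+ //   int64_t FeeForSize(int64_t sat_per_kvb, int64_t vsize) { return sat_per_kvb * vsize / 1000; }
+ //
+ //   int main()
+ //   {
+ //       const int64_t min_relay_feerate = 1000;   // assumed -minrelaytxfee default, sat/kvB
+ //       const int64_t mempool_min_feerate = 2500; // hypothetical value returned by GetMinFee()
+ //       const int64_t package_size = 200;         // vbytes
+ //       const int64_t package_fee = 400;          // satoshis
+ //
+ //       const bool ok = package_fee >= FeeForSize(mempool_min_feerate, package_size) &&
+ //                       package_fee >= FeeForSize(min_relay_feerate, package_size);
+ //       std::printf("package passes the fee checks: %s\n", ok ? "yes" : "no");  // prints "no"
+ //   }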
+bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
+{
+ const CTransactionRef& ptx = ws.m_ptx;
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+
+ // Copy/alias what we need out of args
+ TxValidationState &state = args.m_state;
+ const int64_t nAcceptTime = args.m_accept_time;
+ const bool bypass_limits = args.m_bypass_limits;
+ const CAmount& nAbsurdFee = args.m_absurd_fee;
+ std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache;
+
+ // Alias what we need out of ws
+ std::set<uint256>& setConflicts = ws.m_conflicts;
+ CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
+ CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
+ std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
+ bool& fReplacementTransaction = ws.m_replacement_transaction;
+ CAmount& nModifiedFees = ws.m_modified_fees;
+ CAmount& nConflictingFees = ws.m_conflicting_fees;
+ size_t& nConflictingSize = ws.m_conflicting_size;
+
if (!CheckTransaction(tx, state))
return false; // state filled in by CheckTransaction
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "coinbase");
+ return state.Invalid(TxValidationResult::TX_CONSENSUS, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
std::string reason;
if (fRequireStandard && !IsStandardTx(tx, reason))
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, reason);
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
// Do not work on transactions that are too small.
// A transaction with 1 segwit input and 1 P2WPKH output has non-witness size of 82 bytes.
- // Transactions smaller than this are not relayed to reduce unnecessary malloc overhead.
+ // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
+ // 64-byte transactions.
if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "tx-size-small");
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
- return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-final");
+ return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final");
// is it already in the memory pool?
- if (pool.exists(hash)) {
- return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-in-mempool");
+ if (m_pool.exists(hash)) {
+ return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-in-mempool");
}
// Check for conflicts with in-memory transactions
- std::set<uint256> setConflicts;
for (const CTxIn &txin : tx.vin)
{
- const CTransaction* ptxConflicting = pool.GetConflictTx(txin.prevout);
+ const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
if (ptxConflicting) {
if (!setConflicts.count(ptxConflicting->GetHash()))
{
@@ -508,7 +613,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
if (fReplacementOptOut) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_DUPLICATE, "txn-mempool-conflict");
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict");
}
setConflicts.insert(ptxConflicting->GetHash());
@@ -516,353 +621,431 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
}
- {
- CCoinsView dummy;
- CCoinsViewCache view(&dummy);
-
- LockPoints lp;
- CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
- CCoinsViewMemPool viewMemPool(&coins_cache, pool);
- view.SetBackend(viewMemPool);
-
- // do all inputs exist?
- for (const CTxIn& txin : tx.vin) {
- if (!coins_cache.HaveCoinInCache(txin.prevout)) {
- coins_to_uncache.push_back(txin.prevout);
- }
+ LockPoints lp;
+ m_view.SetBackend(m_viewmempool);
- // Note: this call may add txin.prevout to the coins cache
- // (CoinsTip().cacheCoins) by way of FetchCoin(). It should be removed
- // later (via coins_to_uncache) if this tx turns out to be invalid.
- if (!view.HaveCoin(txin.prevout)) {
- // Are inputs missing because we already have the tx?
- for (size_t out = 0; out < tx.vout.size(); out++) {
- // Optimistically just do efficient check of cache for outputs
- if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
- return state.Invalid(ValidationInvalidReason::TX_CONFLICT, false, REJECT_DUPLICATE, "txn-already-known");
- }
- }
- // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
- if (pfMissingInputs) {
- *pfMissingInputs = true;
+ CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+ // do all inputs exist?
+ for (const CTxIn& txin : tx.vin) {
+ if (!coins_cache.HaveCoinInCache(txin.prevout)) {
+ coins_to_uncache.push_back(txin.prevout);
+ }
+
+ // Note: this call may add txin.prevout to the coins cache
+ // (coins_cache.cacheCoins) by way of FetchCoin(). It should be removed
+ // later (via coins_to_uncache) if this tx turns out to be invalid.
+ if (!m_view.HaveCoin(txin.prevout)) {
+ // Are inputs missing because we already have the tx?
+ for (size_t out = 0; out < tx.vout.size(); out++) {
+ // Optimistically just do efficient check of cache for outputs
+ if (coins_cache.HaveCoinInCache(COutPoint(hash, out))) {
+ return state.Invalid(TxValidationResult::TX_CONFLICT, "txn-already-known");
}
- return false; // fMissingInputs and !state.IsInvalid() is used to detect this condition, don't set state.Invalid()
}
+ // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
+ return state.Invalid(TxValidationResult::TX_MISSING_INPUTS, "bad-txns-inputs-missingorspent");
}
+ }
- // Bring the best block into scope
- view.GetBestBlock();
+ // Bring the best block into scope
+ m_view.GetBestBlock();
- // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
- view.SetBackend(dummy);
+ // we have all inputs cached now, so switch back to dummy (to protect
+ // against bugs where we pull more inputs from disk that miss being added
+ // to coins_to_uncache)
+ m_view.SetBackend(m_dummy);
- // Only accept BIP68 sequence locked transactions that can be mined in the next
- // block; we don't want our mempool filled up with transactions that can't
- // be mined yet.
- // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
- // CoinsViewCache instead of create its own
- if (!CheckSequenceLocks(pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
- return state.Invalid(ValidationInvalidReason::TX_PREMATURE_SPEND, false, REJECT_NONSTANDARD, "non-BIP68-final");
+ // Only accept BIP68 sequence locked transactions that can be mined in the next
+ // block; we don't want our mempool filled up with transactions that can't
+ // be mined yet.
+ // Must keep pool.cs for this unless we change CheckSequenceLocks to take a
+ // CoinsViewCache instead of create its own
+ if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
+ return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final");
- CAmount nFees = 0;
- if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
- return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
- }
+ CAmount nFees = 0;
+ if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) {
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ }
- // Check for non-standard pay-to-script-hash in inputs
- if (fRequireStandard && !AreInputsStandard(tx, view))
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
+ // Check for non-standard pay-to-script-hash in inputs
+ if (fRequireStandard && !AreInputsStandard(tx, m_view))
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs");
- // Check for non-standard witness in P2WSH
- if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, view))
- return state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false, REJECT_NONSTANDARD, "bad-witness-nonstandard");
+ // Check for non-standard witness in P2WSH
+ if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
+ return state.Invalid(TxValidationResult::TX_WITNESS_MUTATED, "bad-witness-nonstandard");
- int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
+ int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS);
- // nModifiedFees includes any fee deltas from PrioritiseTransaction
- CAmount nModifiedFees = nFees;
- pool.ApplyDelta(hash, nModifiedFees);
+ // nModifiedFees includes any fee deltas from PrioritiseTransaction
+ nModifiedFees = nFees;
+ m_pool.ApplyDelta(hash, nModifiedFees);
- // Keep track of transactions that spend a coinbase, which we re-scan
- // during reorgs to ensure COINBASE_MATURITY is still met.
- bool fSpendsCoinbase = false;
- for (const CTxIn &txin : tx.vin) {
- const Coin &coin = view.AccessCoin(txin.prevout);
- if (coin.IsCoinBase()) {
- fSpendsCoinbase = true;
- break;
- }
+ // Keep track of transactions that spend a coinbase, which we re-scan
+ // during reorgs to ensure COINBASE_MATURITY is still met.
+ bool fSpendsCoinbase = false;
+ for (const CTxIn &txin : tx.vin) {
+ const Coin &coin = m_view.AccessCoin(txin.prevout);
+ if (coin.IsCoinBase()) {
+ fSpendsCoinbase = true;
+ break;
}
+ }
- CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
- fSpendsCoinbase, nSigOpsCost, lp);
- unsigned int nSize = entry.GetTxSize();
+ entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(),
+ fSpendsCoinbase, nSigOpsCost, lp));
+ unsigned int nSize = entry->GetTxSize();
- if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops",
+ if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-too-many-sigops",
strprintf("%d", nSigOpsCost));
- CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
- if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", strprintf("%d < %d", nModifiedFees, mempoolRejectFee));
- }
-
- // No transactions are allowed below minRelayTxFee except from disconnected blocks
- if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "min relay fee not met", strprintf("%d < %d", nModifiedFees, ::minRelayTxFee.GetFee(nSize)));
- }
-
- if (nAbsurdFee && nFees > nAbsurdFee)
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false,
- REJECT_HIGHFEE, "absurdly-high-fee",
- strprintf("%d > %d", nFees, nAbsurdFee));
-
- // Calculate in-mempool ancestors, up to a limit.
- CTxMemPool::setEntries setAncestors;
- size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
- size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
- size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
- std::string errString;
- if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
- setAncestors.clear();
- // If CalculateMemPoolAncestors fails second time, we want the original error string.
- std::string dummy_err_string;
- // If the new transaction is relatively small (up to 40k weight)
- // and has at most one ancestor (ie ancestor limit of 2, including
- // the new transaction), allow it if its parent has exactly the
- // descendant limit descendants.
- //
- // This allows protocols which rely on distrusting counterparties
- // being able to broadcast descendants of an unconfirmed transaction
- // to be secure by simply only having two immediately-spendable
- // outputs - one for each counterparty. For more info on the uses for
- // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
- if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
- !pool.CalculateMemPoolAncestors(entry, setAncestors, 2, nLimitAncestorSize, nLimitDescendants + 1, nLimitDescendantSize + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too-long-mempool-chain", errString);
- }
- }
-
- // A transaction that spends outputs that would be replaced by it is invalid. Now
- // that we have the set of all ancestors we can detect this
- // pathological case by making sure setConflicts and setAncestors don't
- // intersect.
- for (CTxMemPool::txiter ancestorIt : setAncestors)
+ // No transactions are allowed below minRelayTxFee except from disconnected
+ // blocks
+ if (!bypass_limits && !CheckFeeRate(nSize, nModifiedFees, state)) return false;
+
+ if (nAbsurdFee && nFees > nAbsurdFee)
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD,
+ "absurdly-high-fee", strprintf("%d > %d", nFees, nAbsurdFee));
+
+ const CTxMemPool::setEntries setIterConflicting = m_pool.GetIterSet(setConflicts);
+ // Calculate in-mempool ancestors, up to a limit.
+ if (setConflicts.size() == 1) {
+ // In general, when we receive an RBF transaction with mempool conflicts, we want to know whether we
+ // would meet the chain limits after the conflicts have been removed. However, there isn't a practical
+ // way to do this short of calculating the ancestor and descendant sets with an overlay cache of
+ // changed mempool entries. Due to both implementation and runtime complexity concerns, this isn't
+ // very realistic, thus we only ensure a limited set of transactions are RBF'able despite mempool
+ // conflicts here. Importantly, we need to ensure that some transactions which were accepted using
+ // the below carve-out are able to be RBF'ed, without impacting the security the carve-out provides
+ // for off-chain contract systems (see link in the comment below).
+ //
+ // Specifically, the subset of RBF transactions which we allow despite chain limits are those which
+ // conflict directly with exactly one other transaction (but may evict children of said transaction),
+ // and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
+ // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
+ // amended, we may need to move that check to here instead of removing it wholesale.
+ //
+ // Such transactions are clearly not merging any existing packages, so we are only concerned with
+ // ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
+ // not allowing something to effectively use the (below) carve-out spot when it shouldn't be allowed
+ // to.
+ //
+ // To check these we first check if we meet the RBF criteria, above, and increment the descendant
+ // limits by the direct conflict and its descendants (as these are recalculated in
+ // CalculateMempoolAncestors by assuming the new transaction being added is a new descendant, with no
+ // removals, of each parent's existing dependent set). The ancestor count limits are unmodified (as
+ // the ancestor limits should be the same for both our new transaction and any conflicts).
+ // We don't bother incrementing m_limit_descendants by the full removal count as that limit never comes
+ // into force here (as we're only adding a single transaction).
+ assert(setIterConflicting.size() == 1);
+ CTxMemPool::txiter conflict = *setIterConflicting.begin();
+
+ m_limit_descendants += 1;
+ m_limit_descendant_size += conflict->GetSizeWithDescendants();
+ }
+
+ std::string errString;
+ if (!m_pool.CalculateMemPoolAncestors(*entry, setAncestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
+ setAncestors.clear();
+ // If CalculateMemPoolAncestors fails second time, we want the original error string.
+ std::string dummy_err_string;
+ // Contracting/payment channels CPFP carve-out:
+ // If the new transaction is relatively small (up to 40k weight)
+ // and has at most one ancestor (ie ancestor limit of 2, including
+ // the new transaction), allow it if its parent has exactly the
+ // descendant limit descendants.
+ //
+ // This allows protocols which rely on distrusting counterparties
+ // being able to broadcast descendants of an unconfirmed transaction
+ // to be secure by simply only having two immediately-spendable
+ // outputs - one for each counterparty. For more info on the uses for
+ // this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
+ if (nSize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
+ !m_pool.CalculateMemPoolAncestors(*entry, setAncestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
+ }
+ }
+
+ // A transaction that spends outputs that would be replaced by it is invalid. Now
+ // that we have the set of all ancestors we can detect this
+ // pathological case by making sure setConflicts and setAncestors don't
+ // intersect.
+ for (CTxMemPool::txiter ancestorIt : setAncestors)
+ {
+ const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
+ if (setConflicts.count(hashAncestor))
{
- const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
- if (setConflicts.count(hashAncestor))
- {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-spends-conflicting-tx",
- strprintf("%s spends conflicting transaction %s",
- hash.ToString(),
- hashAncestor.ToString()));
- }
+ return state.Invalid(TxValidationResult::TX_CONSENSUS, "bad-txns-spends-conflicting-tx",
+ strprintf("%s spends conflicting transaction %s",
+ hash.ToString(),
+ hashAncestor.ToString()));
}
+ }
- // Check if it's economically rational to mine this transaction rather
- // than the ones it replaces.
- CAmount nConflictingFees = 0;
- size_t nConflictingSize = 0;
- uint64_t nConflictingCount = 0;
- CTxMemPool::setEntries allConflicting;
-
- // If we don't hold the lock allConflicting might be incomplete; the
- // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
- // mempool consistency for us.
- const bool fReplacementTransaction = setConflicts.size();
- if (fReplacementTransaction)
- {
- CFeeRate newFeeRate(nModifiedFees, nSize);
- std::set<uint256> setConflictsParents;
- const int maxDescendantsToVisit = 100;
- const CTxMemPool::setEntries setIterConflicting = pool.GetIterSet(setConflicts);
- for (const auto& mi : setIterConflicting) {
- // Don't allow the replacement to reduce the feerate of the
- // mempool.
- //
- // We usually don't want to accept replacements with lower
- // feerates than what they replaced as that would lower the
- // feerate of the next block. Requiring that the feerate always
- // be increased is also an easy-to-reason about way to prevent
- // DoS attacks via replacements.
- //
- // We only consider the feerates of transactions being directly
- // replaced, not their indirect descendants. While that does
- // mean high feerate children are ignored when deciding whether
- // or not to replace, we do require the replacement to pay more
- // overall fees too, mitigating most cases.
- CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
- if (newFeeRate <= oldFeeRate)
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
- hash.ToString(),
- newFeeRate.ToString(),
- oldFeeRate.ToString()));
- }
-
- for (const CTxIn &txin : mi->GetTx().vin)
- {
- setConflictsParents.insert(txin.prevout.hash);
- }
+ // Check if it's economically rational to mine this transaction rather
+ // than the ones it replaces.
+ nConflictingFees = 0;
+ nConflictingSize = 0;
+ uint64_t nConflictingCount = 0;
- nConflictingCount += mi->GetCountWithDescendants();
- }
- // This potentially overestimates the number of actual descendants
- // but we just want to be conservative to avoid doing too much
- // work.
- if (nConflictingCount <= maxDescendantsToVisit) {
- // If not too many to replace, then calculate the set of
- // transactions that would have to be evicted
- for (CTxMemPool::txiter it : setIterConflicting) {
- pool.CalculateDescendants(it, allConflicting);
- }
- for (CTxMemPool::txiter it : allConflicting) {
- nConflictingFees += it->GetModifiedFee();
- nConflictingSize += it->GetTxSize();
- }
- } else {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "too many potential replacements",
- strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ // If we don't hold the lock allConflicting might be incomplete; the
+ // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
+ // mempool consistency for us.
+ fReplacementTransaction = setConflicts.size();
+ if (fReplacementTransaction)
+ {
+ CFeeRate newFeeRate(nModifiedFees, nSize);
+ std::set<uint256> setConflictsParents;
+ const int maxDescendantsToVisit = 100;
+ for (const auto& mi : setIterConflicting) {
+ // Don't allow the replacement to reduce the feerate of the
+ // mempool.
+ //
+ // We usually don't want to accept replacements with lower
+ // feerates than what they replaced as that would lower the
+ // feerate of the next block. Requiring that the feerate always
+ // be increased is also an easy-to-reason about way to prevent
+ // DoS attacks via replacements.
+ //
+ // We only consider the feerates of transactions being directly
+ // replaced, not their indirect descendants. While that does
+ // mean high feerate children are ignored when deciding whether
+ // or not to replace, we do require the replacement to pay more
+ // overall fees too, mitigating most cases.
+ CFeeRate oldFeeRate(mi->GetModifiedFee(), mi->GetTxSize());
+ if (newFeeRate <= oldFeeRate)
+ {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
+ strprintf("rejecting replacement %s; new feerate %s <= old feerate %s",
hash.ToString(),
- nConflictingCount,
- maxDescendantsToVisit));
+ newFeeRate.ToString(),
+ oldFeeRate.ToString()));
}
- for (unsigned int j = 0; j < tx.vin.size(); j++)
+ for (const CTxIn &txin : mi->GetTx().vin)
{
- // We don't want to accept replacements that require low
- // feerate junk to be mined first. Ideally we'd keep track of
- // the ancestor feerates and make the decision based on that,
- // but for now requiring all new inputs to be confirmed works.
- if (!setConflictsParents.count(tx.vin[j].prevout.hash))
- {
- // Rather than check the UTXO set - potentially expensive -
- // it's cheaper to just check if the new input refers to a
- // tx that's in the mempool.
- if (pool.exists(tx.vin[j].prevout.hash)) {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_NONSTANDARD, "replacement-adds-unconfirmed",
- strprintf("replacement %s adds unconfirmed input, idx %d",
- hash.ToString(), j));
- }
- }
+ setConflictsParents.insert(txin.prevout.hash);
}
- // The replacement must pay greater fees than the transactions it
- // replaces - if we did the bandwidth used by those conflicting
- // transactions would not be paid for.
- if (nModifiedFees < nConflictingFees)
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
- hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
+ nConflictingCount += mi->GetCountWithDescendants();
+ }
+ // This potentially overestimates the number of actual descendants
+ // but we just want to be conservative to avoid doing too much
+ // work.
+ if (nConflictingCount <= maxDescendantsToVisit) {
+ // If not too many to replace, then calculate the set of
+ // transactions that would have to be evicted
+ for (CTxMemPool::txiter it : setIterConflicting) {
+ m_pool.CalculateDescendants(it, allConflicting);
}
-
- // Finally in addition to paying more fees than the conflicts the
- // new transaction must pay for its own bandwidth.
- CAmount nDeltaFees = nModifiedFees - nConflictingFees;
- if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
- {
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "insufficient fee",
- strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
- hash.ToString(),
- FormatMoney(nDeltaFees),
- FormatMoney(::incrementalRelayFee.GetFee(nSize))));
+ for (CTxMemPool::txiter it : allConflicting) {
+ nConflictingFees += it->GetModifiedFee();
+ nConflictingSize += it->GetTxSize();
}
+ } else {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too many potential replacements",
+ strprintf("rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ hash.ToString(),
+ nConflictingCount,
+ maxDescendantsToVisit));
}
- constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
-
- // Check against previous transactions
- // The first loop above does all the inexpensive checks.
- // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
- // Helps prevent CPU exhaustion denial-of-service attacks.
- PrecomputedTransactionData txdata(tx);
- if (!CheckInputs(tx, state, view, scriptVerifyFlags, true, false, txdata)) {
- // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
- // need to turn both off, and compare against just turning off CLEANSTACK
- // to see if the failure is specifically due to witness validation.
- CValidationState stateDummy; // Want reported failures to be from first CheckInputs
- if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
- !CheckInputs(tx, stateDummy, view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
- // Only the witness is missing, so the transaction itself may be fine.
- state.Invalid(ValidationInvalidReason::TX_WITNESS_MUTATED, false,
- state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
+ for (unsigned int j = 0; j < tx.vin.size(); j++)
+ {
+ // We don't want to accept replacements that require low
+ // feerate junk to be mined first. Ideally we'd keep track of
+ // the ancestor feerates and make the decision based on that,
+ // but for now requiring all new inputs to be confirmed works.
+ //
+ // Note that if you relax this to make RBF a little more useful,
+ // this may break the CalculateMempoolAncestors RBF relaxation,
+ // above. See the comment above the first CalculateMempoolAncestors
+ // call for more info.
+ if (!setConflictsParents.count(tx.vin[j].prevout.hash))
+ {
+ // Rather than check the UTXO set - potentially expensive -
+ // it's cheaper to just check if the new input refers to a
+ // tx that's in the mempool.
+ if (m_pool.exists(tx.vin[j].prevout.hash)) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "replacement-adds-unconfirmed",
+ strprintf("replacement %s adds unconfirmed input, idx %d",
+ hash.ToString(), j));
+ }
}
- assert(IsTransactionReason(state.GetReason()));
- return false; // state filled in by CheckInputs
}
- // Check again against the current block tip's script verification
- // flags to cache our script execution flags. This is, of course,
- // useless if the next block has different script flags from the
- // previous one, but because the cache tracks script flags for us it
- // will auto-invalidate and we'll just have a few blocks of extra
- // misses on soft-fork activation.
- //
- // This is also useful in case of bugs in the standard flags that cause
- // transactions to pass as valid when they're actually invalid. For
- // instance the STRICTENC flag was incorrectly allowing certain
- // CHECKSIG NOT scripts to pass, even though they were invalid.
- //
- // There is a similar check in CreateNewBlock() to prevent creating
- // invalid blocks (using TestBlockValidity), however allowing such
- // transactions into the mempool can be exploited as a DoS attack.
- unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
- if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata)) {
- return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
- }
-
- if (test_accept) {
- // Tx was accepted, but not added
- return true;
+ // The replacement must pay greater fees than the transactions it
+ // replaces - if we did the bandwidth used by those conflicting
+ // transactions would not be paid for.
+ if (nModifiedFees < nConflictingFees)
+ {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
+ strprintf("rejecting replacement %s, less fees than conflicting txs; %s < %s",
+ hash.ToString(), FormatMoney(nModifiedFees), FormatMoney(nConflictingFees)));
}
- // Remove conflicting transactions from the mempool
- for (CTxMemPool::txiter it : allConflicting)
+ // Finally in addition to paying more fees than the conflicts the
+ // new transaction must pay for its own bandwidth.
+ CAmount nDeltaFees = nModifiedFees - nConflictingFees;
+ if (nDeltaFees < ::incrementalRelayFee.GetFee(nSize))
{
- LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
- it->GetTx().GetHash().ToString(),
- hash.ToString(),
- FormatMoney(nModifiedFees - nConflictingFees),
- (int)nSize - (int)nConflictingSize);
- if (plTxnReplaced)
- plTxnReplaced->push_back(it->GetSharedTx());
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee",
+ strprintf("rejecting replacement %s, not enough additional fees to relay; %s < %s",
+ hash.ToString(),
+ FormatMoney(nDeltaFees),
+ FormatMoney(::incrementalRelayFee.GetFee(nSize))));
}
- pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
+ }
+ return true;
+}
+
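+
+ // The single-conflict RBF carve-out in PreChecks() above does not relax the ancestor
+ // limits; it only widens the descendant limits by the one directly-conflicting entry
+ // before CalculateMemPoolAncestors() runs. A toy illustration with hypothetical numbers
+ // (25 entries / 101000 bytes are assumed as the descendant-limit defaults):
+ //
+ //   #include <cstdio>
+ //
+ //   int main()
+ //   {
+ //       size_t limit_descendants = 25;           // assumed -limitdescendantcount default
+ //       size_t limit_descendant_size = 101000;   // assumed -limitdescendantsize default * 1000
+ //       const size_t conflict_size_with_descendants = 40000;  // hypothetical GetSizeWithDescendants()
+ //
+ //       // Exactly one direct conflict: widen the limits as the diff does.
+ //       limit_descendants += 1;
+ //       limit_descendant_size += conflict_size_with_descendants;
+ //
+ //       std::printf("descendant limits for the ancestor calculation: %zu entries, %zu bytes\n",
+ //                   limit_descendants, limit_descendant_size);
+ //   }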
+bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
+{
+ const CTransaction& tx = *ws.m_ptx;
- // This transaction should only count for fee estimation if:
- // - it isn't a BIP 125 replacement transaction (may not be widely supported)
- // - it's not being re-added during a reorg which bypasses typical mempool fee limits
- // - the node is not behind
- // - the transaction is not dependent on any other transactions in the mempool
- bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
+ TxValidationState &state = args.m_state;
- // Store transaction in memory
- pool.addUnchecked(entry, setAncestors, validForFeeEstimation);
+ constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
- // trim mempool and check if tx was trimmed
- if (!bypass_limits) {
- LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
- if (!pool.exists(hash))
- return state.Invalid(ValidationInvalidReason::TX_MEMPOOL_POLICY, false, REJECT_INSUFFICIENTFEE, "mempool full");
+ // Check against previous transactions
+ // This is done last to help prevent CPU exhaustion denial-of-service attacks.
+ if (!CheckInputs(tx, state, m_view, scriptVerifyFlags, true, false, txdata)) {
+ // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
+ // need to turn both off, and compare against just turning off CLEANSTACK
+ // to see if the failure is specifically due to witness validation.
+ TxValidationState state_dummy; // Want reported failures to be from first CheckInputs
+ if (!tx.HasWitness() && CheckInputs(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
+ !CheckInputs(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
+ // Only the witness is missing, so the transaction itself may be fine.
+ state.Invalid(TxValidationResult::TX_WITNESS_MUTATED,
+ state.GetRejectReason(), state.GetDebugMessage());
}
+ return false; // state filled in by CheckInputs
+ }
+
+ return true;
+}
+
+bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata)
+{
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+
+ TxValidationState &state = args.m_state;
+ const CChainParams& chainparams = args.m_chainparams;
+
+ // Check again against the current block tip's script verification
+ // flags to cache our script execution flags. This is, of course,
+ // useless if the next block has different script flags from the
+ // previous one, but because the cache tracks script flags for us it
+ // will auto-invalidate and we'll just have a few blocks of extra
+ // misses on soft-fork activation.
+ //
+ // This is also useful in case of bugs in the standard flags that cause
+ // transactions to pass as valid when they're actually invalid. For
+ // instance the STRICTENC flag was incorrectly allowing certain
+ // CHECKSIG NOT scripts to pass, even though they were invalid.
+ //
+ // There is a similar check in CreateNewBlock() to prevent creating
+ // invalid blocks (using TestBlockValidity), however allowing such
+ // transactions into the mempool can be exploited as a DoS attack.
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus());
+ if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) {
+ return error("%s: BUG! PLEASE REPORT THIS! CheckInputs failed against latest-block but not STANDARD flags %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ }
+
+ return true;
+}
+
+bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws)
+{
+ const CTransaction& tx = *ws.m_ptx;
+ const uint256& hash = ws.m_hash;
+ TxValidationState &state = args.m_state;
+ const bool bypass_limits = args.m_bypass_limits;
+
+ CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
+ CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
+ const CAmount& nModifiedFees = ws.m_modified_fees;
+ const CAmount& nConflictingFees = ws.m_conflicting_fees;
+ const size_t& nConflictingSize = ws.m_conflicting_size;
+ const bool fReplacementTransaction = ws.m_replacement_transaction;
+ std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
+
+ // Remove conflicting transactions from the mempool
+ for (CTxMemPool::txiter it : allConflicting)
+ {
+ LogPrint(BCLog::MEMPOOL, "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+ it->GetTx().GetHash().ToString(),
+ hash.ToString(),
+ FormatMoney(nModifiedFees - nConflictingFees),
+ (int)entry->GetTxSize() - (int)nConflictingSize);
+ if (args.m_replaced_transactions)
+ args.m_replaced_transactions->push_back(it->GetSharedTx());
+ }
+ m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
+
+ // This transaction should only count for fee estimation if:
+ // - it isn't a BIP 125 replacement transaction (may not be widely supported)
+ // - it's not being re-added during a reorg which bypasses typical mempool fee limits
+ // - the node is not behind
+ // - the transaction is not dependent on any other transactions in the mempool
+ bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx);
+
+ // Store transaction in memory
+ m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
+
+ // trim mempool and check if tx was trimmed
+ if (!bypass_limits) {
+ LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
+ if (!m_pool.exists(hash))
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full");
}
+ return true;
+}
+
+bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args)
+{
+ AssertLockHeld(cs_main);
+ LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool())
+
+ Workspace workspace(ptx);
+
+ if (!PreChecks(args, workspace)) return false;
+
+ // Only compute the precomputed transaction data if we need to verify
+ // scripts (ie, other policy checks pass). We perform the inexpensive
+ // checks first and avoid hashing and signature verification unless those
+ // checks pass, to mitigate CPU exhaustion denial-of-service attacks.
+ PrecomputedTransactionData txdata(*ptx);
+
+ if (!PolicyScriptChecks(args, workspace, txdata)) return false;
+
+ if (!ConsensusScriptChecks(args, workspace, txdata)) return false;
+
+ // Tx was accepted, but not added
+ if (args.m_test_accept) return true;
+
+ if (!Finalize(args, workspace)) return false;
GetMainSignals().TransactionAddedToMempool(ptx);
return true;
}
+} // anon namespace
+
/** (try to) add transaction to memory pool with a specified acceptance time **/
-static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
- bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
+static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
+ int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount nAbsurdFee, bool test_accept) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::vector<COutPoint> coins_to_uncache;
- bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept);
+ MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache, test_accept };
+ bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args);
if (!res) {
// Remove coins that were not present in the coins cache before calling ATMPW;
// this is to prevent memory DoS in case we receive a large number of
@@ -873,17 +1056,17 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo
::ChainstateActive().CoinsTip().Uncache(hashTx);
}
// After we've (potentially) uncached entries, ensure our coins cache is still within its size limits
- CValidationState stateDummy;
- ::ChainstateActive().FlushStateToDisk(chainparams, stateDummy, FlushStateMode::PERIODIC);
+ BlockValidationState state_dummy;
+ ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC);
return res;
}
-bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
- bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
+bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx,
+ std::list<CTransactionRef>* plTxnReplaced,
bool bypass_limits, const CAmount nAbsurdFee, bool test_accept)
{
const CChainParams& chainparams = Params();
- return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
+ return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee, test_accept);
}
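
With pfMissingInputs removed from the public signature, callers learn about orphan candidates from the validation state itself. A hedged caller-side sketch follows; it is not a verbatim excerpt and is not standalone (the mempool and ptx objects, plus the relevant includes, are assumed to exist in the caller), but it only uses identifiers that appear in this diff:

    TxValidationState state;
    std::list<CTransactionRef> replaced;
    if (AcceptToMemoryPool(mempool, state, ptx, &replaced,
                           /* bypass_limits */ false, /* nAbsurdFee */ 0, /* test_accept */ false)) {
        // accepted into the mempool
    } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
        // previously signalled via *pfMissingInputs: treat as a potential orphan
    } else {
        // rejected for another reason; state carries the reject reason and debug message
    }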
/**
@@ -1228,8 +1411,8 @@ void static InvalidChainFound(CBlockIndex* pindexNew) EXCLUSIVE_LOCKS_REQUIRED(c
CheckForkWarningConditions();
}
-void CChainState::InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
- if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
+void CChainState::InvalidBlockFound(CBlockIndex *pindex, const BlockValidationState &state) {
+ if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
m_blockman.m_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
@@ -1302,7 +1485,7 @@ void InitScriptExecutionCache() {
*
* Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
-bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool CheckInputs(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
if (tx.IsCoinBase()) return true;
@@ -1354,10 +1537,10 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
CScriptCheck check2(coin.out, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
- return state.Invalid(ValidationInvalidReason::TX_NOT_STANDARD, false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
+ return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
// MANDATORY flag failures correspond to
- // ValidationInvalidReason::CONSENSUS. Because CONSENSUS
+ // TxValidationResult::TX_CONSENSUS. Because CONSENSUS
// failures are the most serious case of validation
// failures, we may need to consider using
// RECENT_CONSENSUS_CHANGE for any script failure that
@@ -1365,7 +1548,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// support, to avoid splitting the network (but this
// depends on the details of how net_processing handles
// such errors).
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
+ return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
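
The two-pass check above distinguishes failures of standard-only flags (reported as TX_NOT_STANDARD) from failures of the mandatory subset (reported as TX_CONSENSUS) by masking off STANDARD_NOT_MANDATORY_VERIFY_FLAGS. A standalone sketch of that mask arithmetic with invented flag values (not the real script-verify flag constants):

#include <cstdio>

int main()
{
    const unsigned int MANDATORY = 1u << 0;   // hypothetical mandatory flag
    const unsigned int POLICY_A  = 1u << 1;   // hypothetical standard-only flags
    const unsigned int POLICY_B  = 1u << 2;
    const unsigned int STANDARD  = MANDATORY | POLICY_A | POLICY_B;
    const unsigned int STANDARD_NOT_MANDATORY = STANDARD & ~MANDATORY;

    // A failure that persists under this reduced flag set maps to a consensus failure.
    const unsigned int consensus_only_flags = STANDARD & ~STANDARD_NOT_MANDATORY;
    std::printf("standard=0x%x consensus-only=0x%x\n", STANDARD, consensus_only_flags);
}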
@@ -1450,7 +1633,7 @@ static bool AbortNode(const std::string& strMessage, const std::string& userMess
return false;
}
-static bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage = "", unsigned int prefix = 0)
+static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const std::string& userMessage = "", unsigned int prefix = 0)
{
AbortNode(strMessage, userMessage, prefix);
return state.Error(strMessage);
@@ -1564,9 +1747,9 @@ void static FlushBlockFile(bool fFinalize = false)
}
}
-static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
+static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
-static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, CValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
@@ -1627,7 +1810,8 @@ public:
bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
- return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
+ return pindex->nHeight >= params.MinBIP9WarningHeight &&
+ ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
((pindex->nVersion >> bit) & 1) != 0 &&
((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
}
@@ -1704,7 +1888,7 @@ static int64_t nBlocksTotal = 0;
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
* Validity checks that depend on the UTXO set are also done; ConnectBlock()
* can fail if those validity checks fail (among other reasons). */
-bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex,
+bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
CCoinsViewCache& view, const CChainParams& chainparams, bool fJustCheck)
{
AssertLockHeld(cs_main);
@@ -1726,7 +1910,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// re-enforce that rule here (at least until we make it impossible for
// GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
- if (state.GetReason() == ValidationInvalidReason::BLOCK_MUTATED) {
+ if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
// problems.
@@ -1788,7 +1972,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
- // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
+ // See BIP30, CVE-2012-1909, and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
@@ -1866,8 +2050,8 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
for (const auto& tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): tried to overwrite transaction"),
- REJECT_INVALID, "bad-txns-BIP30");
+ LogPrintf("ERROR: ConnectBlock(): tried to overwrite transaction\n");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-BIP30");
}
}
}
@@ -1905,21 +2089,17 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
if (!tx.IsCoinBase())
{
CAmount txfee = 0;
- if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
- if (!IsBlockReason(state.GetReason())) {
- // CheckTxInputs may return MISSING_INPUTS or
- // PREMATURE_SPEND but we can't return that, as it's not
- // defined for a block, so we reset the reason flag to
- // CONSENSUS here.
- state.Invalid(ValidationInvalidReason::CONSENSUS, false,
- state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
- }
+ TxValidationState tx_state;
+ if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) {
+ // Any transaction validation failure in ConnectBlock is a block consensus failure
+ state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
+ tx_state.GetRejectReason(), tx_state.GetDebugMessage());
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
nFees += txfee;
if (!MoneyRange(nFees)) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: accumulated fee in the block out of range.", __func__),
- REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
+ LogPrintf("ERROR: %s: accumulated fee in the block out of range.\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-accumulated-fee-outofrange");
}
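// MoneyRange() above keeps the running fee total inside [0, MAX_MONEY]
// (21,000,000 * COIN), so an implausibly large accumulated fee is rejected as
// a block consensus failure instead of being carried forward.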
// Check that transaction is BIP68 final
@@ -1931,8 +2111,8 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
}
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: contains a non-BIP68-final transaction", __func__),
- REJECT_INVALID, "bad-txns-nonfinal");
+ LogPrintf("ERROR: %s: contains a non-BIP68-final transaction\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal");
}
}
@@ -1941,26 +2121,21 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// * p2sh (when P2SH enabled in flags and excludes coinbase)
// * witness (when witness enabled in flags and excludes coinbase)
nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
- if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
- return state.Invalid(ValidationInvalidReason::CONSENSUS, error("ConnectBlock(): too many sigops"),
- REJECT_INVALID, "bad-blk-sigops");
+ if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST) {
+ LogPrintf("ERROR: ConnectBlock(): too many sigops\n");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops");
+ }
txdata.emplace_back(tx);
if (!tx.IsCoinBase())
{
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
- if (fScriptChecks && !CheckInputs(tx, state, view, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) {
- if (state.GetReason() == ValidationInvalidReason::TX_NOT_STANDARD) {
- // CheckInputs may return NOT_STANDARD for extra flags we passed,
- // but we can't return that, as it's not defined for a block, so
- // we reset the reason flag to CONSENSUS here.
- // In the event of a future soft-fork, we may need to
- // consider whether rewriting to CONSENSUS or
- // RECENT_CONSENSUS_CHANGE would be more appropriate.
- state.Invalid(ValidationInvalidReason::CONSENSUS, false,
- state.GetRejectCode(), state.GetRejectReason(), state.GetDebugMessage());
- }
+ TxValidationState tx_state;
+ if (fScriptChecks && !CheckInputs(tx, tx_state, view, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr)) {
+ // Any transaction validation failure in ConnectBlock is a block consensus failure
+ state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
+ tx_state.GetRejectReason(), tx_state.GetDebugMessage());
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
}
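// Both transaction-level failures handled above follow the same shape: run the
// check into a throwaway TxValidationState and, on failure, fold its reason
// strings into the caller's BlockValidationState as a block consensus failure.
// A rough sketch of that pattern (the helper name is illustrative, not part of
// this patch):
static bool TxFailureToBlockFailure(const TxValidationState& tx_state, BlockValidationState& state)
{
    // Any transaction validation failure inside ConnectBlock invalidates the
    // containing block, so the finer-grained tx result is not preserved.
    return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
                         tx_state.GetRejectReason(), tx_state.GetDebugMessage());
}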
@@ -1977,14 +2152,15 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
- if (block.vtx[0]->GetValueOut() > blockReward)
- return state.Invalid(ValidationInvalidReason::CONSENSUS,
- error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
- block.vtx[0]->GetValueOut(), blockReward),
- REJECT_INVALID, "bad-cb-amount");
-
- if (!control.Wait())
- return state.Invalid(ValidationInvalidReason::CONSENSUS, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
+ if (block.vtx[0]->GetValueOut() > blockReward) {
+ LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
+ }
+
+ if (!control.Wait()) {
+ LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
+ }
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
@@ -2014,7 +2190,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
bool CChainState::FlushStateToDisk(
const CChainParams& chainparams,
- CValidationState &state,
+ BlockValidationState &state,
FlushStateMode mode,
int nManualPruneHeight)
{
@@ -2146,7 +2322,7 @@ bool CChainState::FlushStateToDisk(
}
void CChainState::ForceFlushStateToDisk() {
- CValidationState state;
+ BlockValidationState state;
const CChainParams& chainparams = Params();
if (!this->FlushStateToDisk(chainparams, state, FlushStateMode::ALWAYS)) {
LogPrintf("%s: failed to flush state (%s)\n", __func__, FormatStateMessage(state));
@@ -2154,7 +2330,7 @@ void CChainState::ForceFlushStateToDisk() {
}
void CChainState::PruneAndFlush() {
- CValidationState state;
+ BlockValidationState state;
fCheckForPruning = true;
const CChainParams& chainparams = Params();
@@ -2221,14 +2397,12 @@ void static UpdateTip(const CBlockIndex* pindexNew, const CChainParams& chainPar
if (nUpgraded > 0)
AppendWarning(warningMessages, strprintf(_("%d of last 100 blocks have unexpected version").translated, nUpgraded));
}
- LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)", __func__, /* Continued */
+ LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion,
log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
FormatISO8601DateTime(pindexNew->GetBlockTime()),
- GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize());
- if (!warningMessages.empty())
- LogPrintf(" warning='%s'", warningMessages); /* Continued */
- LogPrintf("\n");
+ GuessVerificationProgress(chainParams.TxData(), pindexNew), ::ChainstateActive().CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), ::ChainstateActive().CoinsTip().GetCacheSize(),
+ !warningMessages.empty() ? strprintf(" warning='%s'", warningMessages) : "");
}
@@ -2242,7 +2416,7 @@ void static UpdateTip(const CBlockIndex* pindexNew, const CChainParams& chainPar
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
-bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
+bool CChainState::DisconnectTip(BlockValidationState& state, const CChainParams& chainparams, DisconnectedBlockTransactions *disconnectpool)
{
CBlockIndex *pindexDelete = m_chain.Tip();
assert(pindexDelete);
@@ -2362,7 +2536,7 @@ public:
*
* The block is added to connectTrace if connection succeeds.
*/
-bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
+bool CChainState::ConnectTip(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions &disconnectpool)
{
assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
@@ -2491,8 +2665,10 @@ void CChainState::PruneBlockIndexCandidates() {
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
+ *
+ * @returns true unless a system error occurred
*/
-bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
+bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
AssertLockHeld(cs_main);
@@ -2539,10 +2715,10 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
- if (state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
+ if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
InvalidChainFound(vpindexToConnect.front());
}
- state = CValidationState();
+ state = BlockValidationState();
fInvalidFound = true;
fContinue = false;
break;
@@ -2580,7 +2756,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
return true;
}
-static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
+static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
bool fNotify = false;
bool fInitialBlockDownload = false;
static CBlockIndex* pindexHeaderOld = nullptr;
@@ -2599,6 +2775,7 @@ static void NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) {
if (fNotify) {
uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
}
+ return fNotify;
}
static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
@@ -2609,16 +2786,7 @@ static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
}
}
-/**
- * Make the best chain active, in multiple steps. The result is either failure
- * or an activated best chain. pblock is either nullptr or a pointer to a block
- * that is already loaded (to avoid loading it again from disk).
- *
- * ActivateBestChain is split into steps (see ActivateBestChainStep) so that
- * we avoid holding cs_main for an extended period of time; the length of this
- * call may be quite long during reindexing or a substantial reorg.
- */
-bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
+bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
// Note that while we're often called here from ProcessNewBlock, this is
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
@@ -2665,8 +2833,10 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
bool fInvalidFound = false;
std::shared_ptr<const CBlock> nullBlockPtr;
- if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace))
+ if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : nullBlockPtr, fInvalidFound, connectTrace)) {
+ // A system error occurred
return false;
+ }
blocks_connected = true;
if (fInvalidFound) {
@@ -2716,11 +2886,11 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
return true;
}
-bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
+bool ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock));
}
-bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex)
+bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex)
{
{
LOCK(cs_main);
@@ -2748,16 +2918,48 @@ bool CChainState::PreciousBlock(CValidationState& state, const CChainParams& par
return ActivateBestChain(state, params, std::shared_ptr<const CBlock>());
}
-bool PreciousBlock(CValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
+bool PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) {
return ::ChainstateActive().PreciousBlock(state, params, pindex);
}
-bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
+bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex)
{
CBlockIndex* to_mark_failed = pindex;
bool pindex_was_in_chain = false;
int disconnected = 0;
+ // We do not allow ActivateBestChain() to run while InvalidateBlock() is
+ // running, as that could cause the tip to change while we disconnect
+ // blocks.
+ LOCK(m_cs_chainstate);
+
+ // We'll be acquiring and releasing cs_main below, to allow the validation
+ // callbacks to run. However, we should keep the block index in a
+ // consistent state as we disconnect blocks -- in particular we need to
+ // add equal-work blocks to setBlockIndexCandidates as we disconnect.
+ // To avoid walking the block index repeatedly in search of candidates,
+ // build a map once so that we can look up candidate blocks by chain
+ // work as we go.
+ std::multimap<const arith_uint256, CBlockIndex *> candidate_blocks_by_work;
+
+ {
+ LOCK(cs_main);
+ for (const auto& entry : m_blockman.m_block_index) {
+ CBlockIndex *candidate = entry.second;
+ // We don't need to put anything in our active chain into the
+ // multimap, because those candidates will be found and considered
+ // as we disconnect.
+ // Instead, consider only non-active-chain blocks that have at
+ // least as much work as where we expect the new tip to end up.
+ if (!m_chain.Contains(candidate) &&
+ !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
+ candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
+ candidate->HaveTxsDownloaded()) {
+ candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
+ }
+ }
+ }
+
// Disconnect (descendants of) pindex, and mark them invalid.
while (true) {
if (ShutdownRequested()) break;
@@ -2800,11 +3002,24 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c
setDirtyBlockIndex.insert(to_mark_failed);
}
+ // Add any equal or more work headers to setBlockIndexCandidates
+ auto candidate_it = candidate_blocks_by_work.lower_bound(invalid_walk_tip->pprev->nChainWork);
+ while (candidate_it != candidate_blocks_by_work.end()) {
+ if (!CBlockIndexWorkComparator()(candidate_it->second, invalid_walk_tip->pprev)) {
+ setBlockIndexCandidates.insert(candidate_it->second);
+ candidate_it = candidate_blocks_by_work.erase(candidate_it);
+ } else {
+ ++candidate_it;
+ }
+ }
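// Note on the loop just above: candidate_blocks_by_work is keyed on raw
// nChainWork, so lower_bound() lands on the first candidate with at least the
// new tip's work, but entries with exactly equal work still need the full
// CBlockIndexWorkComparator (which also breaks work ties deterministically)
// before being promoted into setBlockIndexCandidates; anything that fails the
// comparator stays in the multimap for a later, lower-work iteration.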
+
// Track the last disconnected block, so we can correct its BLOCK_FAILED_CHILD status in future
// iterations, or, if it's the last one, call InvalidChainFound on it.
to_mark_failed = invalid_walk_tip;
}
+ CheckBlockIndex(chainparams.GetConsensus());
+
{
LOCK(cs_main);
if (m_chain.Contains(to_mark_failed)) {
@@ -2818,8 +3033,13 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c
setBlockIndexCandidates.erase(to_mark_failed);
m_blockman.m_failed_blocks.insert(to_mark_failed);
- // The resulting new best tip may not be in setBlockIndexCandidates anymore, so
- // add it again.
+ // If any new blocks somehow arrived while we were disconnecting
+ // (above), then the pre-calculation of what should go into
+ // setBlockIndexCandidates may have missed entries. This would
+ // technically be an inconsistency in the block index, but if we clean
+ // it up here, this should be an essentially unobservable error.
+ // Loop back over all block index entries and add any missing entries
+ // to setBlockIndexCandidates.
BlockMap::iterator it = m_blockman.m_block_index.begin();
while (it != m_blockman.m_block_index.end()) {
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(it->second, m_chain.Tip())) {
@@ -2838,7 +3058,7 @@ bool CChainState::InvalidateBlock(CValidationState& state, const CChainParams& c
return true;
}
-bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
+bool InvalidateBlock(BlockValidationState& state, const CChainParams& chainparams, CBlockIndex *pindex) {
return ::ChainstateActive().InvalidateBlock(state, chainparams, pindex);
}
@@ -3012,7 +3232,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n
return true;
}
-static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
+static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
@@ -3034,16 +3254,16 @@ static bool FindUndoPos(CValidationState &state, int nFile, FlatFilePos &pos, un
return true;
}
-static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
+static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "high-hash", "proof of work failed");
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "high-hash", "proof of work failed");
return true;
}
-bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
+bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
@@ -3060,13 +3280,13 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
bool mutated;
uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
if (block.hashMerkleRoot != hashMerkleRoot2)
- return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txnmrklroot", "hashMerkleRoot mismatch");
+ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txnmrklroot", "hashMerkleRoot mismatch");
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
// of transactions in a block without affecting the merkle root of a block,
// while still invalidating it.
if (mutated)
- return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-txns-duplicate", "duplicate transaction");
+ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-txns-duplicate", "duplicate transaction");
}
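// Illustration (not part of this patch) of the CVE-2012-2459 malleation the
// mutated flag guards against: when a row of the merkle tree has an odd number
// of hashes the last one is paired with itself, so appending a duplicate of the
// final transaction produces a different (invalid) block with the same merkle
// root. Using the CHash256 writer seen elsewhere in this file:
static uint256 MerkleParent(const uint256& left, const uint256& right)
{
    uint256 parent;
    CHash256().Write(left.begin(), 32).Write(right.begin(), 32).Finalize(parent.begin());
    return parent;
}
// root of {a, b, c}    == MerkleParent(MerkleParent(a, b), MerkleParent(c, c))
// root of {a, b, c, c} == MerkleParent(MerkleParent(a, b), MerkleParent(c, c))
// BlockMerkleRoot() reports the duplicated layout through its mutated out-param.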
// All potential-corruption validation must be done before we do any
@@ -3077,28 +3297,34 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// Size limits
if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-length", "size limits failed");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-length", "size limits failed");
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-missing", "first tx is not coinbase");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-missing", "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i]->IsCoinBase())
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-multiple", "more than one coinbase");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-multiple", "more than one coinbase");
// Check transactions
- for (const auto& tx : block.vtx)
- if (!CheckTransaction(*tx, state, true))
- return state.Invalid(state.GetReason(), false, state.GetRejectCode(), state.GetRejectReason(),
- strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), state.GetDebugMessage()));
-
+ // Must check for duplicate inputs (see CVE-2018-17144)
+ for (const auto& tx : block.vtx) {
+ TxValidationState tx_state;
+ if (!CheckTransaction(*tx, tx_state)) {
+ // CheckBlock() does context-free validation checks. The only
+ // possible failures are consensus failures.
+ assert(tx_state.GetResult() == TxValidationResult::TX_CONSENSUS);
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(),
+ strprintf("Transaction check failed (tx hash %s) %s", tx->GetHash().ToString(), tx_state.GetDebugMessage()));
+ }
+ }
unsigned int nSigOps = 0;
for (const auto& tx : block.vtx)
{
nSigOps += GetLegacySigOpCount(*tx);
}
if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-sigops", "out-of-bounds SigOpCount");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-sigops", "out-of-bounds SigOpCount");
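// Worked numbers for the bound just above, assuming the consensus constants in
// this tree (MAX_BLOCK_SIGOPS_COST = 80000, WITNESS_SCALE_FACTOR = 4): legacy
// sigops are scaled by 4, so this context-free check rejects any block with
// more than 20000 legacy-counted signature operations.
static_assert(80000 / 4 == 20000, "legacy sigop headroom under the weight-scaled limit");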
if (fCheckPOW && fCheckMerkleRoot)
block.fChecked = true;
@@ -3112,9 +3338,7 @@ bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& pa
return (height >= params.SegwitHeight);
}
-// Compute at which vout of the block's coinbase transaction the witness
-// commitment occurs, or -1 if not found.
-static int GetWitnessCommitmentIndex(const CBlock& block)
+int GetWitnessCommitmentIndex(const CBlock& block)
{
int commitpos = -1;
if (!block.vtx.empty()) {
@@ -3193,7 +3417,7 @@ static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOC
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
@@ -3201,7 +3425,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
// Check proof of work
const Consensus::Params& consensusParams = params.GetConsensus();
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "bad-diffbits", "incorrect proof of work");
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
// Check against checkpoints
if (fCheckpointsEnabled) {
@@ -3209,24 +3433,26 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
// g_blockman.m_block_index.
CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints());
- if (pcheckpoint && nHeight < pcheckpoint->nHeight)
- return state.Invalid(ValidationInvalidReason::BLOCK_CHECKPOINT, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
+ if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
+ LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight);
+ return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint");
+ }
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
// Check timestamp
if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
- return state.Invalid(ValidationInvalidReason::BLOCK_TIME_FUTURE, false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
+ return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
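// Concrete bounds for the two timestamp rules above, per the constants and
// helpers defined elsewhere in this tree: a header must be strictly later than
// the median time of the previous 11 blocks (GetMedianTimePast()) and at most
// MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60 seconds ahead of our adjusted clock.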
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_HEADER, false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
return true;
@@ -3238,7 +3464,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
+static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
@@ -3256,7 +3482,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// Check that all transactions are finalized
for (const auto& tx : block.vtx) {
if (!IsFinalTx(*tx, nHeight, nLockTimeCutoff)) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-txns-nonfinal", "non-final transaction");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-txns-nonfinal", "non-final transaction");
}
}
@@ -3266,7 +3492,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
CScript expect = CScript() << nHeight;
if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(), block.vtx[0]->vin[0].scriptSig.begin())) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-cb-height", "block height mismatch in coinbase");
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-height", "block height mismatch in coinbase");
}
}
@@ -3288,11 +3514,11 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// already does not permit it, it is impossible to trigger in the
// witness tree.
if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) {
- return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
+ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__));
}
CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) {
- return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
+ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__));
}
fHaveWitness = true;
}
@@ -3302,7 +3528,7 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
if (!fHaveWitness) {
for (const auto& tx : block.vtx) {
if (tx->HasWitness()) {
- return state.Invalid(ValidationInvalidReason::BLOCK_MUTATED, false, REJECT_INVALID, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
+ return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "unexpected-witness", strprintf("%s : unexpected witness data found", __func__));
}
}
}
@@ -3314,13 +3540,13 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
// the block hash, so we couldn't mark the block as permanently
// failed).
if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
- return state.Invalid(ValidationInvalidReason::CONSENSUS, false, REJECT_INVALID, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
+ return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-blk-weight", strprintf("%s : weight limit failed", __func__));
}
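// For scale on the limit checked above (BIP141): block weight is counted as
// base size * 3 + total size with witnesses, and MAX_BLOCK_WEIGHT is 4,000,000
// in this tree, so a witness-free block tops out at 1 MB while witness bytes
// are discounted 4:1.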
return true;
}
-bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
+bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex)
{
AssertLockHeld(cs_main);
// Check for duplicate
@@ -3333,8 +3559,10 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState
pindex = miSelf->second;
if (ppindex)
*ppindex = pindex;
- if (pindex->nStatus & BLOCK_FAILED_MASK)
- return state.Invalid(ValidationInvalidReason::CACHED_INVALID, error("%s: block %s is marked invalid", __func__, hash.ToString()), 0, "duplicate");
+ if (pindex->nStatus & BLOCK_FAILED_MASK) {
+ LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
+ return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
+ }
return true;
}
@@ -3344,11 +3572,15 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState
// Get prev block index
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
- if (mi == m_block_index.end())
- return state.Invalid(ValidationInvalidReason::BLOCK_MISSING_PREV, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
+ if (mi == m_block_index.end()) {
+ LogPrintf("ERROR: %s: prev block not found\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
+ }
pindexPrev = (*mi).second;
- if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
+ LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
+ }
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
@@ -3385,7 +3617,8 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState
setDirtyBlockIndex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- return state.Invalid(ValidationInvalidReason::BLOCK_INVALID_PREV, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
}
}
@@ -3400,9 +3633,8 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, CValidationState
}
// Exposed wrapper for AcceptBlockHeader
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid)
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
{
- if (first_invalid != nullptr) first_invalid->SetNull();
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
@@ -3411,7 +3643,6 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus());
if (!accepted) {
- if (first_invalid) *first_invalid = header;
return false;
}
if (ppindex) {
@@ -3419,9 +3650,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
}
}
}
- NotifyHeaderTip();
- {
- LOCK(cs_main);
+ if (NotifyHeaderTip()) {
if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) {
LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight);
}
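// The percentage logged above estimates the total header count as
//   current_height + seconds_behind / nPowTargetSpacing
// i.e. one additional header per target spacing (600s on mainnet) for the time
// still to be caught up. Rough numeric check of the formula:
//   height = 500000, tip 6 hours behind => 500000 + 21600/600 = 500036 expected,
//   so progress = 100.0 * 500000 / 500036 ≈ 99.99%.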
@@ -3449,7 +3678,7 @@ static FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, const CChai
}
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
+bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
{
const CBlock& block = *pblock;
@@ -3499,8 +3728,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
- assert(IsBlockReason(state.GetReason()));
- if (state.IsInvalid() && state.GetReason() != ValidationInvalidReason::BLOCK_MUTATED) {
+ if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
@@ -3539,7 +3767,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
{
CBlockIndex *pindex = nullptr;
if (fNewBlock) *fNewBlock = false;
- CValidationState state;
+ BlockValidationState state;
// CheckBlock() does not support multi-threaded block validation because CBlock::fChecked can cause a data race.
// Therefore, the following critical section must include the CheckBlock() call as well.
@@ -3560,14 +3788,14 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
NotifyHeaderTip();
- CValidationState state; // Only used to report errors, not invalidity - ignore it
+ BlockValidationState state; // Only used to report errors, not invalidity - ignore it
if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock))
return error("%s: ActivateBestChain failed (%s)", __func__, FormatStateMessage(state));
return true;
}
-bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
+bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
AssertLockHeld(cs_main);
assert(pindexPrev && pindexPrev == ::ChainActive().Tip());
@@ -3678,7 +3906,7 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(int nManualPruneHeight)
{
- CValidationState state;
+ BlockValidationState state;
const CChainParams& chainparams = Params();
if (!::ChainstateActive().FlushStateToDisk(
chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) {
@@ -3921,28 +4149,31 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams) EXCLUSIVE_LOCKS_RE
return true;
}
-bool LoadChainTip(const CChainParams& chainparams)
+bool CChainState::LoadChainTip(const CChainParams& chainparams)
{
AssertLockHeld(cs_main);
- const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+ const CCoinsViewCache& coins_cache = CoinsTip();
assert(!coins_cache.GetBestBlock().IsNull()); // Never called when the coins view is empty
+ const CBlockIndex* tip = m_chain.Tip();
- if (::ChainActive().Tip() &&
- ::ChainActive().Tip()->GetBlockHash() == coins_cache.GetBestBlock()) return true;
+ if (tip && tip->GetBlockHash() == coins_cache.GetBestBlock()) {
+ return true;
+ }
// Load pointer to end of best chain
CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock());
if (!pindex) {
return false;
}
- ::ChainActive().SetTip(pindex);
-
- ::ChainstateActive().PruneBlockIndexCandidates();
+ m_chain.SetTip(pindex);
+ PruneBlockIndexCandidates();
+ tip = m_chain.Tip();
LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
- ::ChainActive().Tip()->GetBlockHash().ToString(), ::ChainActive().Height(),
- FormatISO8601DateTime(::ChainActive().Tip()->GetBlockTime()),
- GuessVerificationProgress(chainparams.TxData(), ::ChainActive().Tip()));
+ tip->GetBlockHash().ToString(),
+ m_chain.Height(),
+ FormatISO8601DateTime(tip->GetBlockTime()),
+ GuessVerificationProgress(chainparams.TxData(), tip));
return true;
}
@@ -3971,7 +4202,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
CBlockIndex* pindex;
CBlockIndex* pindexFailure = nullptr;
int nGoodTransactions = 0;
- CValidationState state;
+ BlockValidationState state;
int reportDone = 0;
LogPrintf("[0%%]..."); /* Continued */
for (pindex = ::ChainActive().Tip(); pindex && pindex->pprev; pindex = pindex->pprev) {
@@ -4077,13 +4308,14 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i
return true;
}
-bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
+bool CChainState::ReplayBlocks(const CChainParams& params)
{
LOCK(cs_main);
- CCoinsViewCache cache(view);
+ CCoinsView& db = this->CoinsDB();
+ CCoinsViewCache cache(&db);
- std::vector<uint256> hashHeads = view->GetHeadBlocks();
+ std::vector<uint256> hashHeads = db.GetHeadBlocks();
if (hashHeads.empty()) return true; // We're already in a consistent state.
if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
@@ -4143,10 +4375,6 @@ bool CChainState::ReplayBlocks(const CChainParams& params, CCoinsView* view)
return true;
}
-bool ReplayBlocks(const CChainParams& params, CCoinsView* view) {
- return ::ChainstateActive().ReplayBlocks(params, view);
-}
-
//! Helper for CChainState::RewindBlockIndex
void CChainState::EraseBlockData(CBlockIndex* index)
{
@@ -4218,7 +4446,7 @@ bool CChainState::RewindBlockIndex(const CChainParams& params)
}
// nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
- CValidationState state;
+ BlockValidationState state;
// Loop until the tip is below nHeight, or we reach a pruned block.
while (!ShutdownRequested()) {
{
@@ -4286,7 +4514,7 @@ bool RewindBlockIndex(const CChainParams& params) {
// FlushStateToDisk can possibly read ::ChainActive(). Be conservative
// and skip it here, we're about to -reindex-chainstate anyway, so
// it'll get called a bunch real soon.
- CValidationState state;
+ BlockValidationState state;
if (!::ChainstateActive().FlushStateToDisk(params, state, FlushStateMode::ALWAYS)) {
LogPrintf("RewindBlockIndex: unable to flush state to disk (%s)\n", FormatStateMessage(state));
return false;
@@ -4438,7 +4666,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
// process in case the block isn't known yet
CBlockIndex* pindex = LookupBlockIndex(hash);
if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
- CValidationState state;
+ BlockValidationState state;
if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
nLoaded++;
}
@@ -4452,7 +4680,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
// Activate the genesis block so normal node progress can continue
if (hash == chainparams.GetConsensus().hashGenesisBlock) {
- CValidationState state;
+ BlockValidationState state;
if (!ActivateBestChain(state, chainparams)) {
break;
}
@@ -4475,7 +4703,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi
LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
- CValidationState dummy;
+ BlockValidationState dummy;
if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
{
nLoaded++;
@@ -4752,10 +4980,10 @@ bool LoadMempool(CTxMemPool& pool)
if (amountdelta) {
pool.PrioritiseTransaction(tx->GetHash(), amountdelta);
}
- CValidationState state;
+ TxValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
- AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nullptr /* pfMissingInputs */, nTime,
+ AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime,
nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */,
false /* test_accept */);
if (state.IsValid()) {
@@ -4826,8 +5054,8 @@ bool DumpMempool(const CTxMemPool& pool)
file << (uint64_t)vinfo.size();
for (const auto& i : vinfo) {
file << *(i.tx);
- file << (int64_t)i.nTime;
- file << (int64_t)i.nFeeDelta;
+ file << int64_t{count_seconds(i.m_time)};
+ file << int64_t{i.nFeeDelta};
mapDeltas.erase(i.tx->GetHash());
}
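// Sketch of the helper the new serialization above relies on; count_seconds()
// lives in this tree's time utilities, and the one-liner here is an assumption
// about its shape rather than a copy: mempool entry times are now
// std::chrono::seconds, and count_seconds() unwraps them to the raw int64_t
// that gets written to mempool.dat.
inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); }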