Diffstat (limited to 'src/validation.cpp')
-rw-r--r--   src/validation.cpp   929
1 file changed, 605 insertions, 324 deletions
diff --git a/src/validation.cpp b/src/validation.cpp
index d276cea2f7..b42b398619 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
+// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -22,6 +22,7 @@
#include <flatfile.h>
#include <fs.h>
#include <hash.h>
+#include <kernel/mempool_entry.h>
#include <logging.h>
#include <logging/timer.h>
#include <node/blockstorage.h>
@@ -63,6 +64,7 @@
#include <numeric>
#include <optional>
#include <string>
+#include <utility>
using kernel::CCoinsStats;
using kernel::CoinStatsHashType;
@@ -82,9 +84,6 @@ using node::SnapshotMetadata;
using node::UndoReadFromDisk;
using node::UnlinkPrunedFiles;
-#define MICRO 0.000001
-#define MILLI 0.001
-
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
/** Time to wait between writing blocks/block index to disk. */
@@ -108,30 +107,11 @@ const std::vector<std::string> CHECKLEVEL_DOC {
* */
static constexpr int PRUNE_LOCK_BUFFER{10};
-/**
- * Mutex to guard access to validation specific variables, such as reading
- * or changing the chainstate.
- *
- * This may also need to be locked when updating the transaction pool, e.g. on
- * AcceptToMemoryPool. See CTxMemPool::cs comment for details.
- *
- * The transaction pool has a separate lock to allow reading from it and the
- * chainstate at the same time.
- */
-RecursiveMutex cs_main;
-
GlobalMutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
-bool g_parallel_script_checks{false};
-bool fCheckBlockIndex = false;
-bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
-int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
-uint256 hashAssumeValid;
-arith_uint256 nMinimumChainWork;
-
-const CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const
+const CBlockIndex* Chainstate::FindForkInGlobalIndex(const CBlockLocator& locator) const
{
AssertLockHeld(cs_main);
@@ -273,7 +253,7 @@ static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache)
coins_cache.Uncache(removed);
}
-static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+static bool IsCurrentForFeeEstimation(Chainstate& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
AssertLockHeld(cs_main);
if (active_chainstate.IsInitialBlockDownload())
@@ -286,7 +266,7 @@ static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_
return true;
}
-void CChainState::MaybeUpdateMempoolForReorg(
+void Chainstate::MaybeUpdateMempoolForReorg(
DisconnectedBlockTransactions& disconnectpool,
bool fAddToMempool)
{
@@ -424,11 +404,13 @@ namespace {
class MemPoolAccept
{
public:
- explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate),
- m_limit_ancestors(m_pool.m_limits.ancestor_count),
- m_limit_ancestor_size(m_pool.m_limits.ancestor_size_vbytes),
- m_limit_descendants(m_pool.m_limits.descendant_count),
- m_limit_descendant_size(m_pool.m_limits.descendant_size_vbytes) {
+ explicit MemPoolAccept(CTxMemPool& mempool, Chainstate& active_chainstate) :
+ m_pool(mempool),
+ m_view(&m_dummy),
+ m_viewmempool(&active_chainstate.CoinsTip(), m_pool),
+ m_active_chainstate(active_chainstate),
+ m_limits{m_pool.m_limits}
+ {
}
// We put the arguments we're handed into a struct, so we can pass them
@@ -449,7 +431,7 @@ public:
/** Whether we allow transactions to replace mempool transactions by BIP125 rules. If false,
* any transaction spending the same inputs as a transaction in the mempool is considered
* a conflict. */
- const bool m_allow_bip125_replacement;
+ const bool m_allow_replacement;
/** When true, the mempool will not be trimmed when individual transactions are submitted in
* Finalize(). Instead, limits should be enforced at the end to ensure the package is not
* partially submitted.
@@ -469,7 +451,7 @@ public:
/* m_bypass_limits */ bypass_limits,
/* m_coins_to_uncache */ coins_to_uncache,
/* m_test_accept */ test_accept,
- /* m_allow_bip125_replacement */ true,
+ /* m_allow_replacement */ true,
/* m_package_submission */ false,
/* m_package_feerates */ false,
};
@@ -483,7 +465,7 @@ public:
/* m_bypass_limits */ false,
/* m_coins_to_uncache */ coins_to_uncache,
/* m_test_accept */ true,
- /* m_allow_bip125_replacement */ false,
+ /* m_allow_replacement */ false,
/* m_package_submission */ false, // not submitting to mempool
/* m_package_feerates */ false,
};
@@ -497,7 +479,7 @@ public:
/* m_bypass_limits */ false,
/* m_coins_to_uncache */ coins_to_uncache,
/* m_test_accept */ false,
- /* m_allow_bip125_replacement */ false,
+ /* m_allow_replacement */ false,
/* m_package_submission */ true,
/* m_package_feerates */ true,
};
@@ -510,7 +492,7 @@ public:
/* m_bypass_limits */ false,
/* m_coins_to_uncache */ package_args.m_coins_to_uncache,
/* m_test_accept */ package_args.m_test_accept,
- /* m_allow_bip125_replacement */ true,
+ /* m_allow_replacement */ true,
/* m_package_submission */ false,
/* m_package_feerates */ false, // only 1 transaction
};
@@ -524,7 +506,7 @@ public:
bool bypass_limits,
std::vector<COutPoint>& coins_to_uncache,
bool test_accept,
- bool allow_bip125_replacement,
+ bool allow_replacement,
bool package_submission,
bool package_feerates)
: m_chainparams{chainparams},
@@ -532,7 +514,7 @@ public:
m_bypass_limits{bypass_limits},
m_coins_to_uncache{coins_to_uncache},
m_test_accept{test_accept},
- m_allow_bip125_replacement{allow_bip125_replacement},
+ m_allow_replacement{allow_replacement},
m_package_submission{package_submission},
m_package_feerates{package_feerates}
{
@@ -589,6 +571,11 @@ private:
/** Total virtual size of all transactions being replaced. */
size_t m_conflicting_size{0};
+ /** If we're doing package validation (i.e. m_package_feerates=true), the "effective"
+ * package feerate of this transaction is the total fees divided by the total size of
+ * transactions (which may include its ancestors and/or descendants). */
+ CFeeRate m_package_feerate{0};
+
const CTransactionRef& m_ptx;
/** Txid. */
const uint256& m_hash;
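
The new m_package_feerate field records the "effective" feerate used when a transaction is judged as part of a package: total fees divided by total virtual size. A small worked example with made-up numbers (the real code presumably builds a CFeeRate from m_total_modified_fees and m_total_vsize, which appear in later hunks):

#include <cassert>
#include <cstdint>

int main()
{
    // Illustrative only: a zero-fee parent bumped by its child.
    const int64_t parent_fee{0},    parent_vsize{100};
    const int64_t child_fee{5'000}, child_vsize{150};

    // Effective package feerate = total fees / total vsize = 5000 sat / 250 vB = 20 sat/vB.
    const int64_t package_fee{parent_fee + child_fee};
    const int64_t package_vsize{parent_vsize + child_vsize};
    assert(package_fee / package_vsize == 20);
    return 0;
}
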
@@ -658,15 +645,9 @@ private:
CCoinsViewMemPool m_viewmempool;
CCoinsView m_dummy;
- CChainState& m_active_chainstate;
+ Chainstate& m_active_chainstate;
- // The package limits in effect at the time of invocation.
- const size_t m_limit_ancestors;
- const size_t m_limit_ancestor_size;
- // These may be modified while evaluating a transaction (eg to account for
- // in-mempool conflicts; see below).
- size_t m_limit_descendants;
- size_t m_limit_descendant_size;
+ CTxMemPool::Limits m_limits;
/** Whether the transaction(s) would replace any mempool transactions. If so, RBF rules apply. */
bool m_rbf{false};
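
The four separate ancestor/descendant limit members collapse into a single CTxMemPool::Limits value. As an illustration of how such an aggregate is built and adjusted (a standalone sketch; the field names follow the designated initializers used later in this diff, while the numeric values are placeholders, not the real policy defaults):

#include <cstdint>

// Stand-in for CTxMemPool::Limits, for illustration only.
struct Limits {
    int64_t ancestor_count;
    int64_t ancestor_size_vbytes;
    int64_t descendant_count;
    int64_t descendant_size_vbytes;
};

int main()
{
    Limits limits{
        .ancestor_count = 25,
        .ancestor_size_vbytes = 101'000,
        .descendant_count = 25,
        .descendant_size_vbytes = 101'000,
    };
    // MemPoolAccept copies the pool-wide limits once (m_limits{m_pool.m_limits}) and then
    // tweaks its own copy, e.g. granting one extra descendant slot when a transaction
    // directly conflicts with exactly one in-mempool transaction.
    limits.descendant_count += 1;
    return limits.descendant_count == 26 ? 0 : 1;
}
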
@@ -703,10 +684,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, reason);
}
- // Do not work on transactions that are too small.
- // A transaction with 1 segwit input and 1 P2WPHK output has non-witness size of 82 bytes.
- // Transactions smaller than this are not relayed to mitigate CVE-2017-12842 by not relaying
- // 64-byte transactions.
+ // Transactions smaller than 65 non-witness bytes are not relayed to mitigate CVE-2017-12842.
if (::GetSerializeSize(tx, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) < MIN_STANDARD_TX_NONWITNESS_SIZE)
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "tx-size-small");
@@ -731,7 +709,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
{
const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout);
if (ptxConflicting) {
- if (!args.m_allow_bip125_replacement) {
+ if (!args.m_allow_replacement) {
// Transaction conflicts with a mempool tx, but we're not allowing replacements.
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed");
}
@@ -861,8 +839,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
// Specifically, the subset of RBF transactions which we allow despite chain limits are those which
// conflict directly with exactly one other transaction (but may evict children of said transaction),
// and which are not adding any new mempool dependencies. Note that the "no new mempool dependencies"
- // check is accomplished later, so we don't bother doing anything about it here, but if BIP 125 is
- // amended, we may need to move that check to here instead of removing it wholesale.
+ // check is accomplished later, so we don't bother doing anything about it here, but if our
+ // policy changes, we may need to move that check to here instead of removing it wholesale.
//
// Such transactions are clearly not merging any existing packages, so we are only concerned with
// ensuring that (a) no package is growing past the package size (not count) limits and (b) we are
@@ -879,15 +857,13 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
assert(ws.m_iters_conflicting.size() == 1);
CTxMemPool::txiter conflict = *ws.m_iters_conflicting.begin();
- m_limit_descendants += 1;
- m_limit_descendant_size += conflict->GetSizeWithDescendants();
+ m_limits.descendant_count += 1;
+ m_limits.descendant_size_vbytes += conflict->GetSizeWithDescendants();
}
- std::string errString;
- if (!m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, m_limit_descendant_size, errString)) {
- ws.m_ancestors.clear();
+ auto ancestors{m_pool.CalculateMemPoolAncestors(*entry, m_limits)};
+ if (!ancestors) {
// If CalculateMemPoolAncestors fails second time, we want the original error string.
- std::string dummy_err_string;
// Contracting/payment channels CPFP carve-out:
// If the new transaction is relatively small (up to 40k weight)
// and has at most one ancestor (ie ancestor limit of 2, including
@@ -899,12 +875,22 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
// to be secure by simply only having two immediately-spendable
// outputs - one for each counterparty. For more info on the uses for
// this, see https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2018-November/016518.html
- if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT ||
- !m_pool.CalculateMemPoolAncestors(*entry, ws.m_ancestors, 2, m_limit_ancestor_size, m_limit_descendants + 1, m_limit_descendant_size + EXTRA_DESCENDANT_TX_SIZE_LIMIT, dummy_err_string)) {
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", errString);
+ CTxMemPool::Limits cpfp_carve_out_limits{
+ .ancestor_count = 2,
+ .ancestor_size_vbytes = m_limits.ancestor_size_vbytes,
+ .descendant_count = m_limits.descendant_count + 1,
+ .descendant_size_vbytes = m_limits.descendant_size_vbytes + EXTRA_DESCENDANT_TX_SIZE_LIMIT,
+ };
+ const auto error_message{util::ErrorString(ancestors).original};
+ if (ws.m_vsize > EXTRA_DESCENDANT_TX_SIZE_LIMIT) {
+ return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
}
+ ancestors = m_pool.CalculateMemPoolAncestors(*entry, cpfp_carve_out_limits);
+ if (!ancestors) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "too-long-mempool-chain", error_message);
}
+ ws.m_ancestors = *ancestors;
+
// A transaction that spends outputs that would be replaced by it is invalid. Now
// that we have the set of all ancestors we can detect this
// pathological case by making sure ws.m_conflicts and ws.m_ancestors don't
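
CalculateMemPoolAncestors() now returns a result object rather than filling an out-parameter plus error string, which is why the code above dereferences ancestors on success and pulls the message out with util::ErrorString() on failure. A self-contained sketch of that consumption pattern with a simplified stand-in result type (the retry mirrors the CPFP carve-out above; everything except the pattern itself is illustrative):

#include <iostream>
#include <optional>
#include <set>
#include <string>

// Simplified stand-in for util::Result<CTxMemPool::setEntries>.
struct AncestorsResult {
    std::optional<std::set<int>> value;
    std::string error;
    explicit operator bool() const { return value.has_value(); }
    const std::set<int>& operator*() const { return *value; }
};

// Hypothetical helper shaped like CTxMemPool::CalculateMemPoolAncestors().
AncestorsResult CalcAncestors(bool ok)
{
    if (ok) return {std::set<int>{1, 2, 3}, ""};
    return {std::nullopt, "too many unconfirmed ancestors"};
}

int main()
{
    auto ancestors{CalcAncestors(false)};
    if (!ancestors) {
        // Keep the original error, then retry once with the relaxed carve-out limits.
        const std::string error_message{ancestors.error};
        ancestors = CalcAncestors(true);
        if (!ancestors) {
            std::cerr << "too-long-mempool-chain: " << error_message << '\n';
            return 1;
        }
    }
    const std::set<int> ws_ancestors = *ancestors; // cf. ws.m_ancestors = *ancestors;
    return ws_ancestors.size() == 3 ? 0 : 1;
}
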
@@ -929,7 +915,7 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
TxValidationState& state = ws.m_state;
CFeeRate newFeeRate(ws.m_modified_fees, ws.m_vsize);
- // The replacement transaction must have a higher feerate than its direct conflicts.
+ // Enforce Rule #6. The replacement transaction must have a higher feerate than its direct conflicts.
// - The motivation for this check is to ensure that the replacement transaction is preferable for
// block-inclusion, compared to what would be removed from the mempool.
// - This logic predates ancestor feerate-based transaction selection, which is why it doesn't
@@ -942,18 +928,18 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "insufficient fee", *err_string);
}
- // Calculate all conflicting entries and enforce BIP125 Rule #5.
+ // Calculate all conflicting entries and enforce Rule #5.
if (const auto err_string{GetEntriesForConflicts(tx, m_pool, ws.m_iters_conflicting, ws.m_all_conflicting)}) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
"too many potential replacements", *err_string);
}
- // Enforce BIP125 Rule #2.
+ // Enforce Rule #2.
if (const auto err_string{HasNoNewUnconfirmed(tx, m_pool, ws.m_iters_conflicting)}) {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
"replacement-adds-unconfirmed", *err_string);
}
// Check if it's economically rational to mine this transaction rather than the ones it
- // replaces and pays for its own relay fees. Enforce BIP125 Rules #3 and #4.
+ // replaces and pays for its own relay fees. Enforce Rules #3 and #4.
for (CTxMemPool::txiter it : ws.m_all_conflicting) {
ws.m_conflicting_fees += it->GetModifiedFee();
ws.m_conflicting_size += it->GetTxSize();
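
The comments now refer to the numbered replacement rules without the "BIP125" prefix; the checks themselves are unchanged here. As a worked example of Rules #3 and #4 with illustrative numbers (the real incremental relay feerate comes from -incrementalrelayfee):

#include <cassert>
#include <cstdint>

int main()
{
    const int64_t conflicting_fees{10'000};   // total fees paid by the transactions being evicted (Rule #3 floor)
    const int64_t replacement_vsize{500};     // vsize of the replacement transaction
    const int64_t incremental_feerate{1};     // sat/vB, illustrative

    // Rule #3: match what the replaced transactions already paid.
    // Rule #4: additionally pay for the replacement's own relay bandwidth.
    const int64_t minimum_fee{conflicting_fees + incremental_feerate * replacement_vsize};
    assert(minimum_fee == 10'500);
    return 0;
}
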
@@ -976,8 +962,7 @@ bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txn
{ return !m_pool.exists(GenTxid::Txid(tx->GetHash()));}));
std::string err_string;
- if (!m_pool.CheckPackageLimits(txns, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants,
- m_limit_descendant_size, err_string)) {
+ if (!m_pool.CheckPackageLimits(txns, m_limits, err_string)) {
// This is a package-wide error, separate from an individual transaction error.
return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", err_string);
}
@@ -1120,17 +1105,18 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
// Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
// last calculation done in PreChecks, since package ancestors have already been submitted.
- std::string unused_err_string;
- if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors,
- m_limit_ancestor_size, m_limit_descendants,
- m_limit_descendant_size, unused_err_string)) {
- results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
- // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
- Assume(false);
- all_submitted = false;
- package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
- strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
- ws.m_ptx->GetHash().ToString()));
+ {
+ auto ancestors{m_pool.CalculateMemPoolAncestors(*ws.m_entry, m_limits)};
+ if(!ancestors) {
+ results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
+ // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
+ Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
+ ws.m_ptx->GetHash().ToString()));
+ }
+ ws.m_ancestors = std::move(ancestors).value_or(ws.m_ancestors);
}
// If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
// the transaction's descendant feerate into account because it hasn't seen them yet. Also,
@@ -1151,12 +1137,21 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
// make sure we haven't exceeded max mempool size.
LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip());
+ std::vector<uint256> all_package_wtxids;
+ all_package_wtxids.reserve(workspaces.size());
+ std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
+ [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); });
// Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
// but don't report success unless they all made it into the mempool.
for (Workspace& ws : workspaces) {
+ const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate :
+ CFeeRate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
+ const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids :
+ std::vector<uint256>({ws.m_ptx->GetWitnessHash()});
if (m_pool.exists(GenTxid::Wtxid(ws.m_ptx->GetWitnessHash()))) {
results.emplace(ws.m_ptx->GetWitnessHash(),
- MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees));
+ MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize,
+ ws.m_base_fees, effective_feerate, effective_feerate_wtxids));
GetMainSignals().TransactionAddedToMempool(ws.m_ptx, m_pool.GetAndIncrementSequence());
} else {
all_submitted = false;
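
When package feerates are in use, every successful result reports the shared package feerate together with the full list of wtxids whose fees were pooled; otherwise each transaction reports only its own feerate and wtxid. A compact standalone sketch of that attribution (strings stand in for wtxids; the struct is a stand-in for Workspace):

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>

struct Workspace {
    std::string wtxid;
    int64_t modified_fees;
    int64_t vsize;
};

int main()
{
    const std::vector<Workspace> workspaces{{"aa", 0, 100}, {"bb", 5'000, 150}};
    const bool package_feerates{true};

    std::vector<std::string> all_package_wtxids;
    all_package_wtxids.reserve(workspaces.size());
    std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
                   [](const auto& ws) { return ws.wtxid; });

    for (const auto& ws : workspaces) {
        const auto wtxids = package_feerates ? all_package_wtxids
                                             : std::vector<std::string>{ws.wtxid};
        // In the diff these wtxids and the matching feerate go into MempoolAcceptResult::Success().
        (void)wtxids;
    }
    return all_package_wtxids.size() == 2 ? 0 : 1;
}
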
@@ -1184,16 +1179,20 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
if (!ConsensusScriptChecks(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
+ const CFeeRate effective_feerate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
+ const std::vector<uint256> single_wtxid{ws.m_ptx->GetWitnessHash()};
// Tx was accepted, but not added
if (args.m_test_accept) {
- return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
+ return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize,
+ ws.m_base_fees, effective_feerate, single_wtxid);
}
if (!Finalize(args, ws)) return MempoolAcceptResult::Failure(ws.m_state);
GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence());
- return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees);
+ return MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions), ws.m_vsize, ws.m_base_fees,
+ effective_feerate, single_wtxid);
}
PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args)
@@ -1224,7 +1223,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
// package to spend. Since we already checked conflicts in the package and we don't allow
// replacements, we don't need to track the coins spent. Note that this logic will need to be
// updated if package replace-by-fee is allowed in the future.
- assert(!args.m_allow_bip125_replacement);
+ assert(!args.m_allow_replacement);
m_viewmempool.PackageAddTransaction(ws.m_ptx);
}
@@ -1240,7 +1239,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
if (args.m_package_feerates &&
!CheckFeeRate(m_total_vsize, m_total_modified_fees, placeholder_state)) {
package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-fee-too-low");
- return PackageMempoolAcceptResult(package_state, package_feerate, {});
+ return PackageMempoolAcceptResult(package_state, {});
}
// Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
@@ -1248,51 +1247,60 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
// transactions, but this exemption is not extended to packages in CheckPackageLimits().
std::string err_string;
if (txns.size() > 1 && !PackageMempoolChecks(txns, package_state)) {
- return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
+ return PackageMempoolAcceptResult(package_state, std::move(results));
}
+ std::vector<uint256> all_package_wtxids;
+ all_package_wtxids.reserve(workspaces.size());
+ std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
+ [](const auto& ws) { return ws.m_ptx->GetWitnessHash(); });
for (Workspace& ws : workspaces) {
+ ws.m_package_feerate = package_feerate;
if (!PolicyScriptChecks(args, ws)) {
// Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished.
package_state.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
- return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
+ return PackageMempoolAcceptResult(package_state, std::move(results));
}
if (args.m_test_accept) {
- // When test_accept=true, transactions that pass PolicyScriptChecks are valid because there are
- // no further mempool checks (passing PolicyScriptChecks implies passing ConsensusScriptChecks).
+ const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate :
+ CFeeRate{ws.m_modified_fees, static_cast<uint32_t>(ws.m_vsize)};
+ const auto effective_feerate_wtxids = args.m_package_feerates ? all_package_wtxids :
+ std::vector<uint256>{ws.m_ptx->GetWitnessHash()};
results.emplace(ws.m_ptx->GetWitnessHash(),
MempoolAcceptResult::Success(std::move(ws.m_replaced_transactions),
- ws.m_vsize, ws.m_base_fees));
+ ws.m_vsize, ws.m_base_fees, effective_feerate,
+ effective_feerate_wtxids));
}
}
- if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
+ if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results));
if (!SubmitPackage(args, workspaces, package_state, results)) {
// PackageValidationState filled in by SubmitPackage().
- return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
+ return PackageMempoolAcceptResult(package_state, std::move(results));
}
- return PackageMempoolAcceptResult(package_state, package_feerate, std::move(results));
+ return PackageMempoolAcceptResult(package_state, std::move(results));
}
PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, ATMPArgs& args)
{
AssertLockHeld(cs_main);
- PackageValidationState package_state;
+ // Used if returning a PackageMempoolAcceptResult directly from this function.
+ PackageValidationState package_state_quit_early;
// Check that the package is well-formed. If it isn't, we won't try to validate any of the
// transactions and thus won't return any MempoolAcceptResults, just a package-wide error.
// Context-free package checks.
- if (!CheckPackage(package, package_state)) return PackageMempoolAcceptResult(package_state, {});
+ if (!CheckPackage(package, package_state_quit_early)) return PackageMempoolAcceptResult(package_state_quit_early, {});
// All transactions in the package must be a parent of the last transaction. This is just an
// opportunity for us to fail fast on a context-free check without taking the mempool lock.
if (!IsChildWithParents(package)) {
- package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents");
- return PackageMempoolAcceptResult(package_state, {});
+ package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-parents");
+ return PackageMempoolAcceptResult(package_state_quit_early, {});
}
// IsChildWithParents() guarantees the package is > 1 transactions.
@@ -1324,15 +1332,16 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
return unconfirmed_parent_txids.count(input.prevout.hash) > 0 || m_view.HaveCoin(input.prevout);
};
if (!std::all_of(child->vin.cbegin(), child->vin.cend(), package_or_confirmed)) {
- package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-unconfirmed-parents");
- return PackageMempoolAcceptResult(package_state, {});
+ package_state_quit_early.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-child-with-unconfirmed-parents");
+ return PackageMempoolAcceptResult(package_state_quit_early, {});
}
// Protect against bugs where we pull more inputs from disk that miss being added to
// coins_to_uncache. The backend will be connected again when needed in PreChecks.
m_view.SetBackend(m_dummy);
LOCK(m_pool.cs);
- std::map<const uint256, const MempoolAcceptResult> results;
+ // Stores final results that won't change
+ std::map<const uint256, const MempoolAcceptResult> results_final;
// Node operators are free to set their mempool policies however they please, nodes may receive
// transactions in different orders, and malicious counterparties may try to take advantage of
// policy differences to pin or delay propagation of transactions. As such, it's possible for
@@ -1342,8 +1351,13 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// the new transactions. This ensures we don't double-count transaction counts and sizes when
// checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
ATMPArgs single_args = ATMPArgs::SingleInPackageAccept(args);
+ // Results from individual validation. "Nonfinal" because if a transaction fails by itself but
+ // succeeds later (i.e. when evaluated with a fee-bumping child), the result changes (though not
+ // reflected in this map). If a transaction fails more than once, we want to return the first
+ // result, when it was considered on its own. So changes will only be from invalid -> valid.
+ std::map<uint256, MempoolAcceptResult> individual_results_nonfinal;
bool quit_early{false};
- std::vector<CTransactionRef> txns_new;
+ std::vector<CTransactionRef> txns_package_eval;
for (const auto& tx : package) {
const auto& wtxid = tx->GetWitnessHash();
const auto& txid = tx->GetHash();
@@ -1354,7 +1368,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// Exact transaction already exists in the mempool.
auto iter = m_pool.GetIter(txid);
assert(iter != std::nullopt);
- results.emplace(wtxid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
+ results_final.emplace(wtxid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
} else if (m_pool.exists(GenTxid::Txid(txid))) {
// Transaction with the same non-witness data but different witness (same txid,
// different wtxid) already exists in the mempool.
@@ -1366,7 +1380,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
auto iter = m_pool.GetIter(txid);
assert(iter != std::nullopt);
// Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
- results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
+ results_final.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
} else {
// Transaction does not already exist in the mempool.
// Try submitting the transaction on its own.
@@ -1375,7 +1389,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// The transaction succeeded on its own and is now in the mempool. Don't include it
// in package validation, because its fees should only be "used" once.
assert(m_pool.exists(GenTxid::Wtxid(wtxid)));
- results.emplace(wtxid, single_res);
+ results_final.emplace(wtxid, single_res);
} else if (single_res.m_state.GetResult() != TxValidationResult::TX_MEMPOOL_POLICY &&
single_res.m_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
// Package validation policy only differs from individual policy in its evaluation
@@ -1388,41 +1402,57 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// future. Continue individually validating the rest of the transactions, because
// some of them may still be valid.
quit_early = true;
+ package_state_quit_early.Invalid(PackageValidationResult::PCKG_TX, "transaction failed");
+ individual_results_nonfinal.emplace(wtxid, single_res);
} else {
- txns_new.push_back(tx);
+ individual_results_nonfinal.emplace(wtxid, single_res);
+ txns_package_eval.push_back(tx);
}
}
}
- // Nothing to do if the entire package has already been submitted.
- if (quit_early || txns_new.empty()) {
- // No package feerate when no package validation was done.
- return PackageMempoolAcceptResult(package_state, std::move(results));
+ // Quit early because package validation won't change the result or the entire package has
+ // already been submitted.
+ if (quit_early || txns_package_eval.empty()) {
+ for (const auto& [wtxid, mempoolaccept_res] : individual_results_nonfinal) {
+ Assume(results_final.emplace(wtxid, mempoolaccept_res).second);
+ Assume(mempoolaccept_res.m_result_type == MempoolAcceptResult::ResultType::INVALID);
+ }
+ return PackageMempoolAcceptResult(package_state_quit_early, std::move(results_final));
}
- // Validate the (deduplicated) transactions as a package.
- auto submission_result = AcceptMultipleTransactions(txns_new, args);
+ // Validate the (deduplicated) transactions as a package. Note that submission_result has its
+ // own PackageValidationState; package_state_quit_early is unused past this point.
+ auto submission_result = AcceptMultipleTransactions(txns_package_eval, args);
// Include already-in-mempool transaction results in the final result.
- for (const auto& [wtxid, mempoolaccept_res] : results) {
- submission_result.m_tx_results.emplace(wtxid, mempoolaccept_res);
+ for (const auto& [wtxid, mempoolaccept_res] : results_final) {
+ Assume(submission_result.m_tx_results.emplace(wtxid, mempoolaccept_res).second);
+ Assume(mempoolaccept_res.m_result_type != MempoolAcceptResult::ResultType::INVALID);
+ }
+ if (submission_result.m_state.GetResult() == PackageValidationResult::PCKG_TX) {
+ // Package validation failed because one or more transactions failed. Provide a result for
+ // each transaction; if AcceptMultipleTransactions() didn't return a result for a tx,
+ // include the previous individual failure reason.
+ submission_result.m_tx_results.insert(individual_results_nonfinal.cbegin(),
+ individual_results_nonfinal.cend());
+ Assume(submission_result.m_tx_results.size() == package.size());
}
- if (submission_result.m_state.IsValid()) assert(submission_result.m_package_feerate.has_value());
return submission_result;
}
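
The final merge above relies on std::map insertion semantics: insert()/emplace() keep an existing entry on key collision, so results produced by package validation take precedence and the earlier individual failures only fill in transactions that got no package-level result. A tiny sketch of that behaviour, with strings standing in for wtxids and results:

#include <cassert>
#include <map>
#include <string>

int main()
{
    std::map<std::string, std::string> submission_results{{"child", "package result"}};
    const std::map<std::string, std::string> individual_nonfinal{
        {"parent", "individual failure"},
        {"child", "older individual failure"},
    };

    // Range insert never overwrites: "child" keeps its package result, "parent" is filled in.
    submission_results.insert(individual_nonfinal.cbegin(), individual_nonfinal.cend());
    assert(submission_results.at("child") == "package result");
    assert(submission_results.size() == 2);
    return 0;
}
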
} // anon namespace
-MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, const CTransactionRef& tx,
+MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTransactionRef& tx,
int64_t accept_time, bool bypass_limits, bool test_accept)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
AssertLockHeld(::cs_main);
- const CChainParams& chainparams{active_chainstate.m_params};
+ const CChainParams& chainparams{active_chainstate.m_chainman.GetParams()};
assert(active_chainstate.GetMempool() != nullptr);
CTxMemPool& pool{*active_chainstate.GetMempool()};
std::vector<COutPoint> coins_to_uncache;
auto args = MemPoolAccept::ATMPArgs::SingleAccept(chainparams, accept_time, bypass_limits, coins_to_uncache, test_accept);
- const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
+ MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args);
if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) {
// Remove coins that were not present in the coins cache before calling
// AcceptSingleTransaction(); this is to prevent memory DoS in case we receive a large
@@ -1438,7 +1468,7 @@ MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, const CTr
return result;
}
-PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTxMemPool& pool,
+PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool,
const Package& package, bool test_accept)
{
AssertLockHeld(cs_main);
@@ -1446,8 +1476,8 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx
assert(std::all_of(package.cbegin(), package.cend(), [](const auto& tx){return tx != nullptr;}));
std::vector<COutPoint> coins_to_uncache;
- const CChainParams& chainparams = active_chainstate.m_params;
- const auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+ const CChainParams& chainparams = active_chainstate.m_chainman.GetParams();
+ auto result = [&]() EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
AssertLockHeld(cs_main);
if (test_accept) {
auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache);
@@ -1497,32 +1527,31 @@ void CoinsViews::InitCache()
m_cacheview = std::make_unique<CCoinsViewCache>(&m_catcherview);
}
-CChainState::CChainState(
+Chainstate::Chainstate(
CTxMemPool* mempool,
BlockManager& blockman,
ChainstateManager& chainman,
std::optional<uint256> from_snapshot_blockhash)
: m_mempool(mempool),
m_blockman(blockman),
- m_params(chainman.GetParams()),
m_chainman(chainman),
m_from_snapshot_blockhash(from_snapshot_blockhash) {}
-void CChainState::InitCoinsDB(
+void Chainstate::InitCoinsDB(
size_t cache_size_bytes,
bool in_memory,
bool should_wipe,
fs::path leveldb_name)
{
if (m_from_snapshot_blockhash) {
- leveldb_name += "_" + m_from_snapshot_blockhash->ToString();
+ leveldb_name += node::SNAPSHOT_CHAINSTATE_SUFFIX;
}
m_coins_views = std::make_unique<CoinsViews>(
leveldb_name, cache_size_bytes, in_memory, should_wipe);
}
-void CChainState::InitCoinsCache(size_t cache_size_bytes)
+void Chainstate::InitCoinsCache(size_t cache_size_bytes)
{
AssertLockHeld(::cs_main);
assert(m_coins_views != nullptr);
@@ -1532,10 +1561,10 @@ void CChainState::InitCoinsCache(size_t cache_size_bytes)
// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which
// is a performance-related implementation detail. This function must be marked
-// `const` so that `CValidationInterface` clients (which are given a `const CChainState*`)
+// `const` so that `CValidationInterface` clients (which are given a `const Chainstate*`)
// can call it.
//
-bool CChainState::IsInitialBlockDownload() const
+bool Chainstate::IsInitialBlockDownload() const
{
// Optimization: pre-test latch before taking the lock.
if (m_cached_finished_ibd.load(std::memory_order_relaxed))
@@ -1548,10 +1577,12 @@ bool CChainState::IsInitialBlockDownload() const
return true;
if (m_chain.Tip() == nullptr)
return true;
- if (m_chain.Tip()->nChainWork < nMinimumChainWork)
+ if (m_chain.Tip()->nChainWork < m_chainman.MinimumChainWork()) {
return true;
- if (m_chain.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
+ }
+ if (m_chain.Tip()->Time() < Now<NodeSeconds>() - m_chainman.m_options.max_tip_age) {
return true;
+ }
LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
m_cached_finished_ibd.store(true, std::memory_order_relaxed);
return false;
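
IsInitialBlockDownload() now compares the tip time against a chrono-typed clock (Now<NodeSeconds>() minus the configured max tip age) instead of raw int64_t seconds. A compilable sketch of the same comparison using plain std::chrono types (the 24-hour value for -maxtipage is an assumption carried over from the old DEFAULT_MAX_TIP_AGE):

#include <chrono>
#include <iostream>

using NodeSeconds = std::chrono::time_point<std::chrono::system_clock, std::chrono::seconds>;

int main()
{
    const auto now{std::chrono::time_point_cast<std::chrono::seconds>(std::chrono::system_clock::now())};
    const std::chrono::seconds max_tip_age{24 * 60 * 60}; // assumed default

    // Pretend the best block was mined two days ago.
    const NodeSeconds tip_time{now - std::chrono::hours{48}};

    // Same shape as: m_chain.Tip()->Time() < Now<NodeSeconds>() - m_chainman.m_options.max_tip_age
    const bool still_in_ibd{tip_time < now - max_tip_age};
    std::cout << (still_in_ibd ? "still in IBD" : "caught up") << '\n';
    return 0;
}
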
@@ -1577,7 +1608,7 @@ static void AlertNotify(const std::string& strMessage)
#endif
}
-void CChainState::CheckForkWarningConditions()
+void Chainstate::CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
@@ -1596,7 +1627,7 @@ void CChainState::CheckForkWarningConditions()
}
// Called both upon regular invalid block discovery *and* InvalidateBlock
-void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
+void Chainstate::InvalidChainFound(CBlockIndex* pindexNew)
{
AssertLockHeld(cs_main);
if (!m_chainman.m_best_invalid || pindexNew->nChainWork > m_chainman.m_best_invalid->nChainWork) {
@@ -1619,7 +1650,7 @@ void CChainState::InvalidChainFound(CBlockIndex* pindexNew)
// Same as InvalidChainFound, above, except not called directly from InvalidateBlock,
// which does its own setBlockIndexCandidates management.
-void CChainState::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state)
+void Chainstate::InvalidBlockFound(CBlockIndex* pindex, const BlockValidationState& state)
{
AssertLockHeld(cs_main);
if (state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
@@ -1824,7 +1855,7 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
* When FAILED is returned, view is left in an indeterminate state. */
-DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
+DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
AssertLockHeld(::cs_main);
bool fClean = true;
@@ -1840,11 +1871,21 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
return DISCONNECT_FAILED;
}
+ // Ignore blocks that contain transactions which are 'overwritten' by later transactions,
+ // unless those are already completely spent.
+ // See https://github.com/bitcoin/bitcoin/issues/22596 for additional information.
+ // Note: the blocks specified here are different than the ones used in ConnectBlock because DisconnectBlock
+ // unwinds the blocks in reverse. As a result, the inconsistency is not discovered until the earlier
+ // blocks with the duplicate coinbase transactions are disconnected.
+ bool fEnforceBIP30 = !((pindex->nHeight==91722 && pindex->GetBlockHash() == uint256S("0x00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e")) ||
+ (pindex->nHeight==91812 && pindex->GetBlockHash() == uint256S("0x00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f")));
+
// undo transactions in reverse order
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = *(block.vtx[i]);
uint256 hash = tx.GetHash();
bool is_coinbase = tx.IsCoinBase();
+ bool is_bip30_exception = (is_coinbase && !fEnforceBIP30);
// Check that all outputs are available and match the outputs in the block itself
// exactly.
@@ -1854,7 +1895,9 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
Coin coin;
bool is_spent = view.SpendCoin(out, &coin);
if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
- fClean = false; // transaction output mismatch
+ if (!is_bip30_exception) {
+ fClean = false; // transaction output mismatch
+ }
}
}
}
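
For reference, the two blocks special-cased here (heights 91722 and 91812) are the ones whose coinbases were later overwritten by the duplicates at heights 91842 and 91880; ConnectBlock's check for that later pair is extracted into IsBIP30Repeat() further below. A standalone sketch of the predicate shape using the heights and hashes from this hunk (the helper name and string comparison are illustrative; validation.cpp keeps this particular check inline and compares uint256 values):

#include <string>

bool IsOverwrittenBIP30Coinbase(int height, const std::string& hash_hex)
{
    return (height == 91722 && hash_hex == "00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e") ||
           (height == 91812 && hash_hex == "00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f");
}
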
@@ -1965,19 +2008,19 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch
}
-static int64_t nTimeCheck = 0;
-static int64_t nTimeForks = 0;
-static int64_t nTimeConnect = 0;
-static int64_t nTimeVerify = 0;
-static int64_t nTimeUndo = 0;
-static int64_t nTimeIndex = 0;
-static int64_t nTimeTotal = 0;
-static int64_t nBlocksTotal = 0;
+static SteadyClock::duration time_check{};
+static SteadyClock::duration time_forks{};
+static SteadyClock::duration time_connect{};
+static SteadyClock::duration time_verify{};
+static SteadyClock::duration time_undo{};
+static SteadyClock::duration time_index{};
+static SteadyClock::duration time_total{};
+static int64_t num_blocks_total = 0;
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
* Validity checks that depend on the UTXO set are also done; ConnectBlock()
* can fail if those validity checks fail (among other reasons). */
-bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
+bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CBlockIndex* pindex,
CCoinsViewCache& view, bool fJustCheck)
{
AssertLockHeld(cs_main);
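
The per-phase benchmark counters become SteadyClock::duration accumulators, converted to human units only when logging via the Ticks<> helper (as the timing hunks below show). A self-contained sketch of that pattern, with Ticks and the double-precision duration aliases modelled directly on std::chrono (treat the exact helper definitions as assumptions about util/time.h):

#include <chrono>
#include <cstdint>
#include <cstdio>

using SteadyClock = std::chrono::steady_clock;
using MillisecondsDouble = std::chrono::duration<double, std::milli>;
using SecondsDouble = std::chrono::duration<double>;

// Minimal stand-in for Ticks<>: the duration's count in the requested unit.
template <typename Dur1, typename Dur2>
constexpr auto Ticks(Dur2 d) { return std::chrono::duration_cast<Dur1>(d).count(); }

static SteadyClock::duration time_check{};
static int64_t num_blocks_total{0};

int main()
{
    const auto time_start{SteadyClock::now()};
    // ... per-block sanity checks would run here ...
    const auto time_1{SteadyClock::now()};

    time_check += time_1 - time_start;
    ++num_blocks_total;
    std::printf(" - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
                Ticks<MillisecondsDouble>(time_1 - time_start),
                Ticks<SecondsDouble>(time_check),
                Ticks<MillisecondsDouble>(time_check) / num_blocks_total);
    return 0;
}
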
@@ -1985,8 +2028,10 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
uint256 block_hash{block.GetHash()};
assert(*pindex->phashBlock == block_hash);
+ const bool parallel_script_checks{scriptcheckqueue.HasThreads()};
- int64_t nTimeStart = GetTimeMicros();
+ const auto time_start{SteadyClock::now()};
+ const CChainParams& params{m_chainman.GetParams()};
// Check it again in case a previous version let a bad block in
// NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
@@ -2001,7 +2046,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to
// re-enforce that rule here (at least until we make it impossible for
// m_adjusted_time_callback() to go backward).
- if (!CheckBlock(block, state, m_params.GetConsensus(), !fJustCheck, !fJustCheck)) {
+ if (!CheckBlock(block, state, params.GetConsensus(), !fJustCheck, !fJustCheck)) {
if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
@@ -2015,28 +2060,28 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
- nBlocksTotal++;
+ num_blocks_total++;
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
- if (block_hash == m_params.GetConsensus().hashGenesisBlock) {
+ if (block_hash == params.GetConsensus().hashGenesisBlock) {
if (!fJustCheck)
view.SetBestBlock(pindex->GetBlockHash());
return true;
}
bool fScriptChecks = true;
- if (!hashAssumeValid.IsNull()) {
+ if (!m_chainman.AssumedValidBlock().IsNull()) {
// We've been configured with the hash of a block which has been externally verified to have a valid history.
// A suitable default value is included with the software and updated from time to time. Because validity
// relative to a piece of software is an objective fact these defaults can be easily reviewed.
// This setting doesn't force the selection of any particular chain but makes validating some faster by
// effectively caching the result of part of the verification.
- BlockMap::const_iterator it = m_blockman.m_block_index.find(hashAssumeValid);
+ BlockMap::const_iterator it{m_blockman.m_block_index.find(m_chainman.AssumedValidBlock())};
if (it != m_blockman.m_block_index.end()) {
if (it->second.GetAncestor(pindex->nHeight) == pindex &&
m_chainman.m_best_header->GetAncestor(pindex->nHeight) == pindex &&
- m_chainman.m_best_header->nChainWork >= nMinimumChainWork) {
+ m_chainman.m_best_header->nChainWork >= m_chainman.MinimumChainWork()) {
// This block is a member of the assumed verified chain and an ancestor of the best header.
// Script verification is skipped when connecting blocks under the
// assumevalid block. Assuming the assumevalid block is valid this
@@ -2049,15 +2094,19 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// it hard to hide the implication of the demand. This also avoids having release candidates
// that are hardly doing any signature verification at all in testing without having to
// artificially set the default assumed verified block further back.
- // The test against nMinimumChainWork prevents the skipping when denied access to any chain at
+ // The test against the minimum chain work prevents the skipping when denied access to any chain at
// least as good as the expected chain.
- fScriptChecks = (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
+ fScriptChecks = (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, params.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
}
}
}
- int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
- LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
+ const auto time_1{SteadyClock::now()};
+ time_check += time_1 - time_start;
+ LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_1 - time_start),
+ Ticks<SecondsDouble>(time_check),
+ Ticks<MillisecondsDouble>(time_check) / num_blocks_total);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
@@ -2069,8 +2118,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
// two in the chain that violate it. This prevents exploiting the issue against nodes during their
// initial block download.
- bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
- (pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
+ bool fEnforceBIP30 = !IsBIP30Repeat(*pindex);
// Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
// with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
@@ -2128,9 +2176,9 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// post BIP34 before approximately height 486,000,000. After block
// 1,983,702 testnet3 starts doing unnecessary BIP30 checking again.
assert(pindex->pprev);
- CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(m_params.GetConsensus().BIP34Height);
+ CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(params.GetConsensus().BIP34Height);
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
- fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == m_params.GetConsensus().BIP34Hash));
+ fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == params.GetConsensus().BIP34Hash));
// TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
// consensus change that ensures coinbases at those heights cannot
@@ -2155,8 +2203,12 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// Get the script flags for this block
unsigned int flags{GetBlockScriptFlags(*pindex, m_chainman)};
- int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
- LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
+ const auto time_2{SteadyClock::now()};
+ time_forks += time_2 - time_1;
+ LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_2 - time_1),
+ Ticks<SecondsDouble>(time_forks),
+ Ticks<MillisecondsDouble>(time_forks) / num_blocks_total);
CBlockUndo blockundo;
@@ -2165,7 +2217,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// in multiple threads). Preallocate the vector size so a new allocation
// doesn't invalidate pointers into the vector, and keep txsdata in scope
// for as long as `control`.
- CCheckQueueControl<CScriptCheck> control(fScriptChecks && g_parallel_script_checks ? &scriptcheckqueue : nullptr);
+ CCheckQueueControl<CScriptCheck> control(fScriptChecks && parallel_script_checks ? &scriptcheckqueue : nullptr);
std::vector<PrecomputedTransactionData> txsdata(block.vtx.size());
std::vector<int> prevheights;
@@ -2224,7 +2276,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
TxValidationState tx_state;
- if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], g_parallel_script_checks ? &vChecks : nullptr)) {
+ if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], parallel_script_checks ? &vChecks : nullptr)) {
// Any transaction validation failure in ConnectBlock is a block consensus failure
state.Invalid(BlockValidationResult::BLOCK_CONSENSUS,
tx_state.GetRejectReason(), tx_state.GetDebugMessage());
@@ -2240,10 +2292,15 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
}
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
}
- int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
- LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
+ const auto time_3{SteadyClock::now()};
+ time_connect += time_3 - time_2;
+ LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(),
+ Ticks<MillisecondsDouble>(time_3 - time_2), Ticks<MillisecondsDouble>(time_3 - time_2) / block.vtx.size(),
+ nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_3 - time_2) / (nInputs - 1),
+ Ticks<SecondsDouble>(time_connect),
+ Ticks<MillisecondsDouble>(time_connect) / num_blocks_total);
- CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, m_params.GetConsensus());
+ CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, params.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward) {
LogPrintf("ERROR: ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)\n", block.vtx[0]->GetValueOut(), blockReward);
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "bad-cb-amount");
@@ -2253,18 +2310,27 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
LogPrintf("ERROR: %s: CheckQueue failed\n", __func__);
return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, "block-validation-failed");
}
- int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
- LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
+ const auto time_4{SteadyClock::now()};
+ time_verify += time_4 - time_2;
+ LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1,
+ Ticks<MillisecondsDouble>(time_4 - time_2),
+ nInputs <= 1 ? 0 : Ticks<MillisecondsDouble>(time_4 - time_2) / (nInputs - 1),
+ Ticks<SecondsDouble>(time_verify),
+ Ticks<MillisecondsDouble>(time_verify) / num_blocks_total);
if (fJustCheck)
return true;
- if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, m_params)) {
+ if (!m_blockman.WriteUndoDataForBlock(blockundo, state, pindex, params)) {
return false;
}
- int64_t nTime5 = GetTimeMicros(); nTimeUndo += nTime5 - nTime4;
- LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeUndo * MICRO, nTimeUndo * MILLI / nBlocksTotal);
+ const auto time_5{SteadyClock::now()};
+ time_undo += time_5 - time_4;
+ LogPrint(BCLog::BENCH, " - Write undo data: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_5 - time_4),
+ Ticks<SecondsDouble>(time_undo),
+ Ticks<MillisecondsDouble>(time_undo) / num_blocks_total);
if (!pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
@@ -2274,8 +2340,12 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
- int64_t nTime6 = GetTimeMicros(); nTimeIndex += nTime6 - nTime5;
- LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
+ const auto time_6{SteadyClock::now()};
+ time_index += time_6 - time_5;
+ LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_6 - time_5),
+ Ticks<SecondsDouble>(time_index),
+ Ticks<MillisecondsDouble>(time_index) / num_blocks_total);
TRACE6(validation, block_connected,
block_hash.data(),
@@ -2283,13 +2353,13 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
block.vtx.size(),
nInputs,
nSigOpsCost,
- nTime5 - nTimeStart // in microseconds (µs)
+ time_5 - time_start // in microseconds (µs)
);
return true;
}
-CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
+CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState()
{
AssertLockHeld(::cs_main);
return this->GetCoinsCacheSizeState(
@@ -2297,7 +2367,7 @@ CoinsCacheSizeState CChainState::GetCoinsCacheSizeState()
m_mempool ? m_mempool->m_max_size_bytes : 0);
}
-CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
+CoinsCacheSizeState Chainstate::GetCoinsCacheSizeState(
size_t max_coins_cache_size_bytes,
size_t max_mempool_size_bytes)
{
@@ -2321,15 +2391,13 @@ CoinsCacheSizeState CChainState::GetCoinsCacheSizeState(
return CoinsCacheSizeState::OK;
}
-bool CChainState::FlushStateToDisk(
+bool Chainstate::FlushStateToDisk(
BlockValidationState &state,
FlushStateMode mode,
int nManualPruneHeight)
{
LOCK(cs_main);
assert(this->CanFlushToDisk());
- static std::chrono::microseconds nLastWrite{0};
- static std::chrono::microseconds nLastFlush{0};
std::set<int> setFilesToPrune;
bool full_flush_completed = false;
@@ -2370,7 +2438,7 @@ bool CChainState::FlushStateToDisk(
} else {
LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH);
- m_blockman.FindFilesToPrune(setFilesToPrune, m_params.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
+ m_blockman.FindFilesToPrune(setFilesToPrune, m_chainman.GetParams().PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload());
m_blockman.m_check_for_pruning = false;
}
if (!setFilesToPrune.empty()) {
@@ -2383,20 +2451,20 @@ bool CChainState::FlushStateToDisk(
}
const auto nNow = GetTime<std::chrono::microseconds>();
// Avoid writing/flushing immediately after startup.
- if (nLastWrite.count() == 0) {
- nLastWrite = nNow;
+ if (m_last_write.count() == 0) {
+ m_last_write = nNow;
}
- if (nLastFlush.count() == 0) {
- nLastFlush = nNow;
+ if (m_last_flush.count() == 0) {
+ m_last_flush = nNow;
}
// The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
bool fCacheLarge = mode == FlushStateMode::PERIODIC && cache_state >= CoinsCacheSizeState::LARGE;
// The cache is over the limit, we have to write now.
bool fCacheCritical = mode == FlushStateMode::IF_NEEDED && cache_state >= CoinsCacheSizeState::CRITICAL;
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
- bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > nLastWrite + DATABASE_WRITE_INTERVAL;
+ bool fPeriodicWrite = mode == FlushStateMode::PERIODIC && nNow > m_last_write + DATABASE_WRITE_INTERVAL;
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
- bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > nLastFlush + DATABASE_FLUSH_INTERVAL;
+ bool fPeriodicFlush = mode == FlushStateMode::PERIODIC && nNow > m_last_flush + DATABASE_FLUSH_INTERVAL;
// Combine all conditions that result in a full cache flush.
fDoFullFlush = (mode == FlushStateMode::ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
// Write blocks and block index to disk.
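
The write/flush timestamps that used to be function-static locals (nLastWrite/nLastFlush) are now Chainstate members (m_last_write/m_last_flush), so each chainstate tracks its own schedule. The periodic decision itself is unchanged; a sketch of its shape, with the two intervals treated as assumptions (roughly an hour between block-index writes and a day between full cache flushes):

#include <chrono>

using namespace std::chrono_literals;

enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };

// Assumed values; see DATABASE_WRITE_INTERVAL / DATABASE_FLUSH_INTERVAL in validation.cpp.
constexpr auto DATABASE_WRITE_INTERVAL{1h};
constexpr auto DATABASE_FLUSH_INTERVAL{24h};

struct ChainstateSketch {
    std::chrono::microseconds m_last_write{0};
    std::chrono::microseconds m_last_flush{0};

    bool PeriodicWriteDue(FlushStateMode mode, std::chrono::microseconds now) const
    {
        return mode == FlushStateMode::PERIODIC && now > m_last_write + DATABASE_WRITE_INTERVAL;
    }
    bool PeriodicFlushDue(FlushStateMode mode, std::chrono::microseconds now) const
    {
        return mode == FlushStateMode::PERIODIC && now > m_last_flush + DATABASE_FLUSH_INTERVAL;
    }
};

int main()
{
    ChainstateSketch cs;
    const auto now{2h + 30min};
    return cs.PeriodicWriteDue(FlushStateMode::PERIODIC, now) ? 0 : 1;
}
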
@@ -2426,7 +2494,7 @@ bool CChainState::FlushStateToDisk(
UnlinkPrunedFiles(setFilesToPrune);
}
- nLastWrite = nNow;
+ m_last_write = nNow;
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
@@ -2444,7 +2512,7 @@ bool CChainState::FlushStateToDisk(
// Flush the chainstate (which may refer to block index entries).
if (!CoinsTip().Flush())
return AbortNode(state, "Failed to write to coin database");
- nLastFlush = nNow;
+ m_last_flush = nNow;
full_flush_completed = true;
TRACE5(utxocache, flush,
(int64_t)(GetTimeMicros() - nNow.count()), // in microseconds (µs)
@@ -2464,7 +2532,7 @@ bool CChainState::FlushStateToDisk(
return true;
}
-void CChainState::ForceFlushStateToDisk()
+void Chainstate::ForceFlushStateToDisk()
{
BlockValidationState state;
if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) {
@@ -2472,7 +2540,7 @@ void CChainState::ForceFlushStateToDisk()
}
}
-void CChainState::PruneAndFlush()
+void Chainstate::PruneAndFlush()
{
BlockValidationState state;
m_blockman.m_check_for_pruning = true;
@@ -2519,18 +2587,20 @@ static void UpdateTipLog(
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
}
-void CChainState::UpdateTip(const CBlockIndex* pindexNew)
+void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
{
AssertLockHeld(::cs_main);
const auto& coins_tip = this->CoinsTip();
+ const CChainParams& params{m_chainman.GetParams()};
+
// The remainder of the function isn't relevant if we are not acting on
// the active chainstate, so return if need be.
if (this != &m_chainman.ActiveChainstate()) {
// Only log every so often so that we don't bury log messages at the tip.
constexpr int BACKGROUND_LOG_INTERVAL = 2000;
if (pindexNew->nHeight % BACKGROUND_LOG_INTERVAL == 0) {
- UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "[background validation] ", "");
+ UpdateTipLog(coins_tip, pindexNew, params, __func__, "[background validation] ", "");
}
return;
}
@@ -2551,7 +2621,7 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew)
const CBlockIndex* pindex = pindexNew;
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
WarningBitsConditionChecker checker(m_chainman, bit);
- ThresholdState state = checker.GetStateFor(pindex, m_params.GetConsensus(), warningcache.at(bit));
+ ThresholdState state = checker.GetStateFor(pindex, params.GetConsensus(), warningcache.at(bit));
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
if (state == ThresholdState::ACTIVE) {
@@ -2562,7 +2632,7 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew)
}
}
}
- UpdateTipLog(coins_tip, pindexNew, m_params, __func__, "", warning_messages.original);
+ UpdateTipLog(coins_tip, pindexNew, params, __func__, "", warning_messages.original);
}
/** Disconnect m_chain's tip.
@@ -2575,7 +2645,7 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew)
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
-bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool)
+bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTransactions* disconnectpool)
{
AssertLockHeld(cs_main);
if (m_mempool) AssertLockHeld(m_mempool->cs);
@@ -2586,11 +2656,11 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
// Read block from disk.
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
CBlock& block = *pblock;
- if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) {
+ if (!ReadBlockFromDisk(block, pindexDelete, m_chainman.GetConsensus())) {
return error("DisconnectTip(): Failed to read block");
}
// Apply the block atomically to the chain state.
- int64_t nStart = GetTimeMicros();
+ const auto time_start{SteadyClock::now()};
{
CCoinsViewCache view(&CoinsTip());
assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
@@ -2599,7 +2669,8 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
bool flushed = view.Flush();
assert(flushed);
}
- LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
+ LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
+ Ticks<MillisecondsDouble>(SteadyClock::now() - time_start));
{
// Prune locks that began at or after the tip should be moved backward so they get a chance to reorg
@@ -2639,11 +2710,11 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr
return true;
}
-static int64_t nTimeReadFromDiskTotal = 0;
-static int64_t nTimeConnectTotal = 0;
-static int64_t nTimeFlush = 0;
-static int64_t nTimeChainState = 0;
-static int64_t nTimePostConnect = 0;
+static SteadyClock::duration time_read_from_disk_total{};
+static SteadyClock::duration time_connect_total{};
+static SteadyClock::duration time_flush{};
+static SteadyClock::duration time_chainstate{};
+static SteadyClock::duration time_post_connect{};
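These accumulators feed the per-phase BENCH logging in ConnectTip() below; they now hold std::chrono durations and are printed with the Ticks<> helpers rather than raw microsecond counts scaled by the removed MICRO/MILLI macros. A minimal sketch of the pattern, assuming the SteadyClock, MillisecondsDouble, SecondsDouble and Ticks<> utilities from util/time.h that the new code relies on (TimedStep and time_example are illustrative names only):

#include <logging.h>
#include <util/time.h> // SteadyClock, MillisecondsDouble, SecondsDouble, Ticks<>

static SteadyClock::duration time_example{}; // accumulated across calls

void TimedStep()
{
    const auto start{SteadyClock::now()};
    // ... the work being benchmarked ...
    const auto end{SteadyClock::now()};
    time_example += end - start;
    LogPrint(BCLog::BENCH, "- Example step: %.2fms [%.2fs total]\n",
             Ticks<MillisecondsDouble>(end - start),
             Ticks<SecondsDouble>(time_example));
}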
struct PerBlockConnectTrace {
CBlockIndex* pindex = nullptr;
@@ -2691,18 +2762,18 @@ public:
*
* The block is added to connectTrace if connection succeeds.
*/
-bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
+bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, const std::shared_ptr<const CBlock>& pblock, ConnectTrace& connectTrace, DisconnectedBlockTransactions& disconnectpool)
{
AssertLockHeld(cs_main);
if (m_mempool) AssertLockHeld(m_mempool->cs);
assert(pindexNew->pprev == m_chain.Tip());
// Read block from disk.
- int64_t nTime1 = GetTimeMicros();
+ const auto time_1{SteadyClock::now()};
std::shared_ptr<const CBlock> pthisBlock;
if (!pblock) {
std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
- if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_params.GetConsensus())) {
+ if (!ReadBlockFromDisk(*pblockNew, pindexNew, m_chainman.GetConsensus())) {
return AbortNode(state, "Failed to read block");
}
pthisBlock = pblockNew;
@@ -2712,9 +2783,13 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew
}
const CBlock& blockConnecting = *pthisBlock;
// Apply the block atomically to the chain state.
- int64_t nTime2 = GetTimeMicros(); nTimeReadFromDiskTotal += nTime2 - nTime1;
- int64_t nTime3;
- LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDiskTotal * MICRO, nTimeReadFromDiskTotal * MILLI / nBlocksTotal);
+ const auto time_2{SteadyClock::now()};
+ time_read_from_disk_total += time_2 - time_1;
+ SteadyClock::time_point time_3;
+ LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_2 - time_1),
+ Ticks<SecondsDouble>(time_read_from_disk_total),
+ Ticks<MillisecondsDouble>(time_read_from_disk_total) / num_blocks_total);
{
CCoinsViewCache view(&CoinsTip());
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
@@ -2724,20 +2799,32 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew
InvalidBlockFound(pindexNew, state);
return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString());
}
- nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
- assert(nBlocksTotal > 0);
- LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
+ time_3 = SteadyClock::now();
+ time_connect_total += time_3 - time_2;
+ assert(num_blocks_total > 0);
+ LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_3 - time_2),
+ Ticks<SecondsDouble>(time_connect_total),
+ Ticks<MillisecondsDouble>(time_connect_total) / num_blocks_total);
bool flushed = view.Flush();
assert(flushed);
}
- int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
- LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
+ const auto time_4{SteadyClock::now()};
+ time_flush += time_4 - time_3;
+ LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_4 - time_3),
+ Ticks<SecondsDouble>(time_flush),
+ Ticks<MillisecondsDouble>(time_flush) / num_blocks_total);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FlushStateMode::IF_NEEDED)) {
return false;
}
- int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
- LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
+ const auto time_5{SteadyClock::now()};
+ time_chainstate += time_5 - time_4;
+ LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_5 - time_4),
+ Ticks<SecondsDouble>(time_chainstate),
+ Ticks<MillisecondsDouble>(time_chainstate) / num_blocks_total);
// Remove conflicting transactions from the mempool.
if (m_mempool) {
m_mempool->removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
@@ -2747,9 +2834,17 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew
m_chain.SetTip(*pindexNew);
UpdateTip(pindexNew);
- int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
- LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
- LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
+ const auto time_6{SteadyClock::now()};
+ time_post_connect += time_6 - time_5;
+ time_total += time_6 - time_1;
+ LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_6 - time_5),
+ Ticks<SecondsDouble>(time_post_connect),
+ Ticks<MillisecondsDouble>(time_post_connect) / num_blocks_total);
+ LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n",
+ Ticks<MillisecondsDouble>(time_6 - time_1),
+ Ticks<SecondsDouble>(time_total),
+ Ticks<MillisecondsDouble>(time_total) / num_blocks_total);
connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
return true;
@@ -2759,7 +2854,7 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew
* Return the tip of the chain with the most work in it, that isn't
* known to be invalid (it's however far from certain to be valid).
*/
-CBlockIndex* CChainState::FindMostWorkChain()
+CBlockIndex* Chainstate::FindMostWorkChain()
{
AssertLockHeld(::cs_main);
do {
@@ -2818,7 +2913,7 @@ CBlockIndex* CChainState::FindMostWorkChain()
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
-void CChainState::PruneBlockIndexCandidates() {
+void Chainstate::PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
@@ -2835,7 +2930,7 @@ void CChainState::PruneBlockIndexCandidates() {
*
* @returns true unless a system error occurred
*/
-bool CChainState::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
+bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
AssertLockHeld(cs_main);
if (m_mempool) AssertLockHeld(m_mempool->cs);
@@ -2927,7 +3022,7 @@ static SynchronizationState GetSynchronizationState(bool init)
return SynchronizationState::INIT_DOWNLOAD;
}
-static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) {
+static bool NotifyHeaderTip(Chainstate& chainstate) LOCKS_EXCLUDED(cs_main) {
bool fNotify = false;
bool fInitialBlockDownload = false;
static CBlockIndex* pindexHeaderOld = nullptr;
@@ -2944,7 +3039,7 @@ static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) {
}
// Send block tip changed notifications without cs_main
if (fNotify) {
- uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader);
+ uiInterface.NotifyHeaderTip(GetSynchronizationState(fInitialBlockDownload), pindexHeader->nHeight, pindexHeader->nTime, false);
}
return fNotify;
}
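Note that the tip notification now passes the header's height and time (plus a presync flag) rather than a CBlockIndex pointer; ReportHeadersPresync() further down reuses the same uiInterface.NotifyHeaderTip() call with presync=true for headers that do not yet have block index entries.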
@@ -2957,7 +3052,7 @@ static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
}
}
-bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
+bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
{
AssertLockNotHeld(m_chainstate_mutex);
@@ -3059,7 +3154,7 @@ bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr
return true;
}
-bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
+bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
{
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
@@ -3090,7 +3185,7 @@ bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex
return ActivateBestChain(state, std::shared_ptr<const CBlock>());
}
-bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
+bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
{
AssertLockNotHeld(m_chainstate_mutex);
AssertLockNotHeld(::cs_main);
@@ -3233,7 +3328,7 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pind
return true;
}
-void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
+void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) {
AssertLockHeld(cs_main);
int nHeight = pindex->nHeight;
@@ -3266,7 +3361,7 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
-void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
+void Chainstate::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
{
AssertLockHeld(cs_main);
pindexNew->nTx = block.vtx.size();
@@ -3432,6 +3527,22 @@ std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock&
return commitment;
}
+bool HasValidProofOfWork(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams)
+{
+ return std::all_of(headers.cbegin(), headers.cend(),
+ [&](const auto& header) { return CheckProofOfWork(header.GetHash(), header.nBits, consensusParams);});
+}
+
+arith_uint256 CalculateHeadersWork(const std::vector<CBlockHeader>& headers)
+{
+ arith_uint256 total_work{0};
+ for (const CBlockHeader& header : headers) {
+ CBlockIndex dummy(header);
+ total_work += GetBlockProof(dummy);
+ }
+ return total_work;
+}
+
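These two helpers let callers vet a batch of headers cheaply before handing them to ProcessNewBlockHeaders() with min_pow_checked=true. A minimal sketch of such a pre-check, under the assumption that the caller holds a ChainstateManager reference; the helper name is hypothetical and the real caller applies additional anti-DoS logic not shown here:

// Hypothetical pre-check: only claim min_pow_checked for batches whose
// headers each satisfy their nBits commitment and whose combined work
// meets the configured minimum chain work.
bool HeadersPassAntiDoSChecks(const ChainstateManager& chainman,
                              const std::vector<CBlockHeader>& headers)
{
    if (!HasValidProofOfWork(headers, chainman.GetConsensus())) return false;
    return CalculateHeadersWork(headers) >= chainman.MinimumChainWork();
}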
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock().
@@ -3441,7 +3552,7 @@ std::vector<unsigned char> ChainstateManager::GenerateCoinbaseCommitment(CBlock&
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const ChainstateManager& chainman, const CBlockIndex* pindexPrev, NodeClock::time_point now) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
AssertLockHeld(::cs_main);
assert(pindexPrev != nullptr);
@@ -3453,7 +3564,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "bad-diffbits", "incorrect proof of work");
// Check against checkpoints
- if (fCheckpointsEnabled) {
+ if (chainman.m_options.checkpoints_enabled) {
// Don't accept any forks from the main chain prior to last checkpoint.
// GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
// BlockIndex().
@@ -3469,8 +3580,9 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio
return state.Invalid(BlockValidationResult::BLOCK_INVALID_HEADER, "time-too-old", "block's timestamp is too early");
// Check timestamp
- if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME)
+ if (block.Time() > now + std::chrono::seconds{MAX_FUTURE_BLOCK_TIME}) {
return state.Invalid(BlockValidationResult::BLOCK_TIME_FUTURE, "time-too-new", "block timestamp too far in the future");
+ }
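The future-timestamp window itself is unchanged by the switch to std::chrono types: with MAX_FUTURE_BLOCK_TIME at its long-standing value of two hours, a header timestamped more than 7200 seconds ahead of the adjusted local clock (the NodeClock::time_point now parameter) is still rejected as time-too-new.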
// Reject blocks with outdated version
if ((block.nVersion < 2 && DeploymentActiveAfter(pindexPrev, chainman, Consensus::DEPLOYMENT_HEIGHTINCB)) ||
@@ -3571,9 +3683,10 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat
return true;
}
-bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, CBlockIndex** ppindex)
+bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationState& state, CBlockIndex** ppindex, bool min_pow_checked)
{
AssertLockHeld(cs_main);
+
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf{m_blockman.m_block_index.find(hash)};
@@ -3599,15 +3712,15 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi{m_blockman.m_block_index.find(block.hashPrevBlock)};
if (mi == m_blockman.m_block_index.end()) {
- LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
+ LogPrint(BCLog::VALIDATION, "header %s has prev block not found: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
}
pindexPrev = &((*mi).second);
if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
- LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
+ LogPrint(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
- if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev, m_adjusted_time_callback())) {
+ if (!ContextualCheckBlockHeader(block, state, m_blockman, *this, pindexPrev, m_options.adjusted_time_callback())) {
LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
return false;
}
@@ -3645,12 +3758,16 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida
m_blockman.m_dirty_blockindex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
+ LogPrint(BCLog::VALIDATION, "header %s has prev block invalid: %s\n", hash.ToString(), block.hashPrevBlock.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
}
}
}
+ if (!min_pow_checked) {
+ LogPrint(BCLog::VALIDATION, "%s: not adding new block header %s, missing anti-dos proof-of-work validation\n", __func__, hash.ToString());
+ return state.Invalid(BlockValidationResult::BLOCK_HEADER_LOW_WORK, "too-little-chainwork");
+ }
CBlockIndex* pindex{m_blockman.AddToBlockIndex(block, m_best_header)};
if (ppindex)
@@ -3660,14 +3777,14 @@ bool ChainstateManager::AcceptBlockHeader(const CBlockHeader& block, BlockValida
}
// Exposed wrapper for AcceptBlockHeader
-bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CBlockIndex** ppindex)
+bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, bool min_pow_checked, BlockValidationState& state, const CBlockIndex** ppindex)
{
AssertLockNotHeld(cs_main);
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
- bool accepted{AcceptBlockHeader(header, state, &pindex)};
+ bool accepted{AcceptBlockHeader(header, state, &pindex, min_pow_checked)};
ActiveChainstate().CheckBlockIndex();
if (!accepted) {
@@ -3689,8 +3806,33 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>&
return true;
}
+void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t height, int64_t timestamp)
+{
+ AssertLockNotHeld(cs_main);
+ const auto& chainstate = ActiveChainstate();
+ {
+ LOCK(cs_main);
+ // Don't report headers presync progress if we already have a post-minchainwork header chain.
+ // This means we lose reporting for potentially legitimate, but unlikely, deep reorgs, but
+ // prevent attackers that spam low-work headers from filling our logs.
+ if (m_best_header->nChainWork >= UintToArith256(GetConsensus().nMinimumChainWork)) return;
+ // Rate limit headers presync updates to 4 per second, as these are not subject to DoS
+ // protection.
+ auto now = std::chrono::steady_clock::now();
+ if (now < m_last_presync_update + std::chrono::milliseconds{250}) return;
+ m_last_presync_update = now;
+ }
+ bool initial_download = chainstate.IsInitialBlockDownload();
+ uiInterface.NotifyHeaderTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true);
+ if (initial_download) {
+ const int64_t blocks_left{(GetTime() - timestamp) / GetConsensus().nPowTargetSpacing};
+ const double progress{100.0 * height / (height + blocks_left)};
+ LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
+ }
+}
+
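As a worked example of the progress estimate above: with mainnet's 600-second target spacing, a presync header timestamped six days behind the local clock gives blocks_left = 518400 / 600 = 864, so at height 750000 the log line reports roughly 100 * 750000 / (750000 + 864) ≈ 99.88%.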
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock)
+bool Chainstate::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock, bool min_pow_checked)
{
const CBlock& block = *pblock;
@@ -3700,7 +3842,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
CBlockIndex *pindexDummy = nullptr;
CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
- bool accepted_header{m_chainman.AcceptBlockHeader(block, state, &pindex)};
+ bool accepted_header{m_chainman.AcceptBlockHeader(block, state, &pindex, min_pow_checked)};
CheckBlockIndex();
if (!accepted_header)
@@ -3736,10 +3878,12 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
// If our tip is behind, a peer could try to send us
// low-work blocks on a fake chain that we would never
// request; don't process these.
- if (pindex->nChainWork < nMinimumChainWork) return true;
+ if (pindex->nChainWork < m_chainman.MinimumChainWork()) return true;
}
- if (!CheckBlock(block, state, m_params.GetConsensus()) ||
+ const CChainParams& params{m_chainman.GetParams()};
+
+ if (!CheckBlock(block, state, params.GetConsensus()) ||
!ContextualCheckBlock(block, state, m_chainman, pindex->pprev)) {
if (state.IsInvalid() && state.GetResult() != BlockValidationResult::BLOCK_MUTATED) {
pindex->nStatus |= BLOCK_FAILED_VALID;
@@ -3756,7 +3900,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
// Write block to history file
if (fNewBlock) *fNewBlock = true;
try {
- FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, m_params, dbp)};
+ FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, pindex->nHeight, m_chain, params, dbp)};
if (blockPos.IsNull()) {
state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__));
return false;
@@ -3773,7 +3917,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block
return true;
}
-bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& block, bool force_processing, bool* new_block)
+bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked, bool* new_block)
{
AssertLockNotHeld(cs_main);
@@ -3794,7 +3938,7 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo
bool ret = CheckBlock(*block, state, GetConsensus());
if (ret) {
// Store to disk
- ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block);
+ ret = ActiveChainstate().AcceptBlock(block, state, &pindex, force_processing, nullptr, new_block, min_pow_checked);
}
if (!ret) {
GetMainSignals().BlockChecked(*block, state);
@@ -3815,7 +3959,7 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo
MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef& tx, bool test_accept)
{
AssertLockHeld(cs_main);
- CChainState& active_chainstate = ActiveChainstate();
+ Chainstate& active_chainstate = ActiveChainstate();
if (!active_chainstate.GetMempool()) {
TxValidationState state;
state.Invalid(TxValidationResult::TX_NO_MEMPOOL, "no-mempool");
@@ -3828,10 +3972,10 @@ MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef&
bool TestBlockValidity(BlockValidationState& state,
const CChainParams& chainparams,
- CChainState& chainstate,
+ Chainstate& chainstate,
const CBlock& block,
CBlockIndex* pindexPrev,
- const std::function<int64_t()>& adjusted_time_callback,
+ const std::function<NodeClock::time_point()>& adjusted_time_callback,
bool fCheckPOW,
bool fCheckMerkleRoot)
{
@@ -3860,7 +4004,7 @@ bool TestBlockValidity(BlockValidationState& state,
}
/* This function is called from the RPC code for pruneblockchain */
-void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
+void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight)
{
BlockValidationState state;
if (!active_chainstate.FlushStateToDisk(
@@ -3869,14 +4013,14 @@ void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeigh
}
}
-void CChainState::LoadMempool(const fs::path& load_path, FopenFn mockable_fopen_function)
+void Chainstate::LoadMempool(const fs::path& load_path, FopenFn mockable_fopen_function)
{
if (!m_mempool) return;
::LoadMempool(*m_mempool, load_path, *this, mockable_fopen_function);
m_mempool->SetLoadTried(!ShutdownRequested());
}
-bool CChainState::LoadChainTip()
+bool Chainstate::LoadChainTip()
{
AssertLockHeld(cs_main);
const CCoinsViewCache& coins_cache = CoinsTip();
@@ -3900,7 +4044,7 @@ bool CChainState::LoadChainTip()
tip->GetBlockHash().ToString(),
m_chain.Height(),
FormatISO8601DateTime(tip->GetBlockTime()),
- GuessVerificationProgress(m_params.TxData(), tip));
+ GuessVerificationProgress(m_chainman.GetParams().TxData(), tip));
return true;
}
@@ -3915,7 +4059,7 @@ CVerifyDB::~CVerifyDB()
}
bool CVerifyDB::VerifyDB(
- CChainState& chainstate,
+ Chainstate& chainstate,
const Consensus::Params& consensus_params,
CCoinsView& coinsview,
int nCheckLevel, int nCheckDepth)
@@ -4031,12 +4175,12 @@ bool CVerifyDB::VerifyDB(
}
/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
-bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs)
+bool Chainstate::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs)
{
AssertLockHeld(cs_main);
// TODO: merge with ConnectBlock
CBlock block;
- if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) {
+ if (!ReadBlockFromDisk(block, pindex, m_chainman.GetConsensus())) {
return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
}
@@ -4052,7 +4196,7 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i
return true;
}
-bool CChainState::ReplayBlocks()
+bool Chainstate::ReplayBlocks()
{
LOCK(cs_main);
@@ -4088,7 +4232,7 @@ bool CChainState::ReplayBlocks()
while (pindexOld != pindexFork) {
if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
CBlock block;
- if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) {
+ if (!ReadBlockFromDisk(block, pindexOld, m_chainman.GetConsensus())) {
return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
}
LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
@@ -4120,7 +4264,7 @@ bool CChainState::ReplayBlocks()
return true;
}
-bool CChainState::NeedsRedownload() const
+bool Chainstate::NeedsRedownload() const
{
AssertLockHeld(cs_main);
@@ -4138,7 +4282,7 @@ bool CChainState::NeedsRedownload() const
return false;
}
-void CChainState::UnloadBlockIndex()
+void Chainstate::UnloadBlockIndex()
{
AssertLockHeld(::cs_main);
nBlockSequenceId = 1;
@@ -4207,7 +4351,7 @@ bool ChainstateManager::LoadBlockIndex()
// detecting "holistically" whether the block index under consideration
// relied on an assumed-valid ancestor, but this proved to be too slow to
// be practical.
- for (CChainState* chainstate : GetAll()) {
+ for (Chainstate* chainstate : GetAll()) {
if (chainstate->reliesOnAssumedValid() ||
pindex->nHeight < first_assumed_valid_height) {
chainstate->setBlockIndexCandidates.insert(pindex);
@@ -4236,20 +4380,22 @@ bool ChainstateManager::LoadBlockIndex()
return true;
}
-bool CChainState::LoadGenesisBlock()
+bool Chainstate::LoadGenesisBlock()
{
LOCK(cs_main);
+ const CChainParams& params{m_chainman.GetParams()};
+
// Check whether we're already initialized by checking for genesis in
// m_blockman.m_block_index. Note that we can't use m_chain here, since it is
// set based on the coins db, not the block index db, which is the only
// thing loaded at this point.
- if (m_blockman.m_block_index.count(m_params.GenesisBlock().GetHash()))
+ if (m_blockman.m_block_index.count(params.GenesisBlock().GetHash()))
return true;
try {
- const CBlock& block = m_params.GenesisBlock();
- FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, m_params, nullptr)};
+ const CBlock& block = params.GenesisBlock();
+ FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, params, nullptr)};
if (blockPos.IsNull()) {
return error("%s: writing genesis block to disk failed", __func__);
}
@@ -4262,7 +4408,7 @@ bool CChainState::LoadGenesisBlock()
return true;
}
-void CChainState::LoadExternalBlockFile(
+void Chainstate::LoadExternalBlockFile(
FILE* fileIn,
FlatFilePos* dbp,
std::multimap<uint256, FlatFilePos>* blocks_with_unknown_parent)
@@ -4272,12 +4418,15 @@ void CChainState::LoadExternalBlockFile(
// Either both should be specified (-reindex), or neither (-loadblock).
assert(!dbp == !blocks_with_unknown_parent);
- int64_t nStart = GetTimeMillis();
+ const auto start{SteadyClock::now()};
+ const CChainParams& params{m_chainman.GetParams()};
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
+ // nRewind indicates where to resume scanning in case something goes wrong,
+ // such as a block failing to deserialize.
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
if (ShutdownRequested()) return;
@@ -4289,10 +4438,10 @@ void CChainState::LoadExternalBlockFile(
try {
// locate a header
unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
- blkdat.FindByte(m_params.MessageStart()[0]);
+ blkdat.FindByte(params.MessageStart()[0]);
nRewind = blkdat.GetPos() + 1;
blkdat >> buf;
- if (memcmp(buf, m_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
+ if (memcmp(buf, params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
continue;
}
// read size
@@ -4301,28 +4450,30 @@ void CChainState::LoadExternalBlockFile(
continue;
} catch (const std::exception&) {
// no valid block header found; don't complain
+ // (this happens at the end of every blk.dat file)
break;
}
try {
- // read block
- uint64_t nBlockPos = blkdat.GetPos();
+ // read block header
+ const uint64_t nBlockPos{blkdat.GetPos()};
if (dbp)
dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize);
- std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
- CBlock& block = *pblock;
- blkdat >> block;
- nRewind = blkdat.GetPos();
-
- uint256 hash = block.GetHash();
+ CBlockHeader header;
+ blkdat >> header;
+ const uint256 hash{header.GetHash()};
+ // Skip the rest of this block (this may read from disk into memory); position to the marker before the
+ // next block, but it's still possible to rewind to the start of the current block (without a disk read).
+ nRewind = nBlockPos + nSize;
+ blkdat.SkipTo(nRewind);
{
LOCK(cs_main);
// detect out of order blocks, and store them for later
- if (hash != m_params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) {
+ if (hash != params.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(header.hashPrevBlock)) {
LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
- block.hashPrevBlock.ToString());
+ header.hashPrevBlock.ToString());
if (dbp && blocks_with_unknown_parent) {
- blocks_with_unknown_parent->emplace(block.hashPrevBlock, *dbp);
+ blocks_with_unknown_parent->emplace(header.hashPrevBlock, *dbp);
}
continue;
}
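The restructured import loop above first deserializes only the 80-byte header and uses blkdat.SkipTo() to jump past the block body; as the next hunk shows, the full CBlock is read (after rewinding with blkdat.SetPos()) only when the block can actually be processed, so out-of-order blocks that are merely queued in blocks_with_unknown_parent never pay for a full deserialization.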
@@ -4330,20 +4481,26 @@ void CChainState::LoadExternalBlockFile(
// process in case the block isn't known yet
const CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash);
if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) {
- BlockValidationState state;
- if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr)) {
- nLoaded++;
- }
- if (state.IsError()) {
- break;
- }
- } else if (hash != m_params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
+ // This block can be processed immediately; rewind to its start, read and deserialize it.
+ blkdat.SetPos(nBlockPos);
+ std::shared_ptr<CBlock> pblock{std::make_shared<CBlock>()};
+ blkdat >> *pblock;
+ nRewind = blkdat.GetPos();
+
+ BlockValidationState state;
+ if (AcceptBlock(pblock, state, nullptr, true, dbp, nullptr, true)) {
+ nLoaded++;
+ }
+ if (state.IsError()) {
+ break;
+ }
+ } else if (hash != params.GetConsensus().hashGenesisBlock && pindex->nHeight % 1000 == 0) {
LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), pindex->nHeight);
}
}
// Activate the genesis block so normal node progress can continue
- if (hash == m_params.GetConsensus().hashGenesisBlock) {
+ if (hash == params.GetConsensus().hashGenesisBlock) {
BlockValidationState state;
if (!ActivateBestChain(state, nullptr)) {
break;
@@ -4364,12 +4521,12 @@ void CChainState::LoadExternalBlockFile(
while (range.first != range.second) {
std::multimap<uint256, FlatFilePos>::iterator it = range.first;
std::shared_ptr<CBlock> pblockrecursive = std::make_shared<CBlock>();
- if (ReadBlockFromDisk(*pblockrecursive, it->second, m_params.GetConsensus())) {
+ if (ReadBlockFromDisk(*pblockrecursive, it->second, params.GetConsensus())) {
LogPrint(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(),
head.ToString());
LOCK(cs_main);
BlockValidationState dummy;
- if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr)) {
+ if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr, true)) {
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
}
@@ -4380,18 +4537,29 @@ void CChainState::LoadExternalBlockFile(
}
}
} catch (const std::exception& e) {
- LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
+ // Historical bugs added extra data to the block files that does not deserialize cleanly.
+ // Commonly this data lies between readable blocks, but it does not really matter: such data is not fatal to the import process.
+ // The code that reads the block files deals with invalid data by simply ignoring it.
+ // It continues to search for the next {4 byte magic message start bytes + 4 byte length + block} that does deserialize cleanly
+ // and passes all of the other block validation checks dealing with POW and the merkle root, etc.
+ // We merely note with this informational log message when unexpected data is encountered.
+ // We could also be experiencing a storage system read error, or a read of a previous bad write; these are possible but
+ // less likely scenarios, and we don't have enough information to tell the difference here.
+ // The reindex process is not the place to attempt to clean and/or compact the block files. If so desired, a studious node operator
+ // may use knowledge of the fact that the block files are not entirely pristine in order to prepare a set of pristine, and
+ // perhaps ordered, block files for later reindexing.
+ LogPrint(BCLog::REINDEX, "%s: unexpected data at file offset 0x%x - %s. continuing\n", __func__, (nRewind - 1), e.what());
}
}
} catch (const std::runtime_error& e) {
AbortNode(std::string("System error: ") + e.what());
}
- LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
+ LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, Ticks<std::chrono::milliseconds>(SteadyClock::now() - start));
}
-void CChainState::CheckBlockIndex()
+void Chainstate::CheckBlockIndex()
{
- if (!fCheckBlockIndex) {
+ if (!m_chainman.ShouldCheckBlockIndex()) {
return;
}
@@ -4464,7 +4632,7 @@ void CChainState::CheckBlockIndex()
// Begin: actual consistency checks.
if (pindex->pprev == nullptr) {
// Genesis block checks.
- assert(pindex->GetBlockHash() == m_params.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
+ assert(pindex->GetBlockHash() == m_chainman.GetConsensus().hashGenesisBlock); // Genesis block's hash must match.
assert(pindex == m_chain.Genesis()); // The current active chain's genesis block must be this block.
}
if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
@@ -4611,7 +4779,7 @@ void CChainState::CheckBlockIndex()
assert(nNodes == forward.size());
}
-std::string CChainState::ToString()
+std::string Chainstate::ToString()
{
AssertLockHeld(::cs_main);
CBlockIndex* tip = m_chain.Tip();
@@ -4620,7 +4788,7 @@ std::string CChainState::ToString()
tip ? tip->nHeight : -1, tip ? tip->GetBlockHash().ToString() : "null");
}
-bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
+bool Chainstate::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
{
AssertLockHeld(::cs_main);
if (coinstip_size == m_coinstip_cache_size_bytes &&
@@ -4681,10 +4849,10 @@ std::optional<uint256> ChainstateManager::SnapshotBlockhash() const
return std::nullopt;
}
-std::vector<CChainState*> ChainstateManager::GetAll()
+std::vector<Chainstate*> ChainstateManager::GetAll()
{
LOCK(::cs_main);
- std::vector<CChainState*> out;
+ std::vector<Chainstate*> out;
if (!IsSnapshotValidated() && m_ibd_chainstate) {
out.push_back(m_ibd_chainstate.get());
@@ -4697,28 +4865,15 @@ std::vector<CChainState*> ChainstateManager::GetAll()
return out;
}
-CChainState& ChainstateManager::InitializeChainstate(
- CTxMemPool* mempool, const std::optional<uint256>& snapshot_blockhash)
+Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool)
{
AssertLockHeld(::cs_main);
- bool is_snapshot = snapshot_blockhash.has_value();
- std::unique_ptr<CChainState>& to_modify =
- is_snapshot ? m_snapshot_chainstate : m_ibd_chainstate;
-
- if (to_modify) {
- throw std::logic_error("should not be overwriting a chainstate");
- }
- to_modify.reset(new CChainState(mempool, m_blockman, *this, snapshot_blockhash));
+ assert(!m_ibd_chainstate);
+ assert(!m_active_chainstate);
- // Snapshot chainstates and initial IBD chaintates always become active.
- if (is_snapshot || (!is_snapshot && !m_active_chainstate)) {
- LogPrintf("Switching active chainstate to %s\n", to_modify->ToString());
- m_active_chainstate = to_modify.get();
- } else {
- throw std::logic_error("unexpected chainstate activation");
- }
-
- return *to_modify;
+ m_ibd_chainstate = std::make_unique<Chainstate>(mempool, m_blockman, *this);
+ m_active_chainstate = m_ibd_chainstate.get();
+ return *m_active_chainstate;
}
const AssumeutxoData* ExpectedAssumeutxo(
@@ -4733,6 +4888,46 @@ const AssumeutxoData* ExpectedAssumeutxo(
return nullptr;
}
+static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+{
+ AssertLockHeld(::cs_main);
+
+ if (is_snapshot) {
+ fs::path base_blockhash_path = db_path / node::SNAPSHOT_BLOCKHASH_FILENAME;
+
+ if (fs::exists(base_blockhash_path)) {
+ bool removed = fs::remove(base_blockhash_path);
+ if (!removed) {
+ LogPrintf("[snapshot] failed to remove file %s\n",
+ fs::PathToString(base_blockhash_path));
+ }
+ } else {
+ LogPrintf("[snapshot] snapshot chainstate dir being removed lacks %s file\n",
+ fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME));
+ }
+ }
+
+ std::string path_str = fs::PathToString(db_path);
+ LogPrintf("Removing leveldb dir at %s\n", path_str);
+
+ // We have to destruct leveldb::DB before this call in order to release the db
+ // lock, otherwise `DestroyDB` will fail. See `leveldb::~DBImpl()`.
+ const bool destroyed = dbwrapper::DestroyDB(path_str, {}).ok();
+
+ if (!destroyed) {
+ LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str);
+ }
+
+ // Datadir should be removed from filesystem; otherwise initialization may detect
+ // it on subsequent startups and get confused.
+ //
+ // If the base_blockhash_path removal above fails in the case of snapshot
+ // chainstates, this will return false since leveldb won't remove a non-empty
+ // directory.
+ return destroyed && !fs::exists(db_path);
+}
+
bool ChainstateManager::ActivateSnapshot(
AutoFile& coins_file,
const SnapshotMetadata& metadata,
@@ -4778,7 +4973,7 @@ bool ChainstateManager::ActivateSnapshot(
}
auto snapshot_chainstate = WITH_LOCK(::cs_main,
- return std::make_unique<CChainState>(
+ return std::make_unique<Chainstate>(
/*mempool=*/nullptr, m_blockman, *this, base_blockhash));
{
@@ -4790,11 +4985,34 @@ bool ChainstateManager::ActivateSnapshot(
static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC));
}
- const bool snapshot_ok = this->PopulateAndValidateSnapshot(
+ bool snapshot_ok = this->PopulateAndValidateSnapshot(
*snapshot_chainstate, coins_file, metadata);
+ // If not in-memory, persist the base blockhash for use during subsequent
+ // initialization.
+ if (!in_memory) {
+ LOCK(::cs_main);
+ if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) {
+ snapshot_ok = false;
+ }
+ }
if (!snapshot_ok) {
- WITH_LOCK(::cs_main, this->MaybeRebalanceCaches());
+ LOCK(::cs_main);
+ this->MaybeRebalanceCaches();
+
+ // PopulateAndValidateSnapshot can return (in error) before the leveldb datadir
+ // has been created, so only attempt removal if we got that far.
+ if (auto snapshot_datadir = node::FindSnapshotChainstateDir()) {
+ // We have to destruct leveldb::DB in order to release the db lock, otherwise
+ // DestroyDB() (in DeleteCoinsDBFromDisk()) will fail. See `leveldb::~DBImpl()`.
+ // Destructing the chainstate (and so resetting the coinsviews object) does this.
+ snapshot_chainstate.reset();
+ bool removed = DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true);
+ if (!removed) {
+ AbortNode(strprintf("Failed to remove snapshot chainstate dir (%s). "
+ "Manually remove it before restarting.\n", fs::PathToString(*snapshot_datadir)));
+ }
+ }
return false;
}
@@ -4828,7 +5046,7 @@ static void FlushSnapshotToDisk(CCoinsViewCache& coins_cache, bool snapshot_load
}
bool ChainstateManager::PopulateAndValidateSnapshot(
- CChainState& snapshot_chainstate,
+ Chainstate& snapshot_chainstate,
AutoFile& coins_file,
const SnapshotMetadata& metadata)
{
@@ -4922,7 +5140,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
// Important that we set this. This and the coins_cache accesses above are
// sort of a layer violation, but either we reach into the innards of
- // CCoinsViewCache here or we have to invert some of the CChainState to
+ // CCoinsViewCache here or we have to invert some of the Chainstate to
// embed them in a snapshot-activation-specific CCoinsViewCache bulk load
// method.
coins_cache.SetBestBlock(base_blockhash);
@@ -5001,7 +5219,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
index->nStatus |= BLOCK_ASSUMED_VALID;
}
- // Fake BLOCK_OPT_WITNESS so that CChainState::NeedsRedownload()
+ // Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload()
// won't ask to rewind the entire assumed-valid chain on startup.
if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) {
index->nStatus |= BLOCK_OPT_WITNESS;
@@ -5024,7 +5242,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
return true;
}
-CChainState& ChainstateManager::ActiveChainstate() const
+Chainstate& ChainstateManager::ActiveChainstate() const
{
LOCK(::cs_main);
assert(m_active_chainstate);
@@ -5068,6 +5286,29 @@ void ChainstateManager::MaybeRebalanceCaches()
}
}
+void ChainstateManager::ResetChainstates()
+{
+ m_ibd_chainstate.reset();
+ m_snapshot_chainstate.reset();
+ m_active_chainstate = nullptr;
+}
+
+/**
+ * Apply default chain params to nullopt members.
+ * This helps to avoid coding errors where a comparison operator that accepts
+ * nullopt is used by accident, silently ignoring the intended default value.
+ */
+static ChainstateManager::Options&& Flatten(ChainstateManager::Options&& opts)
+{
+ if (!opts.check_block_index.has_value()) opts.check_block_index = opts.chainparams.DefaultConsistencyChecks();
+ if (!opts.minimum_chain_work.has_value()) opts.minimum_chain_work = UintToArith256(opts.chainparams.GetConsensus().nMinimumChainWork);
+ if (!opts.assumed_valid_block.has_value()) opts.assumed_valid_block = opts.chainparams.GetConsensus().defaultAssumeValid;
+ Assert(opts.adjusted_time_callback);
+ return std::move(opts);
+}
+
+ChainstateManager::ChainstateManager(Options options) : m_options{Flatten(std::move(options))} {}
+
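With the Options-based constructor and the Flatten() defaults above, wiring up a ChainstateManager might look roughly like the sketch below. Only the fields referenced in this file (chainparams, adjusted_time_callback, check_block_index, minimum_chain_work, assumed_valid_block, checkpoints_enabled) are known here; everything else about the Options aggregate, including the use of designated initializers, is an assumption.

// Sketch only; not the actual startup wiring.
void ExampleSetup(const CChainParams& chainparams)
{
    ChainstateManager::Options opts{
        .chainparams = chainparams,
        // Assumed callback shape: must match std::function<NodeClock::time_point()>.
        .adjusted_time_callback = []() { return NodeClock::now(); },
    };
    // check_block_index, minimum_chain_work and assumed_valid_block are left as
    // nullopt and filled in with chain defaults by Flatten().
    ChainstateManager chainman{std::move(opts)};
    // ... use chainman ...
}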
ChainstateManager::~ChainstateManager()
{
LOCK(::cs_main);
@@ -5079,3 +5320,43 @@ ChainstateManager::~ChainstateManager()
i.clear();
}
}
+
+bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool* mempool)
+{
+ assert(!m_snapshot_chainstate);
+ std::optional<fs::path> path = node::FindSnapshotChainstateDir();
+ if (!path) {
+ return false;
+ }
+ std::optional<uint256> base_blockhash = node::ReadSnapshotBaseBlockhash(*path);
+ if (!base_blockhash) {
+ return false;
+ }
+ LogPrintf("[snapshot] detected active snapshot chainstate (%s) - loading\n",
+ fs::PathToString(*path));
+
+ this->ActivateExistingSnapshot(mempool, *base_blockhash);
+ return true;
+}
+
+Chainstate& ChainstateManager::ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash)
+{
+ assert(!m_snapshot_chainstate);
+ m_snapshot_chainstate =
+ std::make_unique<Chainstate>(mempool, m_blockman, *this, base_blockhash);
+ LogPrintf("[snapshot] switching active chainstate to %s\n", m_snapshot_chainstate->ToString());
+ m_active_chainstate = m_snapshot_chainstate.get();
+ return *m_snapshot_chainstate;
+}
+
+bool IsBIP30Repeat(const CBlockIndex& block_index)
+{
+ return (block_index.nHeight==91842 && block_index.GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
+ (block_index.nHeight==91880 && block_index.GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721"));
+}
+
+bool IsBIP30Unspendable(const CBlockIndex& block_index)
+{
+ return (block_index.nHeight==91722 && block_index.GetBlockHash() == uint256S("0x00000000000271a2dc26e7667f8419f2e15416dc6955e5a6c6cdf3f2574dd08e")) ||
+ (block_index.nHeight==91812 && block_index.GetBlockHash() == uint256S("0x00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f"));
+}
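For context, these four heights are the two historical duplicate-coinbase pairs that motivated BIP30: block 91842 repeated the coinbase transaction of block 91812, and block 91880 repeated the coinbase of block 91722, so each "repeat" block overwrote the unspent output created by the corresponding "unspendable" block.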