path: root/src/validation.cpp
Diffstat (limited to 'src/validation.cpp')
-rw-r--r--  src/validation.cpp  1019
1 file changed, 635 insertions(+), 384 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index 2e2a89bcf4..a073e3d1c0 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -14,15 +14,18 @@
#include "consensus/merkle.h"
#include "consensus/tx_verify.h"
#include "consensus/validation.h"
+#include "cuckoocache.h"
#include "fs.h"
#include "hash.h"
#include "init.h"
#include "policy/fees.h"
#include "policy/policy.h"
+#include "policy/rbf.h"
#include "pow.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "random.h"
+#include "reverse_iterator.h"
#include "script/script.h"
#include "script/sigcache.h"
#include "script/standard.h"
@@ -50,6 +53,9 @@
# error "Bitcoin cannot be compiled without assertions."
#endif
+#define MICRO 0.000001
+#define MILLI 0.001
+
/**
* Global state
*/
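
The MICRO and MILLI constants added above replace the bare 0.000001 and 0.001 literals in the BENCH log lines, and together with the nBlocksTotal counter introduced later in this diff they let each timer also report a per-block average. A minimal sketch of the conversion, with hypothetical timing values (elapsed_us, total_us, blocks are illustrative, not names from the file):

#include <cstdint>
#include <cstdio>

static constexpr double MICRO = 0.000001; // microseconds -> seconds
static constexpr double MILLI = 0.001;    // microseconds -> milliseconds

int main() {
    int64_t elapsed_us = 2534;      // time spent in this block's step
    int64_t total_us   = 18200000;  // cumulative time across all blocks
    int64_t blocks     = 1200;      // analogue of nBlocksTotal
    std::printf("step: %.2fms [%.2fs (%.2fms/blk)]\n",
                MILLI * elapsed_us, total_us * MICRO, total_us * MILLI / blocks);
    return 0;
}
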
@@ -58,12 +64,12 @@ CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
-CBlockIndex *pindexBestHeader = NULL;
+CBlockIndex *pindexBestHeader = nullptr;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
-bool fReindex = false;
+std::atomic_bool fReindex(false);
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
@@ -77,6 +83,7 @@ int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
bool fEnableReplacement = DEFAULT_ENABLE_REPLACEMENT;
uint256 hashAssumeValid;
+arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
@@ -96,7 +103,7 @@ namespace {
struct CBlockIndexWorkComparator
{
- bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
+ bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const {
// First sort by most total work, ...
if (pa->nChainWork > pb->nChainWork) return false;
if (pa->nChainWork < pb->nChainWork) return true;
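
The comparator now takes const pointers. Its ordering is arranged so that the candidate with the most accumulated work sorts last (setBlockIndexCandidates is read from rbegin()), with ties broken in favour of the block received first. A self-contained sketch of the same ordering, using a simplified stand-in for CBlockIndex:

#include <cassert>
#include <set>

// Simplified stand-in for CBlockIndex; only the fields the ordering needs.
struct Index {
    unsigned long long nChainWork;   // arith_uint256 in the real code
    int nSequenceId;                 // order in which blocks were received
};

struct WorkComparator {
    bool operator()(const Index* a, const Index* b) const {
        // Most total work sorts last, so *rbegin() is the best candidate.
        if (a->nChainWork > b->nChainWork) return false;
        if (a->nChainWork < b->nChainWork) return true;
        // Ties: the block received first (lower nSequenceId) also sorts last.
        if (a->nSequenceId < b->nSequenceId) return false;
        if (a->nSequenceId > b->nSequenceId) return true;
        // Break remaining ties by pointer identity, as the real comparator does.
        if (a < b) return false;
        if (a > b) return true;
        return false;
    }
};

int main() {
    Index a{100, 2}, b{100, 1}, c{90, 3};
    std::set<const Index*, WorkComparator> candidates{&a, &b, &c};
    assert(*candidates.rbegin() == &b); // most work, received earliest
    return 0;
}
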
@@ -174,9 +181,9 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc
return chain.Genesis();
}
-CCoinsViewDB *pcoinsdbview = NULL;
-CCoinsViewCache *pcoinsTip = NULL;
-CBlockTreeDB *pblocktree = NULL;
+CCoinsViewDB *pcoinsdbview = nullptr;
+CCoinsViewCache *pcoinsTip = nullptr;
+CBlockTreeDB *pblocktree = nullptr;
enum FlushStateMode {
FLUSH_STATE_NONE,
@@ -189,7 +196,7 @@ enum FlushStateMode {
static bool FlushStateToDisk(const CChainParams& chainParams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight=0);
static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight);
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight);
-static bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = NULL);
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr);
static FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly = false);
bool CheckFinalTx(const CTransaction &tx, int flags)
@@ -212,7 +219,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
- // BIP113 will require that time-locked transactions have nLockTime set to
+ // BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
@@ -248,6 +255,8 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
AssertLockHeld(mempool.cs);
CBlockIndex* tip = chainActive.Tip();
+ assert(tip != nullptr);
+
CBlockIndex index;
index.pprev = tip;
// CheckSequenceLocks() uses chainActive.Height()+1 to evaluate
@@ -312,6 +321,9 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp, bool
return EvaluateSequenceLocks(index, lockPair);
}
+// Returns the script flags which should be checked for a given block
+static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams);
+
static void LimitMempoolSize(CTxMemPool& pool, size_t limit, unsigned long age) {
int expired = pool.Expire(GetTime() - age);
if (expired != 0) {
@@ -372,7 +384,9 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
- if (!fAddToMempool || (*it)->IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, *it, false, NULL, NULL, true)) {
+ if (!fAddToMempool || (*it)->IsCoinBase() ||
+ !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
+ nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
@@ -392,12 +406,48 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
// We also need to remove any now-immature transactions
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
// Re-limit mempool size, in case we added any transactions
- LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
+ LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
}
-static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx, bool fLimitFree,
+// Used to avoid mempool polluting consensus critical paths if CCoinsViewMempool
+// were somehow broken and returning the wrong scriptPubKeys
+static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &view, CTxMemPool& pool,
+ unsigned int flags, bool cacheSigStore, PrecomputedTransactionData& txdata) {
+ AssertLockHeld(cs_main);
+
+ // pool.cs should be locked already, but go ahead and re-take the lock here
+ // to enforce that mempool doesn't change between when we check the view
+ // and when we actually call through to CheckInputs
+ LOCK(pool.cs);
+
+ assert(!tx.IsCoinBase());
+ for (const CTxIn& txin : tx.vin) {
+ const Coin& coin = view.AccessCoin(txin.prevout);
+
+ // At this point we haven't actually checked if the coins are all
+ // available (or shouldn't assume we have, since CheckInputs does).
+ // So we just return failure if the inputs are not available here,
+ // and then only have to check equivalence for available inputs.
+ if (coin.IsSpent()) return false;
+
+ const CTransactionRef& txFrom = pool.get(txin.prevout.hash);
+ if (txFrom) {
+ assert(txFrom->GetHash() == txin.prevout.hash);
+ assert(txFrom->vout.size() > txin.prevout.n);
+ assert(txFrom->vout[txin.prevout.n] == coin.out);
+ } else {
+ const Coin& coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
+ assert(!coinFromDisk.IsSpent());
+ assert(coinFromDisk.out == coin.out);
+ }
+ }
+
+ return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
+}
+
+static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
+ bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
{
const CTransaction& tx = *ptx;
const uint256 hash = tx.GetHash();
@@ -414,7 +464,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Reject transactions with witness before segregated witness activates (override with -prematurewitness)
bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus());
- if (!GetBoolArg("-prematurewitness",false) && tx.HasWitness() && !witnessEnabled) {
+ if (!gArgs.GetBoolArg("-prematurewitness", false) && tx.HasWitness() && !witnessEnabled) {
return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
}
@@ -447,9 +497,9 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
if (!setConflicts.count(ptxConflicting->GetHash()))
{
// Allow opt-out of transaction replacement by setting
- // nSequence >= maxint-1 on all inputs.
+ // nSequence > MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2) on all inputs.
//
- // maxint-1 is picked to still allow use of nLockTime by
+ // SEQUENCE_FINAL-1 is picked to still allow use of nLockTime by
// non-replaceable transactions. All inputs rather than just one
// is for the sake of multi-party protocols, where we don't
// want a single party to be able to disable replacement.
@@ -463,7 +513,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
{
for (const CTxIn &_txin : ptxConflicting->vin)
{
- if (_txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
+ if (_txin.nSequence <= MAX_BIP125_RBF_SEQUENCE)
{
fReplacementOptOut = false;
break;
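
The new constant makes the opt-out rule explicit: a conflicting mempool transaction may only be replaced if at least one of its inputs signals replaceability with an nSequence at or below MAX_BIP125_RBF_SEQUENCE (SEQUENCE_FINAL-2). A stand-alone sketch of that signaling test, assuming nothing beyond the sequence numbers themselves:

#include <cassert>
#include <cstdint>
#include <vector>

// BIP125: SEQUENCE_FINAL is 0xffffffff; anything at or below
// SEQUENCE_FINAL - 2 signals that the transaction may be replaced.
static constexpr uint32_t SEQUENCE_FINAL = 0xffffffff;
static constexpr uint32_t MAX_BIP125_RBF_SEQUENCE = SEQUENCE_FINAL - 2; // 0xfffffffd

// A transaction signals replaceability if any input opts in.
bool SignalsRBF(const std::vector<uint32_t>& input_sequences) {
    for (uint32_t nSequence : input_sequences) {
        if (nSequence <= MAX_BIP125_RBF_SEQUENCE) return true;
    }
    return false;
}

int main() {
    // SEQUENCE_FINAL - 1 still lets nLockTime take effect but does not
    // signal replaceability; SEQUENCE_FINAL - 2 does.
    assert(!SignalsRBF({SEQUENCE_FINAL, SEQUENCE_FINAL - 1}));
    assert(SignalsRBF({SEQUENCE_FINAL, SEQUENCE_FINAL - 2}));
    return 0;
}
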
@@ -490,24 +540,20 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
view.SetBackend(viewMemPool);
- // do we already have it?
- for (size_t out = 0; out < tx.vout.size(); out++) {
- COutPoint outpoint(hash, out);
- bool had_coin_in_cache = pcoinsTip->HaveCoinInCache(outpoint);
- if (view.HaveCoin(outpoint)) {
- if (!had_coin_in_cache) {
- coins_to_uncache.push_back(outpoint);
- }
- return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
- }
- }
-
// do all inputs exist?
for (const CTxIn txin : tx.vin) {
if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
coins_to_uncache.push_back(txin.prevout);
}
if (!view.HaveCoin(txin.prevout)) {
+ // Are inputs missing because we already have the tx?
+ for (size_t out = 0; out < tx.vout.size(); out++) {
+ // Optimistically just do efficient check of cache for outputs
+ if (pcoinsTip->HaveCoinInCache(COutPoint(hash, out))) {
+ return state.Invalid(false, REJECT_DUPLICATE, "txn-already-known");
+ }
+ }
+ // Otherwise assume this might be an orphan tx for which we just haven't seen parents yet
if (pfMissingInputs) {
*pfMissingInputs = true;
}
@@ -574,13 +620,13 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
strprintf("%d", nSigOpsCost));
- CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
- if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
+ CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
+ if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
}
// No transactions are allowed below minRelayTxFee except from disconnected blocks
- if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
+ if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met");
}
@@ -591,10 +637,10 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Calculate in-mempool ancestors, up to a limit.
CTxMemPool::setEntries setAncestors;
- size_t nLimitAncestors = GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
- size_t nLimitAncestorSize = GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
- size_t nLimitDescendants = GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
- size_t nLimitDescendantSize = GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
+ size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
+ size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
+ size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
+ size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
std::string errString;
if (!pool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
@@ -746,38 +792,57 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
if (!chainparams.RequireStandard()) {
- scriptVerifyFlags = GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
+ scriptVerifyFlags = gArgs.GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
}
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
- if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, txdata)) {
+ if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false, txdata)) {
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
// need to turn both off, and compare against just turning off CLEANSTACK
// to see if the failure is specifically due to witness validation.
CValidationState stateDummy; // Want reported failures to be from first CheckInputs
- if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, txdata) &&
- !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, txdata)) {
+ if (!tx.HasWitness() && CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, txdata) &&
+ !CheckInputs(tx, stateDummy, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, txdata)) {
// Only the witness is missing, so the transaction itself may be fine.
state.SetCorruptionPossible();
}
return false; // state filled in by CheckInputs
}
- // Check again against just the consensus-critical mandatory script
- // verification flags, in case of bugs in the standard flags that cause
+ // Check again against the current block tip's script verification
+ // flags to cache our script execution flags. This is, of course,
+ // useless if the next block has different script flags from the
+ // previous one, but because the cache tracks script flags for us it
+ // will auto-invalidate and we'll just have a few blocks of extra
+ // misses on soft-fork activation.
+ //
+ // This is also useful in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain
// CHECKSIG NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
- // invalid blocks, however allowing such transactions into the mempool
- // can be exploited as a DoS attack.
- if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata))
+ // invalid blocks (using TestBlockValidity), however allowing such
+ // transactions into the mempool can be exploited as a DoS attack.
+ unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(chainActive.Tip(), Params().GetConsensus());
+ if (!CheckInputsFromMempoolAndCache(tx, state, view, pool, currentBlockScriptVerifyFlags, true, txdata))
{
- return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
- __func__, hash.ToString(), FormatStateMessage(state));
+ // If we're using promiscuousmempoolflags, we may hit this normally
+ // Check if current block has some flags that scriptVerifyFlags
+ // does not before printing an ominous warning
+ if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
+ return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against latest-block but not STANDARD flags %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ } else {
+ if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, false, txdata)) {
+ return error("%s: ConnectInputs failed against MANDATORY but not STANDARD flags due to promiscuous mempool %s, %s",
+ __func__, hash.ToString(), FormatStateMessage(state));
+ } else {
+ LogPrintf("Warning: -promiscuousmempool flags set to not include currently enforced soft forks, this may break mining or otherwise cause instability!\n");
+ }
+ }
}
// Remove conflicting transactions from the mempool
@@ -793,18 +858,19 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
- // This transaction should only count for fee estimation if it isn't a
- // BIP 125 replacement transaction (may not be widely supported), the
- // node is not behind, and the transaction is not dependent on any other
- // transactions in the mempool.
- bool validForFeeEstimation = !fReplacementTransaction && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
+ // This transaction should only count for fee estimation if:
+ // - it isn't a BIP 125 replacement transaction (may not be widely supported)
+ // - it's not being readded during a reorg which bypasses typical mempool fee limits
+ // - the node is not behind
+ // - the transaction is not dependent on any other transactions in the mempool
+ bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
// Store transaction in memory
pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation);
// trim mempool and check if tx was trimmed
- if (!fOverrideMempoolLimit) {
- LimitMempoolSize(pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
+ if (!bypass_limits) {
+ LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
}
@@ -816,12 +882,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
/** (try to) add transaction to memory pool with a specified acceptance time **/
-static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree,
+static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+ bool bypass_limits, const CAmount nAbsurdFee)
{
std::vector<COutPoint> coins_to_uncache;
- bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache);
+ bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache);
if (!res) {
for (const COutPoint& hashTx : coins_to_uncache)
pcoinsTip->Uncache(hashTx);
@@ -832,18 +898,18 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo
return res;
}
-bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree,
+bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+ bool bypass_limits, const CAmount nAbsurdFee)
{
const CChainParams& chainparams = Params();
- return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee);
+ return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee);
}
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
- CBlockIndex *pindexSlow = NULL;
+ CBlockIndex *pindexSlow = nullptr;
LOCK(cs_main);
@@ -976,8 +1042,6 @@ CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
bool IsInitialBlockDownload()
{
- const CChainParams& chainParams = Params();
-
// Once this function has returned false, it must remain false.
static std::atomic<bool> latchToFalse{false};
// Optimization: pre-test latch before taking the lock.
@@ -989,9 +1053,9 @@ bool IsInitialBlockDownload()
return false;
if (fImporting || fReindex)
return true;
- if (chainActive.Tip() == NULL)
+ if (chainActive.Tip() == nullptr)
return true;
- if (chainActive.Tip()->nChainWork < UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
+ if (chainActive.Tip()->nChainWork < nMinimumChainWork)
return true;
if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
return true;
@@ -1000,12 +1064,12 @@ bool IsInitialBlockDownload()
return false;
}
-CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
+CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
static void AlertNotify(const std::string& strMessage)
{
uiInterface.NotifyAlertChanged();
- std::string strCmd = GetArg("-alertnotify", "");
+ std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) return;
// Alert text should be plain ascii coming from a trusted source, but to
@@ -1030,7 +1094,7 @@ static void CheckForkWarningConditions()
// If our best fork is no longer within 72 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 72)
- pindexBestForkTip = NULL;
+ pindexBestForkTip = nullptr;
if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
{
@@ -1082,7 +1146,7 @@ static void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both)
// We define it this way because it allows us to only store the highest fork tip (+ base) which meets
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
- if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
+ if (pfork && (!pindexBestForkTip || pindexNewForkTip->nHeight > pindexBestForkTip->nHeight) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
chainActive.Height() - pindexNewForkTip->nHeight < 72)
{
@@ -1143,7 +1207,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
- return VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error);
+ return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
int GetSpendHeight(const CCoinsViewCache& inputs)
@@ -1153,12 +1217,34 @@ int GetSpendHeight(const CCoinsViewCache& inputs)
return pindexPrev->nHeight + 1;
}
+
+static CuckooCache::cache<uint256, SignatureCacheHasher> scriptExecutionCache;
+static uint256 scriptExecutionCacheNonce(GetRandHash());
+
+void InitScriptExecutionCache() {
+ // nMaxCacheSize is unsigned. If -maxsigcachesize is set to zero,
+ // setup_bytes creates the minimum possible cache (2 elements).
+ size_t nMaxCacheSize = std::min(std::max((int64_t)0, gArgs.GetArg("-maxsigcachesize", DEFAULT_MAX_SIG_CACHE_SIZE) / 2), MAX_MAX_SIG_CACHE_SIZE) * ((size_t) 1 << 20);
+ size_t nElems = scriptExecutionCache.setup_bytes(nMaxCacheSize);
+ LogPrintf("Using %zu MiB out of %zu/2 requested for script execution cache, able to store %zu elements\n",
+ (nElems*sizeof(uint256)) >>20, (nMaxCacheSize*2)>>20, nElems);
+}
+
/**
* Check whether all inputs of this transaction are valid (no double spends, scripts & sigs, amounts)
- * This does not modify the UTXO set. If pvChecks is not NULL, script checks are pushed onto it
- * instead of being performed inline.
+ * This does not modify the UTXO set.
+ *
+ * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any
+ * script checks which are not necessary (eg due to script execution cache hits) are, obviously,
+ * not pushed onto pvChecks/run.
+ *
+ * Setting cacheSigStore/cacheFullScriptStore to false will remove elements from the corresponding cache
+ * which are matched. This is useful for checking blocks where we will likely never need the cache
+ * entry again.
+ *
+ * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp
*/
-static bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
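
InitScriptExecutionCache above gives the script execution cache half of the -maxsigcachesize budget (the other half stays with the signature cache), clamps it to the allowed range, and converts MiB to bytes before calling setup_bytes. A sketch of just the sizing arithmetic; the default and maximum below are hypothetical stand-ins for DEFAULT_MAX_SIG_CACHE_SIZE and MAX_MAX_SIG_CACHE_SIZE:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical values; the real constants live in script/sigcache.h.
    const int64_t default_max_sig_cache_mib = 32;
    const int64_t max_max_sig_cache_mib     = 16384;
    int64_t arg_mib = default_max_sig_cache_mib;  // -maxsigcachesize as parsed

    // Half of the budget goes to the script execution cache, clamped to
    // [0, max], then converted from MiB to bytes.
    size_t nMaxCacheSize =
        std::min(std::max((int64_t)0, arg_mib / 2), max_max_sig_cache_mib) * ((size_t)1 << 20);

    std::printf("script execution cache budget: %zu bytes (%zu MiB)\n",
                nMaxCacheSize, nMaxCacheSize >> 20);
    return 0;
}
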
@@ -1175,6 +1261,21 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
// Of course, if an assumed valid block is invalid due to false scriptSigs
// this optimization would allow an invalid chain to be accepted.
if (fScriptChecks) {
+ // First check if script executions have been cached with the same
+ // flags. Note that this assumes that the inputs provided are
+ // correct (ie that the transaction hash which is in tx's prevouts
+ // properly commits to the scriptPubKey in the inputs view of that
+ // transaction).
+ uint256 hashCacheEntry;
+ // We only use the first 19 bytes of nonce to avoid a second SHA
+ // round - giving us 19 + 32 + 4 = 55 bytes (+ 8 + 1 = 64)
+ static_assert(55 - sizeof(flags) - 32 >= 128/8, "Want at least 128 bits of nonce for script execution cache");
+ CSHA256().Write(scriptExecutionCacheNonce.begin(), 55 - sizeof(flags) - 32).Write(tx.GetWitnessHash().begin(), 32).Write((unsigned char*)&flags, sizeof(flags)).Finalize(hashCacheEntry.begin());
+ AssertLockHeld(cs_main); //TODO: Remove this requirement by making CuckooCache not require external locks
+ if (scriptExecutionCache.contains(hashCacheEntry, !cacheFullScriptStore)) {
+ return true;
+ }
+
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const Coin& coin = inputs.AccessCoin(prevout);
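
The cache key above is a single SHA-256 over a 55-byte preimage: 19 bytes of the per-node nonce, the 32-byte witness hash, and the 4-byte flags word. 55 bytes is deliberate, since SHA-256 padding adds 9 bytes (an 0x80 byte plus the 8-byte length), so the whole message fits in exactly one 64-byte compression round. A sketch of that layout arithmetic only; the actual hashing via CSHA256 into hashCacheEntry is omitted:

#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
    std::array<unsigned char, 32> nonce{};   // scriptExecutionCacheNonce analogue
    std::array<unsigned char, 32> wtxid{};   // tx.GetWitnessHash() analogue
    uint32_t flags = 0;                      // script verification flags

    // 19 + 32 + 4 = 55 bytes of preimage; SHA-256 padding needs 9 more,
    // so the message occupies exactly one 64-byte block (one compression).
    std::array<unsigned char, 55> preimage{};
    static_assert(55 - sizeof(flags) - 32 >= 128 / 8,
                  "at least 128 bits of nonce in the cache key");
    std::memcpy(preimage.data(),           nonce.data(), 55 - sizeof(flags) - 32); // 19 bytes
    std::memcpy(preimage.data() + 19,      wtxid.data(), 32);
    std::memcpy(preimage.data() + 19 + 32, &flags,       sizeof(flags));
    assert(19 + 32 + sizeof(flags) == preimage.size());
    return 0;
}
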
@@ -1185,11 +1286,9 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
// a sanity check that our caching is not introducing consensus
// failures through additional data in, eg, the coins being
// spent being checked as a part of CScriptCheck.
- const CScript& scriptPubKey = coin.out.scriptPubKey;
- const CAmount amount = coin.out.nValue;
// Verify signature
- CScriptCheck check(scriptPubKey, amount, tx, i, flags, cacheStore, &txdata);
+ CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
@@ -1201,8 +1300,8 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
// arguments; if so, don't trigger DoS protection to
// avoid splitting the network between upgraded and
// non-upgraded nodes.
- CScriptCheck check2(scriptPubKey, amount, tx, i,
- flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
+ CScriptCheck check2(coin.out, tx, i,
+ flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
@@ -1216,6 +1315,12 @@ static bool CheckInputs(const CTransaction& tx, CValidationState &state, const C
return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
+
+ if (cacheFullScriptStore && !pvChecks) {
+ // We executed all of the provided scripts, and were told to
+ // cache the result. Do so now.
+ scriptExecutionCache.insert(hashCacheEntry);
+ }
}
}
@@ -1329,17 +1434,19 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
return DISCONNECT_FAILED; // adding output for transaction without known metadata
}
}
- view.AddCoin(out, std::move(undo), undo.fCoinBase);
+ // The potential_overwrite parameter to AddCoin is only allowed to be false if we know for
+ // sure that the coin did not already exist in the cache. As we have queried for that above
+ // using HaveCoin, we don't need to guess. When fClean is false, a coin already existed and
+ // it is an overwrite.
+ view.AddCoin(out, std::move(undo), !fClean);
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
/** Undo the effects of this block (with given index) on the UTXO set represented by coins.
- * When UNCLEAN or FAILED is returned, view is left in an indeterminate state. */
+ * When FAILED is returned, view is left in an indeterminate state. */
static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view)
{
- assert(pindex->GetBlockHash() == view.GetBestBlock());
-
bool fClean = true;
CBlockUndo blockUndo;
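
The ApplyTxInUndo change above derives potential_overwrite from fClean: HaveCoin was already consulted, so a coin that is still present means the AddCoin is an overwrite, while a clean undo can safely assert that no coin exists yet. A toy cache illustrating that invariant, with hypothetical types in place of COutPoint/Coin/CCoinsViewCache:

#include <cassert>
#include <map>
#include <string>

using OutPoint = std::string;  // toy stand-ins for the real types
using Coin = int;

struct ToyCoinsCache {
    std::map<OutPoint, Coin> coins;

    bool HaveCoin(const OutPoint& out) const { return coins.count(out) != 0; }

    // potential_overwrite == false is a promise that the coin is not already
    // present; violating that promise would be a bug, so assert on it.
    void AddCoin(const OutPoint& out, Coin coin, bool potential_overwrite) {
        if (!potential_overwrite) assert(!HaveCoin(out));
        coins[out] = coin;
    }
};

int main() {
    ToyCoinsCache view;
    OutPoint out = "txid:0";

    // Mirror of ApplyTxInUndo: query first, then use that answer to decide
    // whether the AddCoin is an overwrite.
    bool fClean = !view.HaveCoin(out);  // clean undo: the spent output is absent
    view.AddCoin(out, 42, /*potential_overwrite=*/!fClean);
    assert(view.HaveCoin(out));
    return 0;
}
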
@@ -1362,6 +1469,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = *(block.vtx[i]);
uint256 hash = tx.GetHash();
+ bool is_coinbase = tx.IsCoinBase();
// Check that all outputs are available and match the outputs in the block itself
// exactly.
@@ -1370,7 +1478,7 @@ static DisconnectResult DisconnectBlock(const CBlock& block, const CBlockIndex*
COutPoint out(hash, o);
Coin coin;
bool is_spent = view.SpendCoin(out, &coin);
- if (!is_spent || tx.vout[o] != coin.out) {
+ if (!is_spent || tx.vout[o] != coin.out || pindex->nHeight != coin.nHeight || is_coinbase != coin.fCoinBase) {
fClean = false; // transaction output mismatch
}
}
@@ -1458,14 +1566,14 @@ private:
int bit;
public:
- WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
+ explicit WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
- int64_t BeginTime(const Consensus::Params& params) const { return 0; }
- int64_t EndTime(const Consensus::Params& params) const { return std::numeric_limits<int64_t>::max(); }
- int Period(const Consensus::Params& params) const { return params.nMinerConfirmationWindow; }
- int Threshold(const Consensus::Params& params) const { return params.nRuleChangeActivationThreshold; }
+ int64_t BeginTime(const Consensus::Params& params) const override { return 0; }
+ int64_t EndTime(const Consensus::Params& params) const override { return std::numeric_limits<int64_t>::max(); }
+ int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; }
+ int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; }
- bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const
+ bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override
{
return ((pindex->nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS) &&
((pindex->nVersion >> bit) & 1) != 0 &&
@@ -1476,6 +1584,41 @@ public:
// Protected by cs_main
static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
+static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) {
+ AssertLockHeld(cs_main);
+
+ // BIP16 didn't become active until Apr 1 2012
+ int64_t nBIP16SwitchTime = 1333238400;
+ bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
+
+ unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
+
+ // Start enforcing the DERSIG (BIP66) rule
+ if (pindex->nHeight >= consensusparams.BIP66Height) {
+ flags |= SCRIPT_VERIFY_DERSIG;
+ }
+
+ // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
+ if (pindex->nHeight >= consensusparams.BIP65Height) {
+ flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
+ }
+
+ // Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
+ if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
+ flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
+ }
+
+ // Start enforcing WITNESS rules using versionbits logic.
+ if (IsWitnessEnabled(pindex->pprev, consensusparams)) {
+ flags |= SCRIPT_VERIFY_WITNESS;
+ flags |= SCRIPT_VERIFY_NULLDUMMY;
+ }
+
+ return flags;
+}
+
+
+
static int64_t nTimeCheck = 0;
static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
@@ -1483,6 +1626,7 @@ static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
+static int64_t nBlocksTotal = 0;
/** Apply the effects of this block (with given index) on the UTXO set represented by coins.
* Validity checks that depend on the UTXO set are also done; ConnectBlock()
@@ -1493,7 +1637,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
AssertLockHeld(cs_main);
assert(pindex);
// pindex->phashBlock can be null if called by CreateNewBlock/TestBlockValidity
- assert((pindex->phashBlock == NULL) ||
+ assert((pindex->phashBlock == nullptr) ||
(*pindex->phashBlock == block.GetHash()));
int64_t nTimeStart = GetTimeMicros();
@@ -1502,7 +1646,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
// verify that the view's current state corresponds to the previous block
- uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
+ uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its transactions
@@ -1513,6 +1657,8 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
return true;
}
+ nBlocksTotal++;
+
bool fScriptChecks = true;
if (!hashAssumeValid.IsNull()) {
// We've been configured with the hash of a block which has been externally verified to have a valid history.
@@ -1524,7 +1670,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (it != mapBlockIndex.end()) {
if (it->second->GetAncestor(pindex->nHeight) == pindex &&
pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
- pindexBestHeader->nChainWork >= UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
+ pindexBestHeader->nChainWork >= nMinimumChainWork) {
// This block is a member of the assumed verified chain and an ancestor of the best header.
// The equivalent time check discourages hash power from extorting the network via DOS attack
// into accepting an invalid block through telling users they must manually set assumevalid.
@@ -1540,7 +1686,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
}
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
- LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
+ LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
@@ -1564,6 +1710,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
// before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
// duplicate transactions descending from the known pairs either.
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
+ assert(pindex->pprev);
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
@@ -1579,41 +1726,21 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
}
}
- // BIP16 didn't become active until Apr 1 2012
- int64_t nBIP16SwitchTime = 1333238400;
- bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
-
- unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
-
- // Start enforcing the DERSIG (BIP66) rule
- if (pindex->nHeight >= chainparams.GetConsensus().BIP66Height) {
- flags |= SCRIPT_VERIFY_DERSIG;
- }
-
- // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
- if (pindex->nHeight >= chainparams.GetConsensus().BIP65Height) {
- flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
- }
-
// Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
int nLockTimeFlags = 0;
if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(), Consensus::DEPLOYMENT_CSV, versionbitscache) == THRESHOLD_ACTIVE) {
- flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
- // Start enforcing WITNESS rules using versionbits logic.
- if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
- flags |= SCRIPT_VERIFY_WITNESS;
- flags |= SCRIPT_VERIFY_NULLDUMMY;
- }
+ // Get the script flags for this block
+ unsigned int flags = GetBlockScriptFlags(pindex, chainparams.GetConsensus());
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
- LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
+ LogPrint(BCLog::BENCH, " - Fork checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime2 - nTime1), nTimeForks * MICRO, nTimeForks * MILLI / nBlocksTotal);
CBlockUndo blockundo;
- CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
+ CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : nullptr);
std::vector<int> prevheights;
CAmount nFees = 0;
@@ -1671,7 +1798,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
{
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
- if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
+ if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
@@ -1687,7 +1814,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
- LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
@@ -1699,7 +1826,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (!control.Wait())
return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
- LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
+ LogPrint(BCLog::BENCH, " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs (%.2fms/blk)]\n", nInputs - 1, MILLI * (nTime4 - nTime2), nInputs <= 1 ? 0 : MILLI * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * MICRO, nTimeVerify * MILLI / nBlocksTotal);
if (fJustCheck)
return true;
@@ -1727,14 +1854,15 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (!pblocktree->WriteTxIndex(vPos))
return AbortNode(state, "Failed to write transaction index");
+ assert(pindex->phashBlock);
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
- LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
+ LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
- LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
+ LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal);
return true;
}
@@ -1747,95 +1875,100 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
*/
bool static FlushStateToDisk(const CChainParams& chainparams, CValidationState &state, FlushStateMode mode, int nManualPruneHeight) {
int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
- LOCK2(cs_main, cs_LastBlockFile);
+ LOCK(cs_main);
static int64_t nLastWrite = 0;
static int64_t nLastFlush = 0;
static int64_t nLastSetChain = 0;
std::set<int> setFilesToPrune;
bool fFlushForPrune = false;
+ bool fDoFullFlush = false;
+ int64_t nNow = 0;
try {
- if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
- if (nManualPruneHeight > 0) {
- FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
- } else {
- FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
- fCheckForPruning = false;
- }
- if (!setFilesToPrune.empty()) {
- fFlushForPrune = true;
- if (!fHavePruned) {
- pblocktree->WriteFlag("prunedblockfiles", true);
- fHavePruned = true;
- }
- }
- }
- int64_t nNow = GetTimeMicros();
- // Avoid writing/flushing immediately after startup.
- if (nLastWrite == 0) {
- nLastWrite = nNow;
- }
- if (nLastFlush == 0) {
- nLastFlush = nNow;
- }
- if (nLastSetChain == 0) {
- nLastSetChain = nNow;
- }
- int64_t nMempoolSizeMax = GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
- int64_t cacheSize = pcoinsTip->DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR;
- int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
- // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
- bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
- // The cache is over the limit, we have to write now.
- bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
- // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
- bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
- // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
- bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
- // Combine all conditions that result in a full cache flush.
- bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
- // Write blocks and block index to disk.
- if (fDoFullFlush || fPeriodicWrite) {
- // Depend on nMinDiskSpace to ensure we can write block index
- if (!CheckDiskSpace(0))
- return state.Error("out of disk space");
- // First make sure all block and undo data is flushed to disk.
- FlushBlockFile();
- // Then update all block file information (which may refer to block and undo files).
- {
- std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
- vFiles.reserve(setDirtyFileInfo.size());
- for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
- vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
- setDirtyFileInfo.erase(it++);
+ {
+ LOCK(cs_LastBlockFile);
+ if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) {
+ if (nManualPruneHeight > 0) {
+ FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
+ } else {
+ FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
+ fCheckForPruning = false;
}
- std::vector<const CBlockIndex*> vBlocks;
- vBlocks.reserve(setDirtyBlockIndex.size());
- for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
- vBlocks.push_back(*it);
- setDirtyBlockIndex.erase(it++);
+ if (!setFilesToPrune.empty()) {
+ fFlushForPrune = true;
+ if (!fHavePruned) {
+ pblocktree->WriteFlag("prunedblockfiles", true);
+ fHavePruned = true;
+ }
}
- if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
- return AbortNode(state, "Failed to write to block index database");
+ }
+ nNow = GetTimeMicros();
+ // Avoid writing/flushing immediately after startup.
+ if (nLastWrite == 0) {
+ nLastWrite = nNow;
+ }
+ if (nLastFlush == 0) {
+ nLastFlush = nNow;
+ }
+ if (nLastSetChain == 0) {
+ nLastSetChain = nNow;
+ }
+ int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
+ int64_t cacheSize = pcoinsTip->DynamicMemoryUsage();
+ int64_t nTotalSpace = nCoinCacheUsage + std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
+ // The cache is large and we're within 10% and 10 MiB of the limit, but we have time now (not in the middle of a block processing).
+ bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - MAX_BLOCK_COINSDB_USAGE * 1024 * 1024);
+ // The cache is over the limit, we have to write now.
+ bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
+ // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
+ bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
+ // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
+ bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
+ // Combine all conditions that result in a full cache flush.
+ fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
+ // Write blocks and block index to disk.
+ if (fDoFullFlush || fPeriodicWrite) {
+ // Depend on nMinDiskSpace to ensure we can write block index
+ if (!CheckDiskSpace(0))
+ return state.Error("out of disk space");
+ // First make sure all block and undo data is flushed to disk.
+ FlushBlockFile();
+ // Then update all block file information (which may refer to block and undo files).
+ {
+ std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
+ vFiles.reserve(setDirtyFileInfo.size());
+ for (std::set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
+ vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
+ setDirtyFileInfo.erase(it++);
+ }
+ std::vector<const CBlockIndex*> vBlocks;
+ vBlocks.reserve(setDirtyBlockIndex.size());
+ for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
+ vBlocks.push_back(*it);
+ setDirtyBlockIndex.erase(it++);
+ }
+ if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
+ return AbortNode(state, "Failed to write to block index database");
+ }
}
+ // Finally remove any pruned files
+ if (fFlushForPrune)
+ UnlinkPrunedFiles(setFilesToPrune);
+ nLastWrite = nNow;
+ }
+ // Flush best chain related state. This can only be done if the blocks / block index write was also done.
+ if (fDoFullFlush) {
+ // Typical Coin structures on disk are around 48 bytes in size.
+ // Pushing a new one to the database can cause it to be written
+ // twice (once in the log, and once in the tables). This is already
+ // an overestimation, as most will delete an existing entry or
+ // overwrite one. Still, use a conservative safety factor of 2.
+ if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
+ return state.Error("out of disk space");
+ // Flush the chainstate (which may refer to block index entries).
+ if (!pcoinsTip->Flush())
+ return AbortNode(state, "Failed to write to coin database");
+ nLastFlush = nNow;
}
- // Finally remove any pruned files
- if (fFlushForPrune)
- UnlinkPrunedFiles(setFilesToPrune);
- nLastWrite = nNow;
- }
- // Flush best chain related state. This can only be done if the blocks / block index write was also done.
- if (fDoFullFlush) {
- // Typical Coin structures on disk are around 48 bytes in size.
- // Pushing a new one to the database can cause it to be written
- // twice (once in the log, and once in the tables). This is already
- // an overestimation, as most will delete an existing entry or
- // overwrite one. Still, use a conservative safety factor of 2.
- if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize()))
- return state.Error("out of disk space");
- // Flush the chainstate (which may refer to block index entries).
- if (!pcoinsTip->Flush())
- return AbortNode(state, "Failed to write to coin database");
- nLastFlush = nNow;
}
if (fDoFullFlush || ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
// Update best block in wallet (so we can detect restored wallets).
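
The restructured FlushStateToDisk takes cs_LastBlockFile only in an inner scope and, with DB_PEAK_USAGE_FACTOR gone, compares the coins cache's raw memory usage against its budget. The flush decision combines four triggers: cache nearly full, cache over budget, the periodic block-index write, and the periodic chainstate flush. A stand-alone sketch of that boolean logic with hypothetical sizes and intervals (the interval and budget constants below are stand-ins, not the real definitions):

#include <algorithm>
#include <cstdint>
#include <cstdio>

enum class FlushMode { IF_NEEDED, PERIODIC, ALWAYS };

int main() {
    const int64_t MiB = 1024 * 1024;
    // Hypothetical inputs; the real values come from -dbcache/-maxmempool and timers.
    int64_t cacheSize   = 420 * MiB;  // pcoinsTip->DynamicMemoryUsage()
    int64_t nTotalSpace = 450 * MiB;  // coins budget + unused mempool headroom
    int64_t maxBlockCoinsDbUsage = 10 * MiB;   // stands in for MAX_BLOCK_COINSDB_USAGE
    int64_t nNow = 5000000000LL, nLastWrite = 0, nLastFlush = 0;
    int64_t writeIntervalUs = 3600000000LL;    // DATABASE_WRITE_INTERVAL analogue (1h in us)
    int64_t flushIntervalUs = 86400000000LL;   // DATABASE_FLUSH_INTERVAL analogue (24h in us)
    FlushMode mode = FlushMode::PERIODIC;
    bool fFlushForPrune = false;

    // Within 10% and 10 MiB of the budget, and we have time to spare.
    bool fCacheLarge = mode == FlushMode::PERIODIC &&
        cacheSize > std::max((9 * nTotalSpace) / 10, nTotalSpace - maxBlockCoinsDbUsage);
    // Over the budget: must write now.
    bool fCacheCritical = mode == FlushMode::IF_NEEDED && cacheSize > nTotalSpace;
    // Periodic triggers, so a crash does not force a long redownload/revalidation.
    bool fPeriodicWrite = mode == FlushMode::PERIODIC && nNow > nLastWrite + writeIntervalUs;
    bool fPeriodicFlush = mode == FlushMode::PERIODIC && nNow > nLastFlush + flushIntervalUs;

    bool fDoFullFlush = mode == FlushMode::ALWAYS || fCacheLarge || fCacheCritical ||
                        fPeriodicFlush || fFlushForPrune;
    std::printf("full flush: %s, block index write: %s\n",
                fDoFullFlush ? "yes" : "no",
                (fDoFullFlush || fPeriodicWrite) ? "yes" : "no");
    return 0;
}
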
@@ -1898,7 +2031,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
}
}
// Check the version of the last 100 blocks to see if we need to upgrade:
- for (int i = 0; i < 100 && pindex != NULL; i++)
+ for (int i = 0; i < 100 && pindex != nullptr; i++)
{
int32_t nExpectedVersion = ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION && (pindex->nVersion & ~nExpectedVersion) != 0)
@@ -1931,7 +2064,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
* should make the mempool consistent again by calling UpdateMempoolForReorg.
* with cs_main held.
*
- * If disconnectpool is NULL, then no disconnected transactions are added to
+ * If disconnectpool is nullptr, then no disconnected transactions are added to
* disconnectpool (note that the caller is responsible for mempool consistency
* in any case).
*/
@@ -1948,12 +2081,13 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
+ assert(view.GetBestBlock() == pindexDelete->GetBlockHash());
if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK)
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
bool flushed = view.Flush();
assert(flushed);
}
- LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
+ LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED))
return false;
@@ -1986,7 +2120,7 @@ static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
struct PerBlockConnectTrace {
- CBlockIndex* pindex = NULL;
+ CBlockIndex* pindex = nullptr;
std::shared_ptr<const CBlock> pblock;
std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
PerBlockConnectTrace() : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
@@ -2013,7 +2147,7 @@ private:
CTxMemPool &pool;
public:
- ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
+ explicit ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
pool.NotifyEntryRemoved.connect(boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
}
@@ -2051,7 +2185,7 @@ public:
};
/**
- * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
+ * Connect a new block to chainActive. pblock is either nullptr or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*
* The block is added to connectTrace if connection succeeds.
@@ -2074,7 +2208,7 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
- LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
+ LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * MILLI, nTimeReadFromDisk * MICRO);
{
CCoinsViewCache view(pcoinsTip);
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view, chainparams);
@@ -2085,17 +2219,17 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
}
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
- LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime3 - nTime2) * MILLI, nTimeConnectTotal * MICRO, nTimeConnectTotal * MILLI / nBlocksTotal);
bool flushed = view.Flush();
assert(flushed);
}
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
- LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
+ LogPrint(BCLog::BENCH, " - Flush: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime4 - nTime3) * MILLI, nTimeFlush * MICRO, nTimeFlush * MILLI / nBlocksTotal);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(chainparams, state, FLUSH_STATE_IF_NEEDED))
return false;
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
- LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
+ LogPrint(BCLog::BENCH, " - Writing chainstate: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime5 - nTime4) * MILLI, nTimeChainState * MICRO, nTimeChainState * MILLI / nBlocksTotal);
// Remove conflicting transactions from the mempool.
mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
disconnectpool.removeForBlock(blockConnecting.vtx);
@@ -2103,8 +2237,8 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
UpdateTip(pindexNew, chainparams);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
- LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
- LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
+ LogPrint(BCLog::BENCH, " - Connect postprocess: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime5) * MILLI, nTimePostConnect * MICRO, nTimePostConnect * MILLI / nBlocksTotal);
+ LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs (%.2fms/blk)]\n", (nTime6 - nTime1) * MILLI, nTimeTotal * MICRO, nTimeTotal * MILLI / nBlocksTotal);
connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
return true;
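A minimal, self-contained sketch of the unit arithmetic behind the BENCH lines in this hunk: GetTimeMicros() deltas are in microseconds, the MILLI/MICRO factors convert them to milliseconds and seconds, and dividing a cumulative total by the block counter gives the new per-block average. The figures below are made up for illustration.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const double MICRO = 0.000001;  // microseconds -> seconds
        const double MILLI = 0.001;     // microseconds -> milliseconds
        int64_t delta_us = 12500;       // one ConnectBlock call: 12.5 ms (hypothetical)
        int64_t total_us = 9000000;     // cumulative connect time so far (hypothetical)
        int64_t blocks   = 120;         // stands in for nBlocksTotal (hypothetical)
        std::printf(" - Connect total: %.2fms [%.2fs (%.2fms/blk)]\n",
                    delta_us * MILLI, total_us * MICRO, total_us * MILLI / blocks);
        // Prints: " - Connect total: 12.50ms [9.00s (75.00ms/blk)]"
        return 0;
    }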
@@ -2116,13 +2250,13 @@ bool static ConnectTip(CValidationState& state, const CChainParams& chainparams,
*/
static CBlockIndex* FindMostWorkChain() {
do {
- CBlockIndex *pindexNew = NULL;
+ CBlockIndex *pindexNew = nullptr;
// Find the best candidate header.
{
std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
if (it == setBlockIndexCandidates.rend())
- return NULL;
+ return nullptr;
pindexNew = *it;
}
@@ -2141,7 +2275,7 @@ static CBlockIndex* FindMostWorkChain() {
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
if (fFailedChain || fMissingData) {
// Candidate chain is not usable (either invalid or missing data)
- if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
+ if (fFailedChain && (pindexBestInvalid == nullptr || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindexNew;
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
@@ -2182,7 +2316,7 @@ static void PruneBlockIndexCandidates() {
/**
* Try to make some progress towards making pindexMostWork the active block.
- * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
+ * pblock is either nullptr or a pointer to a CBlock corresponding to pindexMostWork.
*/
static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const std::shared_ptr<const CBlock>& pblock, bool& fInvalidFound, ConnectTrace& connectTrace)
{
@@ -2221,7 +2355,7 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
nHeight = nTargetHeight;
// Connect new blocks.
- BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
+ for (CBlockIndex *pindexConnect : reverse_iterate(vpindexToConnect)) {
if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
@@ -2268,8 +2402,8 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
static void NotifyHeaderTip() {
bool fNotify = false;
bool fInitialBlockDownload = false;
- static CBlockIndex* pindexHeaderOld = NULL;
- CBlockIndex* pindexHeader = NULL;
+ static CBlockIndex* pindexHeaderOld = nullptr;
+ CBlockIndex* pindexHeader = nullptr;
{
LOCK(cs_main);
pindexHeader = pindexBestHeader;
@@ -2288,7 +2422,7 @@ static void NotifyHeaderTip() {
/**
* Make the best chain active, in multiple steps. The result is either failure
- * or an activated best chain. pblock is either NULL or a pointer to a block
+ * or an activated best chain. pblock is either nullptr or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*/
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) {
@@ -2297,9 +2431,9 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
// us in the middle of ProcessNewBlock - do not assume pblock is set
// sanely for performance or correctness!
- CBlockIndex *pindexMostWork = NULL;
- CBlockIndex *pindexNewTip = NULL;
- int nStopAtHeight = GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
+ CBlockIndex *pindexMostWork = nullptr;
+ CBlockIndex *pindexNewTip = nullptr;
+ int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
do {
boost::this_thread::interruption_point();
if (ShutdownRequested())
@@ -2312,12 +2446,12 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
ConnectTrace connectTrace(mempool); // Destructed before cs_main is unlocked
CBlockIndex *pindexOldTip = chainActive.Tip();
- if (pindexMostWork == NULL) {
+ if (pindexMostWork == nullptr) {
pindexMostWork = FindMostWorkChain();
}
// Whether we have anything to do at all.
- if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
+ if (pindexMostWork == nullptr || pindexMostWork == chainActive.Tip())
return true;
bool fInvalidFound = false;
@@ -2327,7 +2461,7 @@ bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams,
if (fInvalidFound) {
// Wipe cache, we may need another branch now.
- pindexMostWork = NULL;
+ pindexMostWork = nullptr;
}
pindexNewTip = chainActive.Tip();
pindexFork = chainActive.FindFork(pindexOldTip);
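A minimal caller sketch for ActivateBestChain as declared above (editorial illustration only; passing an empty shared_ptr simply means any needed block is read from disk):

    // Hypothetical call site; how the failure is handled is up to the caller.
    CValidationState state;
    if (!ActivateBestChain(state, Params(), std::shared_ptr<const CBlock>())) {
        LogPrintf("ActivateBestChain failed: %s\n", FormatStateMessage(state));
    }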
@@ -2452,14 +2586,14 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex) {
}
if (it->second == pindexBestInvalid) {
// Reset invalid block marker if it was pointing to one of those.
- pindexBestInvalid = NULL;
+ pindexBestInvalid = nullptr;
}
}
it++;
}
// Remove the invalidity flag from all ancestors too.
- while (pindex != NULL) {
+ while (pindex != nullptr) {
if (pindex->nStatus & BLOCK_FAILED_MASK) {
pindex->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(pindex);
@@ -2496,7 +2630,7 @@ static CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
- if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
pindexBestHeader = pindexNew;
setDirtyBlockIndex.insert(pindexNew);
@@ -2519,7 +2653,7 @@ static bool ReceivedBlockTransactions(const CBlock &block, CValidationState& sta
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
- if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
+ if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) {
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
std::deque<CBlockIndex*> queue;
queue.push_back(pindexNew);
@@ -2533,7 +2667,7 @@ static bool ReceivedBlockTransactions(const CBlock &block, CValidationState& sta
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
- if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
+ if (chainActive.Tip() == nullptr || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
@@ -2683,7 +2817,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// checks that use witness data may be performed here.
// Size limits
- if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
+ if (block.vtx.empty() || block.vtx.size() * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
// First transaction must be coinbase, the rest must not be
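Editorial sketch of why the rewritten size check is equivalent to the old base-size rule, assuming the usual consensus constants (WITNESS_SCALE_FACTOR = 4, MAX_BLOCK_WEIGHT = 4,000,000, old base-size cap of 1,000,000 bytes); the constants below are restated locally as assumptions, not taken from this file. The same scaling argument applies to the vtx.size() bound.

    #include <cstdint>

    constexpr uint64_t kWitnessScaleFactor = 4;    // assumed consensus value
    constexpr uint64_t kMaxBlockWeight = 4000000;  // assumed consensus value

    // Old rule: no-witness serialized size <= 1,000,000 bytes.
    constexpr bool OldRule(uint64_t stripped_size) { return stripped_size <= 1000000; }
    // New rule: no-witness serialized size * WITNESS_SCALE_FACTOR <= MAX_BLOCK_WEIGHT.
    constexpr bool NewRule(uint64_t stripped_size) { return stripped_size * kWitnessScaleFactor <= kMaxBlockWeight; }

    static_assert(OldRule(1000000) && NewRule(1000000), "boundary accepted by both rules");
    static_assert(!OldRule(1000001) && !NewRule(1000001), "boundary + 1 rejected by both rules");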
@@ -2713,22 +2847,6 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
return true;
}
-static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
-{
- if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
- return true;
-
- int nHeight = pindexPrev->nHeight+1;
- // Don't accept any forks from the main chain prior to last checkpoint.
- // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
- // MapBlockIndex.
- CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
- if (pcheckpoint && nHeight < pcheckpoint->nHeight)
- return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
-
- return true;
-}
-
bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
{
LOCK(cs_main);
@@ -2769,8 +2887,8 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
std::vector<unsigned char> ret(32, 0x00);
if (consensusParams.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout != 0) {
if (commitpos == -1) {
- uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
- CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
+ uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr);
+ CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin());
CTxOut out;
out.nValue = 0;
out.scriptPubKey.resize(38);
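Editorial note on the 38-byte commitment output being built here, following the BIP141 layout (the byte values below come from that BIP, not from this hunk):

    // scriptPubKey layout of the coinbase witness commitment output:
    //   0x6a                 OP_RETURN
    //   0x24                 push of the following 36 bytes
    //   0xaa 0x21 0xa9 0xed  commitment header
    //   <32 bytes>           SHA256d(witness merkle root || witness reserved value),
    //                        where the reserved value is the 32 zero bytes held in `ret` above.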
@@ -2794,14 +2912,26 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock(). */
-static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
+static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
- assert(pindexPrev != NULL);
+ assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
+
// Check proof of work
+ const Consensus::Params& consensusParams = params.GetConsensus();
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false, "incorrect proof of work");
+ // Check against checkpoints
+ if (fCheckpointsEnabled) {
+ // Don't accept any forks from the main chain prior to last checkpoint.
+ // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our
+ // MapBlockIndex.
+ CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(params.Checkpoints());
+ if (pcheckpoint && nHeight < pcheckpoint->nHeight)
+ return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight), REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
+ }
+
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
@@ -2823,7 +2953,7 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
- const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
+ const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
// Start enforcing BIP113 (Median Time Past) using versionbits logic.
int nLockTimeFlags = 0;
@@ -2908,7 +3038,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
- CBlockIndex *pindex = NULL;
+ CBlockIndex *pindex = nullptr;
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
if (miSelf != mapBlockIndex.end()) {
@@ -2925,22 +3055,17 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
// Get prev block index
- CBlockIndex* pindexPrev = NULL;
+ CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("%s: prev block not found", __func__), 0, "prev-blk-not-found");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
-
- assert(pindexPrev);
- if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
- return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
-
- if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
+ if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
}
- if (pindex == NULL)
+ if (pindex == nullptr)
pindex = AddToBlockIndex(block);
if (ppindex)
@@ -2957,7 +3082,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
- CBlockIndex *pindex = NULL; // Use a temp pindex instead of ppindex to avoid a const_cast
+ CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
if (!AcceptBlockHeader(header, state, chainparams, &pindex)) {
return false;
}
@@ -2970,7 +3095,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
return true;
}
-/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
+/** Store block on disk. If dbp is non-null, the file is known to already reside on disk */
static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
const CBlock& block = *pblock;
@@ -2978,7 +3103,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
if (fNewBlock) *fNewBlock = false;
AssertLockHeld(cs_main);
- CBlockIndex *pindexDummy = NULL;
+ CBlockIndex *pindexDummy = nullptr;
CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
if (!AcceptBlockHeader(block, state, chainparams, &pindex))
@@ -3031,11 +3156,11 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
try {
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
- if (dbp != NULL)
+ if (dbp != nullptr)
blockPos = *dbp;
- if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
+ if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != nullptr))
return error("AcceptBlock(): FindBlockPos failed");
- if (dbp == NULL)
+ if (dbp == nullptr)
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
AbortNode(state, "Failed to write block");
if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
@@ -3053,7 +3178,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
{
- CBlockIndex *pindex = NULL;
+ CBlockIndex *pindex = nullptr;
if (fNewBlock) *fNewBlock = false;
CValidationState state;
// Ensure that CheckBlock() passes before calling AcceptBlock, as
@@ -3064,12 +3189,12 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
if (ret) {
// Store to disk
- ret = AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, NULL, fNewBlock);
+ ret = AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock);
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
- return error("%s: AcceptBlock FAILED", __func__);
+ return error("%s: AcceptBlock FAILED (%s)", __func__, state.GetDebugMessage());
}
}
@@ -3086,16 +3211,13 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
{
AssertLockHeld(cs_main);
assert(pindexPrev && pindexPrev == chainActive.Tip());
- if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
- return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
-
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
indexDummy.nHeight = pindexPrev->nHeight + 1;
// NOTE: CheckBlockHeader is called by CheckBlock
- if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
+ if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
@@ -3113,8 +3235,10 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
*/
/* Calculate the amount of disk space the block & undo files currently use */
-static uint64_t CalculateCurrentUsage()
+uint64_t CalculateCurrentUsage()
{
+ LOCK(cs_LastBlockFile);
+
uint64_t retval = 0;
for (const CBlockFileInfo &file : vinfoBlockFile) {
retval += file.nSize + file.nUndoSize;
@@ -3125,6 +3249,8 @@ static uint64_t CalculateCurrentUsage()
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
+ LOCK(cs_LastBlockFile);
+
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
CBlockIndex* pindex = it->second;
if (pindex->nFile == fileNumber) {
@@ -3171,7 +3297,7 @@ static void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPr
assert(fPruneMode && nManualPruneHeight > 0);
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == NULL)
+ if (chainActive.Tip() == nullptr)
return;
// last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
@@ -3213,7 +3339,7 @@ void PruneBlockFilesManual(int nManualPruneHeight)
static void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
- if (chainActive.Tip() == NULL || nPruneTarget == 0) {
+ if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
return;
}
if ((uint64_t)chainActive.Tip()->nHeight <= nPruneAfterHeight) {
@@ -3271,7 +3397,7 @@ bool CheckDiskSpace(uint64_t nAdditionalBytes)
static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
if (pos.IsNull())
- return NULL;
+ return nullptr;
fs::path path = GetBlockPosFilename(pos, prefix);
fs::create_directories(path.parent_path());
FILE* file = fsbridge::fopen(path, "rb+");
@@ -3279,13 +3405,13 @@ static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fRe
file = fsbridge::fopen(path, "wb+");
if (!file) {
LogPrintf("Unable to open file %s\n", path.string());
- return NULL;
+ return nullptr;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
fclose(file);
- return NULL;
+ return nullptr;
}
}
return file;
@@ -3308,7 +3434,7 @@ fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
CBlockIndex * InsertBlockIndex(uint256 hash)
{
if (hash.IsNull())
- return NULL;
+ return nullptr;
// Return existing
BlockMap::iterator mi = mapBlockIndex.find(hash);
@@ -3360,13 +3486,13 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pindex->nChainTx = pindex->nTx;
}
}
- if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
+ if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
if (pindex->pprev)
pindex->BuildSkip();
- if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+ if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
pindexBestHeader = pindex;
}
@@ -3413,54 +3539,68 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
- fReindex |= fReindexing;
+ if (fReindexing) fReindex = true;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
+ return true;
+}
+
+bool LoadChainTip(const CChainParams& chainparams)
+{
+ if (chainActive.Tip() && chainActive.Tip()->GetBlockHash() == pcoinsTip->GetBestBlock()) return true;
+
+ if (pcoinsTip->GetBestBlock().IsNull() && mapBlockIndex.size() == 1) {
+ // In case we just added the genesis block, connect it now, so
+ // that we always have a chainActive.Tip() when we return.
+ LogPrintf("%s: Connecting genesis block...\n", __func__);
+ CValidationState state;
+ if (!ActivateBestChain(state, chainparams)) {
+ return false;
+ }
+ }
+
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
- return true;
+ return false;
chainActive.SetTip(it->second);
PruneBlockIndexCandidates();
- LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
+ LogPrintf("Loaded best chain: hashBestChain=%s height=%d date=%s progress=%f\n",
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
-
return true;
}
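A rough sketch of how the load/replay helpers introduced in this file might be sequenced at node startup (editorial assumption; the actual call sites live in init.cpp, and coins_db_view is a placeholder for the on-disk coins view):

    // Hypothetical startup ordering; every name except the four helpers is a placeholder.
    bool StartupSketch(const CChainParams& chainparams, CCoinsView* coins_db_view)
    {
        if (!LoadBlockIndex(chainparams)) return false;              // read (or initialize) the block index db
        if (!LoadGenesisBlock(chainparams)) return false;            // make sure genesis is on disk and indexed
        if (!ReplayBlocks(chainparams, coins_db_view)) return false; // finish any interrupted chainstate flush
        if (!LoadChainTip(chainparams)) return false;                // point chainActive at the coins db best block
        return true;
    }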
CVerifyDB::CVerifyDB()
{
- uiInterface.ShowProgress(_("Verifying blocks..."), 0);
+ uiInterface.ShowProgress(_("Verifying blocks..."), 0, false);
}
CVerifyDB::~CVerifyDB()
{
- uiInterface.ShowProgress("", 100);
+ uiInterface.ShowProgress("", 100, false);
}
bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
- if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
+ if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr)
return true;
// Verify blocks in the best chain
- if (nCheckDepth <= 0)
- nCheckDepth = 1000000000; // suffices until the year 19000
- if (nCheckDepth > chainActive.Height())
+ if (nCheckDepth <= 0 || nCheckDepth > chainActive.Height())
nCheckDepth = chainActive.Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(coinsview);
CBlockIndex* pindexState = chainActive.Tip();
- CBlockIndex* pindexFailure = NULL;
+ CBlockIndex* pindexFailure = nullptr;
int nGoodTransactions = 0;
CValidationState state;
int reportDone = 0;
@@ -3474,7 +3614,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
LogPrintf("[%d%%]...", percentageDone);
reportDone = percentageDone/10;
}
- uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
+ uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone, false);
if (pindex->nHeight < chainActive.Height()-nCheckDepth)
break;
if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
@@ -3501,6 +3641,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
+ assert(coins.GetBestBlock() == pindex->GetBlockHash());
DisconnectResult res = DisconnectBlock(block, pindex, coins);
if (res == DISCONNECT_FAILED) {
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
@@ -3524,7 +3665,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
CBlockIndex *pindex = pindexState;
while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
- uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
+ uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))), false);
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
@@ -3540,10 +3681,98 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
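Worked example of the progress formula used in the reconnect loop just above: it maps the heights being reconnected onto roughly the 50-99% range of the progress indicator. With a hypothetical nCheckDepth of 288:

    //   percentage = clamp(100 - (tipHeight - height) / nCheckDepth * 50, 1, 99)
    //   height = tip - 288  ->  100 - 50 = 50%
    //   height = tip - 144  ->  100 - 25 = 75%
    //   height = tip        ->  100 -  0 = 100, clamped to 99%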
+/** Apply the effects of a block on the utxo cache, ignoring that it may already have been applied. */
+static bool RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& inputs, const CChainParams& params)
+{
+ // TODO: merge with ConnectBlock
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindex, params.GetConsensus())) {
+ return error("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ }
+
+ for (const CTransactionRef& tx : block.vtx) {
+ if (!tx->IsCoinBase()) {
+ for (const CTxIn &txin : tx->vin) {
+ inputs.SpendCoin(txin.prevout);
+ }
+ }
+ // Pass check = true as every addition may be an overwrite.
+ AddCoins(inputs, *tx, pindex->nHeight, true);
+ }
+ return true;
+}
+
+bool ReplayBlocks(const CChainParams& params, CCoinsView* view)
+{
+ LOCK(cs_main);
+
+ CCoinsViewCache cache(view);
+
+ std::vector<uint256> hashHeads = view->GetHeadBlocks();
+ if (hashHeads.empty()) return true; // We're already in a consistent state.
+ if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state");
+
+ uiInterface.ShowProgress(_("Replaying blocks..."), 0, false);
+ LogPrintf("Replaying blocks\n");
+
+ const CBlockIndex* pindexOld = nullptr; // Old tip during the interrupted flush.
+ const CBlockIndex* pindexNew; // New tip during the interrupted flush.
+ const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip.
+
+ if (mapBlockIndex.count(hashHeads[0]) == 0) {
+ return error("ReplayBlocks(): reorganization to unknown block requested");
+ }
+ pindexNew = mapBlockIndex[hashHeads[0]];
+
+ if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush.
+ if (mapBlockIndex.count(hashHeads[1]) == 0) {
+ return error("ReplayBlocks(): reorganization from unknown block requested");
+ }
+ pindexOld = mapBlockIndex[hashHeads[1]];
+ pindexFork = LastCommonAncestor(pindexOld, pindexNew);
+ assert(pindexFork != nullptr);
+ }
+
+ // Rollback along the old branch.
+ while (pindexOld != pindexFork) {
+ if (pindexOld->nHeight > 0) { // Never disconnect the genesis block.
+ CBlock block;
+ if (!ReadBlockFromDisk(block, pindexOld, params.GetConsensus())) {
+ return error("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight);
+ DisconnectResult res = DisconnectBlock(block, pindexOld, cache);
+ if (res == DISCONNECT_FAILED) {
+ return error("RollbackBlock(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString());
+ }
+ // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was
+ // overwritten. It corresponds to cases where the block-to-be-disconnected never had all its operations
+ // applied to the UTXO set. However, as both writing a UTXO and deleting a UTXO are idempotent operations,
+ // the result is still a version of the UTXO set with the effects of that block undone.
+ }
+ pindexOld = pindexOld->pprev;
+ }
+
+ // Roll forward from the forking point to the new tip.
+ int nForkHeight = pindexFork ? pindexFork->nHeight : 0;
+ for (int nHeight = nForkHeight + 1; nHeight <= pindexNew->nHeight; ++nHeight) {
+ const CBlockIndex* pindex = pindexNew->GetAncestor(nHeight);
+ LogPrintf("Rolling forward %s (%i)\n", pindex->GetBlockHash().ToString(), nHeight);
+ if (!RollforwardBlock(pindex, cache, params)) return false;
+ }
+
+ cache.SetBestBlock(pindexNew->GetBlockHash());
+ cache.Flush();
+ uiInterface.ShowProgress("", 100, false);
+ return true;
+}
+
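The check = true / DISCONNECT_UNCLEAN reasoning above relies on coin additions and removals being idempotent, so replaying a block's effects over a state that may already contain them converges to the same result. A self-contained toy model of that property (editorial illustration only; std::map stands in for the real coins cache):

    #include <cassert>
    #include <map>
    #include <string>

    // Toy "UTXO set": outpoint id -> coin value.
    using ToyUtxoSet = std::map<std::string, int>;

    // Spending a coin that is already gone is a no-op; adding a coin that is
    // already present simply overwrites it (the check = true case above).
    void ToySpend(ToyUtxoSet& utxo, const std::string& outpoint) { utxo.erase(outpoint); }
    void ToyAdd(ToyUtxoSet& utxo, const std::string& outpoint, int value) { utxo[outpoint] = value; }

    // Apply one toy "block": spend one input, create one output.
    void ToyApplyBlock(ToyUtxoSet& utxo)
    {
        ToySpend(utxo, "parent_tx:0");
        ToyAdd(utxo, "block_tx:0", 50);
    }

    int main()
    {
        ToyUtxoSet applied_once{{"parent_tx:0", 50}};
        ToyUtxoSet applied_twice = applied_once;
        ToyApplyBlock(applied_once);
        ToyApplyBlock(applied_twice);
        ToyApplyBlock(applied_twice);          // replaying the same block again...
        assert(applied_once == applied_twice); // ...lands in exactly the same state
        return 0;
    }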
bool RewindBlockIndex(const CChainParams& params)
{
LOCK(cs_main);
+ // Note that during -reindex-chainstate we are called with an empty chainActive!
+
int nHeight = 1;
while (nHeight <= chainActive.Height()) {
if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
@@ -3564,7 +3793,7 @@ bool RewindBlockIndex(const CChainParams& params)
// of the blockchain).
break;
}
- if (!DisconnectTip(state, params, NULL)) {
+ if (!DisconnectTip(state, params, nullptr)) {
return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
}
// Occasionally flush state to disk.
@@ -3613,12 +3842,19 @@ bool RewindBlockIndex(const CChainParams& params)
}
}
- PruneBlockIndexCandidates();
+ if (chainActive.Tip() != nullptr) {
+ // We can't prune block index candidates based on our tip if we have
+ // no tip due to chainActive being empty!
+ PruneBlockIndexCandidates();
- CheckBlockIndex(params.GetConsensus());
+ CheckBlockIndex(params.GetConsensus());
- if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) {
- return false;
+ // FlushStateToDisk can possibly read chainActive. Be conservative
+ // and skip it here, we're about to -reindex-chainstate anyway, so
+ // it'll get called a bunch real soon.
+ if (!FlushStateToDisk(params, state, FLUSH_STATE_ALWAYS)) {
+ return false;
+ }
}
return true;
@@ -3631,9 +3867,9 @@ void UnloadBlockIndex()
{
LOCK(cs_main);
setBlockIndexCandidates.clear();
- chainActive.SetTip(NULL);
- pindexBestInvalid = NULL;
- pindexBestHeader = NULL;
+ chainActive.SetTip(nullptr);
+ pindexBestInvalid = nullptr;
+ pindexBestHeader = nullptr;
mempool.clear();
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
@@ -3656,44 +3892,54 @@ void UnloadBlockIndex()
bool LoadBlockIndex(const CChainParams& chainparams)
{
// Load block index from databases
- if (!fReindex && !LoadBlockIndexDB(chainparams))
- return false;
+ bool needs_init = fReindex;
+ if (!fReindex) {
+ bool ret = LoadBlockIndexDB(chainparams);
+ if (!ret) return false;
+ needs_init = mapBlockIndex.empty();
+ }
+
+ if (needs_init) {
+ // Everything here is for *new* reindex/DBs. Thus, though
+ // LoadBlockIndexDB may have set fReindex if we shut down
+ // mid-reindex previously, we don't check fReindex and
+ // instead only check it prior to LoadBlockIndexDB to set
+ // needs_init.
+
+ LogPrintf("Initializing databases...\n");
+ // Use the provided setting for -txindex in the new database
+ fTxIndex = gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX);
+ pblocktree->WriteFlag("txindex", fTxIndex);
+ }
return true;
}
-bool InitBlockIndex(const CChainParams& chainparams)
+bool LoadGenesisBlock(const CChainParams& chainparams)
{
LOCK(cs_main);
- // Check whether we're already initialized
- if (chainActive.Genesis() != NULL)
+ // Check whether we're already initialized by checking for genesis in
+ // mapBlockIndex. Note that we can't use chainActive here, since it is
+ // set based on the coins db, not the block index db, which is the only
+ // thing loaded at this point.
+ if (mapBlockIndex.count(chainparams.GenesisBlock().GetHash()))
return true;
- // Use the provided setting for -txindex in the new database
- fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
- pblocktree->WriteFlag("txindex", fTxIndex);
- LogPrintf("Initializing databases...\n");
-
- // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
- if (!fReindex) {
- try {
- CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
- // Start new block file
- unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
- CDiskBlockPos blockPos;
- CValidationState state;
- if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
- return error("LoadBlockIndex(): FindBlockPos failed");
- if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
- return error("LoadBlockIndex(): writing genesis block to disk failed");
- CBlockIndex *pindex = AddToBlockIndex(block);
- if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
- return error("LoadBlockIndex(): genesis block not accepted");
- // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
- return FlushStateToDisk(chainparams, state, FLUSH_STATE_ALWAYS);
- } catch (const std::runtime_error& e) {
- return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
- }
+ try {
+ CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
+ // Start new block file
+ unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
+ CDiskBlockPos blockPos;
+ CValidationState state;
+ if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
+ return error("%s: FindBlockPos failed", __func__);
+ if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
+ return error("%s: writing genesis block to disk failed", __func__);
+ CBlockIndex *pindex = AddToBlockIndex(block);
+ if (!ReceivedBlockTransactions(block, state, pindex, blockPos, chainparams.GetConsensus()))
+ return error("%s: genesis block not accepted", __func__);
+ } catch (const std::runtime_error& e) {
+ return error("%s: failed to write genesis block: %s", __func__, e.what());
}
return true;
@@ -3759,7 +4005,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
LOCK(cs_main);
CValidationState state;
- if (AcceptBlock(pblock, state, chainparams, NULL, true, dbp, NULL))
+ if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr))
nLoaded++;
if (state.IsError())
break;
@@ -3793,7 +4039,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
head.ToString());
LOCK(cs_main);
CValidationState dummy;
- if (AcceptBlock(pblockrecursive, dummy, chainparams, NULL, true, &it->second, NULL))
+ if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
{
nLoaded++;
queue.push_back(pblockrecursive->GetHash());
@@ -3840,35 +4086,35 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
assert(forward.size() == mapBlockIndex.size());
- std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
+ std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(nullptr);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
- assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.
+ assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent nullptr.
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from genesis
// block being explored which are the first to have certain properties.
size_t nNodes = 0;
int nHeight = 0;
- CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
- CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
- CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
- CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
- CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
- CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
- CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
- while (pindex != NULL) {
+ CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid.
+ CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
+ CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0.
+ CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
+ CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
+ CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
+ CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
+ while (pindex != nullptr) {
nNodes++;
- if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
- if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
- if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
- if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
- if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
- if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
- if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
+ if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
+ if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
+ if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
+ if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
+ if (pindex->pprev != nullptr && pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
+ if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
+ if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
// Begin: actual consistency checks.
- if (pindex->pprev == NULL) {
+ if (pindex->pprev == nullptr) {
// Genesis block checks.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
@@ -3887,26 +4133,26 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
// All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
- assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
- assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
+ assert((pindexFirstNeverProcessed != nullptr) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
+ assert((pindexFirstNotTransactionsValid != nullptr) == (pindex->nChainTx == 0));
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
- assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
+ assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
- assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
- if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
- if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
- if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
- if (pindexFirstInvalid == NULL) {
+ assert(pindexFirstNotTreeValid == nullptr); // All mapBlockIndex entries must at least be TREE valid
+ if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == nullptr); // TREE valid implies all parents are TREE valid
+ if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == nullptr); // CHAIN valid implies all parents are CHAIN valid
+ if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == nullptr); // SCRIPTS valid implies all parents are SCRIPTS valid
+ if (pindexFirstInvalid == nullptr) {
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
- if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
- if (pindexFirstInvalid == NULL) {
+ if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == nullptr) {
+ if (pindexFirstInvalid == nullptr) {
// If this block sorts at least as good as the current tip and
// is valid and we have all data for its parents, it must be in
// setBlockIndexCandidates. chainActive.Tip() must also be there
// even if some data has been pruned.
- if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
+ if (pindexFirstMissing == nullptr || pindex == chainActive.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block was in
@@ -3927,13 +4173,13 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
}
rangeUnlinked.first++;
}
- if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
+ if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != nullptr && pindexFirstInvalid == nullptr) {
// If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
assert(foundInUnlinked);
}
if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
- if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
- if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
+ if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
+ if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) {
// We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
assert(fHavePruned); // We must have pruned.
// This block may have entered mapBlocksUnlinked if:
@@ -3945,7 +4191,7 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
// So if this block is itself better than chainActive.Tip() and it wasn't in
// setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
- if (pindexFirstInvalid == NULL) {
+ if (pindexFirstInvalid == nullptr) {
assert(foundInUnlinked);
}
}
@@ -3966,13 +4212,13 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the corresponding variable.
- if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
- if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
- if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
- if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
- if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
- if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
- if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
+ if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr;
+ if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr;
+ if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = nullptr;
+ if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = nullptr;
+ if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr;
+ if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr;
+ if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr;
// Find our parent.
CBlockIndex* pindexPar = pindex->pprev;
// Find which child we just visited.
@@ -4007,6 +4253,8 @@ std::string CBlockFileInfo::ToString() const
CBlockFileInfo* GetBlockFileInfo(size_t n)
{
+ LOCK(cs_LastBlockFile);
+
return &vinfoBlockFile.at(n);
}
@@ -4033,7 +4281,7 @@ static const uint64_t MEMPOOL_DUMP_VERSION = 1;
bool LoadMempool(void)
{
const CChainParams& chainparams = Params();
- int64_t nExpiryTimeout = GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
+ int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
if (file.IsNull()) {
@@ -4069,7 +4317,8 @@ bool LoadMempool(void)
CValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
- AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, true, NULL, nTime, NULL, false, 0);
+ AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
+ nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */);
if (state.IsValid()) {
++count;
} else {
@@ -4096,7 +4345,7 @@ bool LoadMempool(void)
return true;
}
-void DumpMempool(void)
+bool DumpMempool(void)
{
int64_t start = GetTimeMicros();
@@ -4116,7 +4365,7 @@ void DumpMempool(void)
try {
FILE* filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
if (!filestr) {
- return;
+ return false;
}
CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
@@ -4137,18 +4386,20 @@ void DumpMempool(void)
file.fclose();
RenameOver(GetDataDir() / "mempool.dat.new", GetDataDir() / "mempool.dat");
int64_t last = GetTimeMicros();
- LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*0.000001, (last-mid)*0.000001);
+ LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n", (mid-start)*MICRO, (last-mid)*MICRO);
} catch (const std::exception& e) {
LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
+ return false;
}
+ return true;
}
//! Guess how far we are in the verification process at the given block index
double GuessVerificationProgress(const ChainTxData& data, CBlockIndex *pindex) {
- if (pindex == NULL)
+ if (pindex == nullptr)
return 0.0;
- int64_t nNow = time(NULL);
+ int64_t nNow = time(nullptr);
double fTxTotal;