Diffstat (limited to 'src/validation.cpp'):
 src/validation.cpp | 218 +++++++++++-----
 1 file changed, 153 insertions(+), 65 deletions(-)
diff --git a/src/validation.cpp b/src/validation.cpp
index 0bd1ec672b..efabe6293f 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -69,7 +69,7 @@ CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
-bool fReindex = false;
+std::atomic_bool fReindex(false);
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
@@ -156,6 +156,26 @@ namespace {
/** chainwork for the last block that preciousblock has been applied to. */
arith_uint256 nLastPreciousChainwork = 0;
+ /** In order to efficiently track invalidity of headers, we keep the set of
+ * blocks which we tried to connect and found to be invalid here (ie which
+ * were set to BLOCK_FAILED_VALID since the last restart). We can then
+ * walk this set and check if a new header is a descendant of something in
+ * this set, preventing us from having to walk mapBlockIndex when we try
+ * to connect a bad block and fail.
+ *
+ * While this is more complicated than marking everything which descends
+ * from an invalid block as invalid at the time we discover it to be
+ * invalid, doing so would require walking all of mapBlockIndex to find all
+ * descendants. Since this case should be very rare, keeping track of all
+ * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
+ * well.
+ *
+ * Because we already walk mapBlockIndex in height-order at startup, we go
+ * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
+ * instead of putting things in this set.
+ */
+ std::set<CBlockIndex*> g_failed_blocks;
+
/** Dirty block index entries. */
std::set<CBlockIndex*> setDirtyBlockIndex;
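
[Editor's note, not part of the patch: a minimal sketch of the ancestry test that keeping this set enables, with a simplified index entry carrying only nHeight and pprev. The real CBlockIndex::GetAncestor is logarithmic via a skip list; the naive walk below is only meant to show the idea.]

#include <set>

struct BlockIndexSketch {
    int nHeight = 0;
    BlockIndexSketch* pprev = nullptr;  // parent entry, nullptr for genesis

    // Naive ancestor lookup: walk parent pointers down to the requested height.
    BlockIndexSketch* GetAncestor(int height) {
        BlockIndexSketch* walk = this;
        while (walk != nullptr && walk->nHeight > height) walk = walk->pprev;
        return walk;
    }
};

// A new header builds on an invalid chain iff some entry of the failed set is
// an ancestor of its previous block -- the test AcceptBlockHeader performs
// against g_failed_blocks later in this diff.
bool DescendsFromFailedBlock(BlockIndexSketch* pindexPrev,
                             const std::set<BlockIndexSketch*>& failed_blocks)
{
    for (BlockIndexSketch* failedit : failed_blocks) {
        if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) return true;
    }
    return false;
}
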
@@ -219,7 +239,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags)
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
- // BIP113 will require that time-locked transactions have nLockTime set to
+ // BIP113 requires that time-locked transactions have nLockTime set to
// less than the median time of the previous block they're contained in.
// When the next block is created its previous block will be the current
// chain tip, so we use that to calculate the median time passed to
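
[Editor's note, not part of the patch: a hedged sketch of the locktime rule the comment above describes, with simplified types, and ignoring the all-inputs-final nSequence escape hatch that the real IsFinalTx also honors. LOCKTIME_THRESHOLD (500000000) separates height-based from time-based locks.]

#include <cstdint>

static const int64_t LOCKTIME_THRESHOLD = 500000000;  // below: block height, at/above: unix time

// nBlockHeight is the height the transaction would be mined at; nBlockTime is,
// under BIP113, the median-time-past of that block's predecessor (the current tip).
bool IsFinalLockTime(uint32_t nLockTime, int nBlockHeight, int64_t nBlockTime)
{
    if (nLockTime == 0) return true;  // no lock at all
    const int64_t limit = (static_cast<int64_t>(nLockTime) < LOCKTIME_THRESHOLD)
                              ? static_cast<int64_t>(nBlockHeight)
                              : nBlockTime;
    return static_cast<int64_t>(nLockTime) < limit;
}
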
@@ -384,7 +404,9 @@ void UpdateMempoolForReorg(DisconnectedBlockTransactions &disconnectpool, bool f
while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
- if (!fAddToMempool || (*it)->IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, *it, false, nullptr, nullptr, true)) {
+ if (!fAddToMempool || (*it)->IsCoinBase() ||
+ !AcceptToMemoryPool(mempool, stateDummy, *it, nullptr /* pfMissingInputs */,
+ nullptr /* plTxnReplaced */, true /* bypass_limits */, 0 /* nAbsurdFee */)) {
// If the transaction doesn't make it in to the mempool, remove any
// transactions that depend on it (which would now be orphans).
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
@@ -443,9 +465,9 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, CValidationSt
return CheckInputs(tx, state, view, true, flags, cacheSigStore, true, txdata);
}
-static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx, bool fLimitFree,
+static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool& pool, CValidationState& state, const CTransactionRef& ptx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
+ bool bypass_limits, const CAmount& nAbsurdFee, std::vector<COutPoint>& coins_to_uncache)
{
const CTransaction& tx = *ptx;
const uint256 hash = tx.GetHash();
@@ -532,7 +554,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
CCoinsView dummy;
CCoinsViewCache view(&dummy);
- CAmount nValueIn = 0;
LockPoints lp;
{
LOCK(pool.cs);
@@ -563,8 +584,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// Bring the best block into scope
view.GetBestBlock();
- nValueIn = view.GetValueIn(tx);
-
// we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
view.SetBackend(dummy);
@@ -575,6 +594,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
// CoinsViewCache instead of create its own
if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
+
+ } // end LOCK(pool.cs)
+
+ CAmount nFees = 0;
+ if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
// Check for non-standard pay-to-script-hash in inputs
@@ -587,8 +612,6 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
- CAmount nValueOut = tx.GetValueOut();
- CAmount nFees = nValueIn-nValueOut;
// nModifiedFees includes any fee deltas from PrioritiseTransaction
CAmount nModifiedFees = nFees;
pool.ApplyDelta(hash, nModifiedFees);
@@ -618,12 +641,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
strprintf("%d", nSigOpsCost));
CAmount mempoolRejectFee = pool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
- if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
+ if (!bypass_limits && mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool min fee not met", false, strprintf("%d < %d", nFees, mempoolRejectFee));
}
// No transactions are allowed below minRelayTxFee except from disconnected blocks
- if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
+ if (!bypass_limits && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "min relay fee not met");
}
@@ -855,17 +878,18 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
- // This transaction should only count for fee estimation if it isn't a
- // BIP 125 replacement transaction (may not be widely supported), the
- // node is not behind, and the transaction is not dependent on any other
- // transactions in the mempool.
- bool validForFeeEstimation = !fReplacementTransaction && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
+ // This transaction should only count for fee estimation if:
+ // - it isn't a BIP 125 replacement transaction (may not be widely supported)
+ // - it's not being readded during a reorg which bypasses typical mempool fee limits
+ // - the node is not behind
+ // - the transaction is not dependent on any other transactions in the mempool
+ bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
// Store transaction in memory
pool.addUnchecked(hash, entry, setAncestors, validForFeeEstimation);
// trim mempool and check if tx was trimmed
- if (!fOverrideMempoolLimit) {
+ if (!bypass_limits) {
LimitMempoolSize(pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
if (!pool.exists(hash))
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "mempool full");
@@ -878,12 +902,12 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
}
/** (try to) add transaction to memory pool with a specified acceptance time **/
-static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree,
+static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+ bool bypass_limits, const CAmount nAbsurdFee)
{
std::vector<COutPoint> coins_to_uncache;
- bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime, plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache);
+ bool res = AcceptToMemoryPoolWorker(chainparams, pool, state, tx, pfMissingInputs, nAcceptTime, plTxnReplaced, bypass_limits, nAbsurdFee, coins_to_uncache);
if (!res) {
for (const COutPoint& hashTx : coins_to_uncache)
pcoinsTip->Uncache(hashTx);
@@ -894,12 +918,12 @@ static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPo
return res;
}
-bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx, bool fLimitFree,
+bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransactionRef &tx,
bool* pfMissingInputs, std::list<CTransactionRef>* plTxnReplaced,
- bool fOverrideMempoolLimit, const CAmount nAbsurdFee)
+ bool bypass_limits, const CAmount nAbsurdFee)
{
const CChainParams& chainparams = Params();
- return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, fLimitFree, pfMissingInputs, GetTime(), plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee);
+ return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, pfMissingInputs, GetTime(), plTxnReplaced, bypass_limits, nAbsurdFee);
}
/** Return transaction in txOut, and if it was found inside a block, its hash is placed in hashBlock */
@@ -935,6 +959,9 @@ bool GetTransaction(const uint256 &hash, CTransactionRef &txOut, const Consensus
return error("%s: txid mismatch", __func__);
return true;
}
+
+ // transaction not found in index, nothing more can be done
+ return false;
}
if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
@@ -1173,6 +1200,7 @@ void static InvalidChainFound(CBlockIndex* pindexNew)
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
+ g_failed_blocks.insert(pindex);
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
@@ -1203,7 +1231,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
- return VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error);
+ return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
}
int GetSpendHeight(const CCoinsViewCache& inputs)
@@ -1244,9 +1272,6 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
{
if (!tx.IsCoinBase())
{
- if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
- return false;
-
if (pvChecks)
pvChecks->reserve(tx.vin.size());
@@ -1285,11 +1310,9 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// a sanity check that our caching is not introducing consensus
// failures through additional data in, eg, the coins being
// spent being checked as a part of CScriptCheck.
- const CScript& scriptPubKey = coin.out.scriptPubKey;
- const CAmount amount = coin.out.nValue;
// Verify signature
- CScriptCheck check(scriptPubKey, amount, tx, i, flags, cacheSigStore, &txdata);
+ CScriptCheck check(coin.out, tx, i, flags, cacheSigStore, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
@@ -1301,7 +1324,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// arguments; if so, don't trigger DoS protection to
// avoid splitting the network between upgraded and
// non-upgraded nodes.
- CScriptCheck check2(scriptPubKey, amount, tx, i,
+ CScriptCheck check2(coin.out, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
@@ -1588,11 +1611,12 @@ static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& consensusparams) {
AssertLockHeld(cs_main);
- // BIP16 didn't become active until Apr 1 2012
- int64_t nBIP16SwitchTime = 1333238400;
- bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
+ unsigned int flags = SCRIPT_VERIFY_NONE;
- unsigned int flags = fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
+ // Start enforcing P2SH (BIP16)
+ if (pindex->nHeight >= consensusparams.BIP16Height) {
+ flags |= SCRIPT_VERIFY_P2SH;
+ }
// Start enforcing the DERSIG (BIP66) rule
if (pindex->nHeight >= consensusparams.BIP66Height) {
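
[Editor's note, not part of the patch: a compact sketch of the pattern this hunk switches to, deriving script verification flags purely from activation heights stored in the chain's consensus params instead of keying BIP16 off a hard-coded timestamp. Names and bit values below are illustrative stand-ins, not the real script/interpreter.h constants.]

#include <cstdint>

// Illustrative stand-ins for the consensus parameters referenced above.
struct ConsensusHeightsSketch {
    int BIP16Height;  // P2SH
    int BIP66Height;  // strict DER signatures
    int BIP65Height;  // CHECKLOCKTIMEVERIFY
};

enum : uint32_t {
    VERIFY_NONE   = 0,
    VERIFY_P2SH   = 1u << 0,
    VERIFY_DERSIG = 1u << 1,
    VERIFY_CLTV   = 1u << 2,
};

// Flags accumulate monotonically: each deployment contributes its bit once the
// block's height reaches its activation height.
uint32_t GetScriptFlagsSketch(int height, const ConsensusHeightsSketch& params)
{
    uint32_t flags = VERIFY_NONE;
    if (height >= params.BIP16Height) flags |= VERIFY_P2SH;
    if (height >= params.BIP66Height) flags |= VERIFY_DERSIG;
    if (height >= params.BIP65Height) flags |= VERIFY_CLTV;
    return flags;
}
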
@@ -1711,6 +1735,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
// before the first had been spent. Since those coinbases are sufficiently buried its no longer possible to create further
// duplicate transactions descending from the known pairs either.
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
+ assert(pindex->pprev);
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
@@ -1760,9 +1785,15 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (!tx.IsCoinBase())
{
- if (!view.HaveInputs(tx))
- return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
- REJECT_INVALID, "bad-txns-inputs-missingorspent");
+ CAmount txfee = 0;
+ if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
+ return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
+ }
+ nFees += txfee;
+ if (!MoneyRange(nFees)) {
+ return state.DoS(100, error("%s: accumulated fee in the block out of range.", __func__),
+ REJECT_INVALID, "bad-txns-accumulated-fee-outofrange");
+ }
// Check that transaction is BIP68 final
// BIP68 lock checks (as opposed to nLockTime checks) must
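
[Editor's note, not part of the patch: a minimal sketch of the accumulation pattern introduced here. CheckTxInputs now hands back each transaction's fee, and the block-level total still has to be range-checked after every addition, since individually valid fees can sum past the money range. MAX_MONEY and MoneyRange below are simplified stand-ins for the definitions in amount.h.]

#include <cstdint>

typedef int64_t CAmountSketch;
static const CAmountSketch MAX_MONEY_SKETCH = 21000000LL * 100000000LL;  // 21M BTC in satoshis

bool MoneyRangeSketch(CAmountSketch v) { return v >= 0 && v <= MAX_MONEY_SKETCH; }

// Add one transaction's fee to the running block total and re-validate the
// total, mirroring the nFees/MoneyRange check in the hunk above.
bool AccumulateFee(CAmountSketch& block_fees, CAmountSketch txfee)
{
    if (!MoneyRangeSketch(txfee)) return false;  // bad per-tx fee
    block_fees += txfee;
    return MoneyRangeSketch(block_fees);         // bad accumulated block fee
}
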
@@ -1790,8 +1821,6 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
txdata.emplace_back(tx);
if (!tx.IsCoinBase())
{
- nFees += view.GetValueIn(tx)-tx.GetValueOut();
-
std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : nullptr))
@@ -1850,6 +1879,7 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
if (!pblocktree->WriteTxIndex(vPos))
return AbortNode(state, "Failed to write transaction index");
+ assert(pindex->phashBlock);
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
@@ -2525,17 +2555,18 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C
{
AssertLockHeld(cs_main);
- // Mark the block itself as invalid.
- pindex->nStatus |= BLOCK_FAILED_VALID;
- setDirtyBlockIndex.insert(pindex);
- setBlockIndexCandidates.erase(pindex);
+ // We first disconnect backwards and then mark the blocks as invalid.
+ // This prevents a case where pruned nodes may fail to invalidateblock
+ // and be left unable to start as they have no tip candidates (as there
+ // are no blocks that meet the "have data and are not invalid per
+ // nStatus" criteria for inclusion in setBlockIndexCandidates).
+
+ bool pindex_was_in_chain = false;
+ CBlockIndex *invalid_walk_tip = chainActive.Tip();
DisconnectedBlockTransactions disconnectpool;
while (chainActive.Contains(pindex)) {
- CBlockIndex *pindexWalk = chainActive.Tip();
- pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
- setDirtyBlockIndex.insert(pindexWalk);
- setBlockIndexCandidates.erase(pindexWalk);
+ pindex_was_in_chain = true;
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(state, chainparams, &disconnectpool)) {
@@ -2546,6 +2577,21 @@ bool InvalidateBlock(CValidationState& state, const CChainParams& chainparams, C
}
}
+ // Now mark the blocks we just disconnected as descendants invalid
+ // (note this may not be all descendants).
+ while (pindex_was_in_chain && invalid_walk_tip != pindex) {
+ invalid_walk_tip->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(invalid_walk_tip);
+ setBlockIndexCandidates.erase(invalid_walk_tip);
+ invalid_walk_tip = invalid_walk_tip->pprev;
+ }
+
+ // Mark the block itself as invalid.
+ pindex->nStatus |= BLOCK_FAILED_VALID;
+ setDirtyBlockIndex.insert(pindex);
+ setBlockIndexCandidates.erase(pindex);
+ g_failed_blocks.insert(pindex);
+
// DisconnectTip will add transactions to disconnectpool; try to add these
// back to the mempool.
UpdateMempoolForReorg(disconnectpool, true);
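
[Editor's note, not part of the patch: a toy model of the reordering, assuming the invalidated block is in the active chain, which is the case the comment above worries about. The chain is a plain vector indexed by height, and failure flags are only set after every affected block has been disconnected, so there is never a moment where the node has flagged blocks but no remaining tip candidates.]

#include <cstddef>
#include <cstdint>
#include <vector>

enum : uint32_t { FAILED_VALID = 1u << 0, FAILED_CHILD = 1u << 1 };

struct ToyBlock { uint32_t status = 0; };

// chain[i] is the active block at height i; 'target' is the height to invalidate.
void InvalidateSketch(std::vector<ToyBlock*>& chain, std::size_t target)
{
    std::vector<ToyBlock*> disconnected;

    // Phase 1: disconnect from the tip down to and including the target block,
    // without touching any failure flags yet.
    while (chain.size() > target) {
        disconnected.push_back(chain.back());
        chain.pop_back();
    }

    // Phase 2: only now mark the disconnected descendants, then the target itself.
    for (std::size_t i = 0; i + 1 < disconnected.size(); ++i) {
        disconnected[i]->status |= FAILED_CHILD;
    }
    if (!disconnected.empty()) disconnected.back()->status |= FAILED_VALID;
}
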
@@ -2583,6 +2629,7 @@ bool ResetBlockFailureFlags(CBlockIndex *pindex) {
// Reset invalid block marker if it was pointing to one of those.
pindexBestInvalid = nullptr;
}
+ g_failed_blocks.erase(it->second);
}
it++;
}
@@ -2608,7 +2655,6 @@ static CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(block);
- assert(pindexNew);
// We assign the sequence id to blocks only when the full data is available,
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
@@ -3059,6 +3105,21 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
+
+ if (!pindexPrev->IsValid(BLOCK_VALID_SCRIPTS)) {
+ for (const CBlockIndex* failedit : g_failed_blocks) {
+ if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
+ assert(failedit->nStatus & BLOCK_FAILED_VALID);
+ CBlockIndex* invalid_walk = pindexPrev;
+ while (invalid_walk != failedit) {
+ invalid_walk->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(invalid_walk);
+ invalid_walk = invalid_walk->pprev;
+ }
+ return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
+ }
+ }
+ }
}
if (pindex == nullptr)
pindex = AddToBlockIndex(block);
@@ -3072,13 +3133,15 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
// Exposed wrapper for AcceptBlockHeader
-bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex)
+bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex, CBlockHeader *first_invalid)
{
+ if (first_invalid != nullptr) first_invalid->SetNull();
{
LOCK(cs_main);
for (const CBlockHeader& header : headers) {
CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast
if (!AcceptBlockHeader(header, state, chainparams, &pindex)) {
+ if (first_invalid) *first_invalid = header;
return false;
}
if (ppindex) {
@@ -3108,7 +3171,7 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
- bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
+ bool fHasMoreOrSameWork = (chainActive.Tip() ? pindex->nChainWork >= chainActive.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
@@ -3125,9 +3188,15 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
// and unrequested blocks.
if (fAlreadyHave) return true;
if (!fRequested) { // If we didn't ask for it:
- if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
- if (!fHasMoreWork) return true; // Don't process less-work chains
- if (fTooFarAhead) return true; // Block height is too high
+ if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
+ if (!fHasMoreOrSameWork) return true; // Don't process less-work chains
+ if (fTooFarAhead) return true; // Block height is too high
+
+ // Protect against DoS attacks from low-work chains.
+ // If our tip is behind, a peer could try to send us
+ // low-work blocks on a fake chain that we would never
+ // request; don't process these.
+ if (pindex->nChainWork < nMinimumChainWork) return true;
}
if (fNewBlock) *fNewBlock = true;
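
[Editor's note, not part of the patch: the gist of the gate applied to unrequested blocks, with chain work reduced to a plain integer; the real comparison is between arith_uint256 values, and nMinimumChainWork comes from chain params or -minimumchainwork.]

#include <cstdint>

// Decide whether an *unrequested* block is worth processing further; blocks we
// explicitly requested skip all of these checks (fRequested in AcceptBlock).
bool ShouldProcessUnrequested(uint64_t block_chain_work,
                              uint64_t tip_chain_work,
                              uint64_t minimum_chain_work,
                              bool already_have_data,
                              bool too_far_ahead)
{
    if (already_have_data) return false;                      // previously processed (possibly pruned)
    if (block_chain_work < tip_chain_work) return false;      // strictly less work than our tip
    if (too_far_ahead) return false;                          // would hamper pruning
    if (block_chain_work < minimum_chain_work) return false;  // low-work chain: likely DoS bait
    return true;
}
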
@@ -3189,7 +3258,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
CheckBlockIndex(chainparams.GetConsensus());
if (!ret) {
GetMainSignals().BlockChecked(*pblock, state);
- return error("%s: AcceptBlock FAILED", __func__);
+ return error("%s: AcceptBlock FAILED (%s)", __func__, state.GetDebugMessage());
}
}
@@ -3230,8 +3299,10 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
*/
/* Calculate the amount of disk space the block & undo files currently use */
-static uint64_t CalculateCurrentUsage()
+uint64_t CalculateCurrentUsage()
{
+ LOCK(cs_LastBlockFile);
+
uint64_t retval = 0;
for (const CBlockFileInfo &file : vinfoBlockFile) {
retval += file.nSize + file.nUndoSize;
@@ -3242,6 +3313,8 @@ static uint64_t CalculateCurrentUsage()
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
+ LOCK(cs_LastBlockFile);
+
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
CBlockIndex* pindex = it->second;
if (pindex->nFile == fileNumber) {
@@ -3434,8 +3507,6 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
- if (!pindexNew)
- throw std::runtime_error(std::string(__func__) + ": new CBlockIndex failed");
mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
@@ -3477,6 +3548,10 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pindex->nChainTx = pindex->nTx;
}
}
+ if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+ pindex->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(pindex);
+ }
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == nullptr))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
@@ -3530,7 +3605,7 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
- fReindex |= fReindexing;
+ if (fReindexing) fReindex = true;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
@@ -3867,6 +3942,7 @@ void UnloadBlockIndex()
nLastBlockFile = 0;
nBlockSequenceId = 1;
setDirtyBlockIndex.clear();
+ g_failed_blocks.clear();
setDirtyFileInfo.clear();
versionbitscache.Clear();
for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
@@ -4244,6 +4320,8 @@ std::string CBlockFileInfo::ToString() const
CBlockFileInfo* GetBlockFileInfo(size_t n)
{
+ LOCK(cs_LastBlockFile);
+
return &vinfoBlockFile.at(n);
}
@@ -4279,8 +4357,9 @@ bool LoadMempool(void)
}
int64_t count = 0;
- int64_t skipped = 0;
+ int64_t expired = 0;
int64_t failed = 0;
+ int64_t already_there = 0;
int64_t nNow = GetTime();
try {
@@ -4306,14 +4385,23 @@ bool LoadMempool(void)
CValidationState state;
if (nTime + nExpiryTimeout > nNow) {
LOCK(cs_main);
- AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, true, nullptr, nTime, nullptr, false, 0);
+ AcceptToMemoryPoolWithTime(chainparams, mempool, state, tx, nullptr /* pfMissingInputs */, nTime,
+ nullptr /* plTxnReplaced */, false /* bypass_limits */, 0 /* nAbsurdFee */);
if (state.IsValid()) {
++count;
} else {
- ++failed;
+ // mempool may contain the transaction already, e.g. from
+ // wallet(s) having loaded it while we were processing
+ // mempool transactions; consider these as valid, instead of
+ // failed, but mark them as 'already there'
+ if (mempool.exists(tx->GetHash())) {
+ ++already_there;
+ } else {
+ ++failed;
+ }
}
} else {
- ++skipped;
+ ++expired;
}
if (ShutdownRequested())
return false;
@@ -4329,7 +4417,7 @@ bool LoadMempool(void)
return false;
}
- LogPrintf("Imported mempool transactions from disk: %i successes, %i failed, %i expired\n", count, failed, skipped);
+ LogPrintf("Imported mempool transactions from disk: %i succeeded, %i failed, %i expired, %i already there\n", count, failed, expired, already_there);
return true;
}