Diffstat (limited to 'src/main.cpp')
 src/main.cpp | 325
 1 file changed, 111 insertions(+), 214 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 875b7b7068..d470ba9003 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -11,17 +11,27 @@
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
+#include "consensus/consensus.h"
#include "consensus/validation.h"
+#include "hash.h"
#include "init.h"
#include "merkleblock.h"
#include "net.h"
+#include "policy/policy.h"
#include "pow.h"
+#include "primitives/block.h"
+#include "primitives/transaction.h"
+#include "script/script.h"
+#include "script/sigcache.h"
+#include "script/standard.h"
+#include "tinyformat.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "undo.h"
#include "util.h"
#include "utilmoneystr.h"
+#include "utilstrencodings.h"
#include "validationinterface.h"
#include <sstream>
@@ -57,10 +67,12 @@ bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = true;
+bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = true;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
+bool fAlerts = DEFAULT_ALERTS;
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
CFeeRate minRelayTxFee = CFeeRate(1000);
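As a worked illustration of the threshold above (a sketch, not part of the patch; CFeeRate and CAmount come from amount.h): CFeeRate stores satoshis per 1000 bytes, so CFeeRate(1000) treats anything paying less than one satoshi per byte as zero-fee for relay and mining.

    CFeeRate zeroFeeThreshold(1000);               // 1000 satoshis per 1000 bytes
    CAmount nFee = zeroFeeThreshold.GetFee(250);   // 1000 * 250 / 1000 = 250 satoshis
    // A 250-byte transaction paying fewer than 250 satoshis is treated as free.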
@@ -71,9 +83,9 @@ struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
};
-map<uint256, COrphanTx> mapOrphanTransactions;
-map<uint256, set<uint256> > mapOrphanTransactionsByPrev;
-void EraseOrphansFor(NodeId peer);
+map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
+map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
+void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
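For readers unfamiliar with the annotations introduced above, a minimal stand-alone sketch (not part of the patch; the macros below stand in for the ones in threadsafety.h, and the Mutex class and names are illustrative) of how Clang's -Wthread-safety uses them: GUARDED_BY ties data to a lock, and EXCLUSIVE_LOCKS_REQUIRED makes callers prove at compile time that they hold it.

    // Build with: clang++ -std=c++11 -Wthread-safety -c example.cpp
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))

    class __attribute__((lockable)) Mutex {
    public:
        void lock()   __attribute__((exclusive_lock_function()));
        void unlock() __attribute__((unlock_function()));
    };

    Mutex cs_example;
    int nOrphanCount GUARDED_BY(cs_example) = 0;

    // Clang warns if any caller reaches this without holding cs_example.
    void BumpOrphanCount() EXCLUSIVE_LOCKS_REQUIRED(cs_example)
    {
        ++nOrphanCount;
    }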
/**
* Returns true if there are nRequired or more blocks of minVersion or above
@@ -433,13 +445,14 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
- // pindexLastCommonBlock as long as all ancestors are already downloaded.
+ // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
+ // already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
- if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
if (pindex->nChainTx)
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
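A hypothetical restatement of the widened condition above (the helper name is illustrative, not part of the patch):

    // True when no download is needed: either the block's data is on disk, or the
    // block is already connected in the active chain (its data may have been pruned,
    // but this peer cannot be offering anything we still need from it).
    static bool AlreadyHaveOrConnected(const CBlockIndex* pindex)
    {
        return (pindex->nStatus & BLOCK_HAVE_DATA) || chainActive.Contains(pindex);
    }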
@@ -522,7 +535,7 @@ CBlockTreeDB *pblocktree = NULL;
// mapOrphanTransactions
//
-bool AddOrphanTx(const CTransaction& tx, NodeId peer)
+bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
@@ -552,7 +565,7 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer)
return true;
}
-void static EraseOrphanTx(uint256 hash)
+void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
@@ -586,7 +599,7 @@ void EraseOrphansFor(NodeId peer)
}
-unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
@@ -602,76 +615,6 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
return nEvicted;
}
-
-
-
-
-
-
-bool IsStandardTx(const CTransaction& tx, string& reason)
-{
- if (tx.nVersion > CTransaction::CURRENT_VERSION || tx.nVersion < 1) {
- reason = "version";
- return false;
- }
-
- // Extremely large transactions with lots of inputs can cost the network
- // almost as much to process as they cost the sender in fees, because
- // computing signature hashes is O(ninputs*txsize). Limiting transactions
- // to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
- unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
- if (sz >= MAX_STANDARD_TX_SIZE) {
- reason = "tx-size";
- return false;
- }
-
- BOOST_FOREACH(const CTxIn& txin, tx.vin)
- {
- // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
- // keys. (remember the 520 byte limit on redeemScript size) That works
- // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
- // bytes of scriptSig, which we round off to 1650 bytes for some minor
- // future-proofing. That's also enough to spend a 20-of-20
- // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not
- // considered standard)
- if (txin.scriptSig.size() > 1650) {
- reason = "scriptsig-size";
- return false;
- }
- if (!txin.scriptSig.IsPushOnly()) {
- reason = "scriptsig-not-pushonly";
- return false;
- }
- }
-
- unsigned int nDataOut = 0;
- txnouttype whichType;
- BOOST_FOREACH(const CTxOut& txout, tx.vout) {
- if (!::IsStandard(txout.scriptPubKey, whichType)) {
- reason = "scriptpubkey";
- return false;
- }
-
- if (whichType == TX_NULL_DATA)
- nDataOut++;
- else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
- reason = "bare-multisig";
- return false;
- } else if (txout.IsDust(::minRelayTxFee)) {
- reason = "dust";
- return false;
- }
- }
-
- // only one OP_RETURN txout is permitted
- if (nDataOut > 1) {
- reason = "multi-op-return";
- return false;
- }
-
- return true;
-}
-
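The 1650-byte bound in the removed comment checks out arithmetically (illustrative sketch, assuming compressed keys and roughly 72-byte DER signatures plus a sighash byte):

    // 15-of-15 redeemScript: OP_15, 15 x (1-byte push + 33-byte key), OP_15, OP_CHECKMULTISIG
    const int nRedeemScriptSize = 1 + 15 * (1 + 33) + 1 + 1;              // = 513 bytes (< 520-byte limit)
    // scriptSig: OP_0 (CHECKMULTISIG quirk), 15 x (1-byte push + 73-byte sig),
    // then OP_PUSHDATA2 + 2 length bytes + the 513-byte redeemScript itself
    const int nScriptSigSize = 1 + 15 * (1 + 73) + 3 + nRedeemScriptSize; // = 1627, rounded up to 1650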
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
if (tx.nLockTime == 0)
@@ -690,74 +633,6 @@ bool CheckFinalTx(const CTransaction &tx)
return IsFinalTx(tx, chainActive.Height() + 1, GetAdjustedTime());
}
-/**
- * Check transaction inputs to mitigate two
- * potential denial-of-service attacks:
- *
- * 1. scriptSigs with extra data stuffed into them,
- * not consumed by scriptPubKey (or P2SH script)
- * 2. P2SH scripts with a crazy number of expensive
- * CHECKSIG/CHECKMULTISIG operations
- */
-bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
-{
- if (tx.IsCoinBase())
- return true; // Coinbases don't use vin normally
-
- for (unsigned int i = 0; i < tx.vin.size(); i++)
- {
- const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]);
-
- vector<vector<unsigned char> > vSolutions;
- txnouttype whichType;
- // get the scriptPubKey corresponding to this input:
- const CScript& prevScript = prev.scriptPubKey;
- if (!Solver(prevScript, whichType, vSolutions))
- return false;
- int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
- if (nArgsExpected < 0)
- return false;
-
- // Transactions with extra stuff in their scriptSigs are
- // non-standard. Note that this EvalScript() call will
- // be quick, because if there are any operations
- // beside "push data" in the scriptSig
- // IsStandardTx() will have already returned false
- // and this method isn't called.
- vector<vector<unsigned char> > stack;
- if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker()))
- return false;
-
- if (whichType == TX_SCRIPTHASH)
- {
- if (stack.empty())
- return false;
- CScript subscript(stack.back().begin(), stack.back().end());
- vector<vector<unsigned char> > vSolutions2;
- txnouttype whichType2;
- if (Solver(subscript, whichType2, vSolutions2))
- {
- int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
- if (tmpExpected < 0)
- return false;
- nArgsExpected += tmpExpected;
- }
- else
- {
- // Any other Script with less than 15 sigops OK:
- unsigned int sigops = subscript.GetSigOpCount(true);
- // ... extra data left on the stack after execution is OK, too:
- return (sigops <= MAX_P2SH_SIGOPS);
- }
- }
-
- if (stack.size() != (unsigned int)nArgsExpected)
- return false;
- }
-
- return true;
-}
-
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
unsigned int nSigOps = 0;
@@ -898,7 +773,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
string reason;
- if (Params().RequireStandard() && !IsStandardTx(tx, reason))
+ if (fRequireStandard && !IsStandardTx(tx, reason))
return state.DoS(0,
error("AcceptToMemoryPool: nonstandard transaction: %s", reason),
REJECT_NONSTANDARD, reason);
@@ -969,7 +844,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
}
// Check for non-standard pay-to-script-hash in inputs
- if (Params().RequireStandard() && !AreInputsStandard(tx, view))
+ if (fRequireStandard && !AreInputsStandard(tx, view))
return error("AcceptToMemoryPool: nonstandard transaction input");
// Check that the transaction doesn't have an excessive number of
@@ -1395,22 +1270,21 @@ bool CScriptCheck::operator()() {
return true;
}
-bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks)
+int GetSpendHeight(const CCoinsViewCache& inputs)
{
- if (!tx.IsCoinBase())
- {
- if (pvChecks)
- pvChecks->reserve(tx.vin.size());
+ LOCK(cs_main);
+ CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
+ return pindexPrev->nHeight + 1;
+}
+namespace Consensus {
+bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight)
+{
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!inputs.HaveInputs(tx))
return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString()));
- // While checking, GetBestBlock() refers to the parent block.
- // This is also true for mempool checks.
- CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
- int nSpendHeight = pindexPrev->nHeight + 1;
CAmount nValueIn = 0;
CAmount nFees = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
@@ -1449,6 +1323,19 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
if (!MoneyRange(nFees))
return state.DoS(100, error("CheckInputs(): nFees out of range"),
REJECT_INVALID, "bad-txns-fee-outofrange");
+ return true;
+}
+}// namespace Consensus
+
+bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks)
+{
+ if (!tx.IsCoinBase())
+ {
+ if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)))
+ return false;
+
+ if (pvChecks)
+ pvChecks->reserve(tx.vin.size());
// The first loop above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
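One practical effect of the split introduced above (sketch only; it assumes a CCoinsViewCache named view that already contains the transaction's inputs): the consensus-level input checks can now be invoked with an explicit spend height, independently of cs_main and of script verification.

    CValidationState state;
    if (!Consensus::CheckTxInputs(tx, state, view, /* nSpendHeight */ 100000))
        LogPrintf("consensus input check failed: %s\n", state.GetRejectReason());
    // Script and signature checks stay in CheckInputs(), which now simply runs
    // Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs)) first.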
@@ -1711,9 +1598,10 @@ void ThreadScriptCheck() {
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
//
-void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CChain& chain, int64_t nPowTargetSpacing)
+void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
+ int64_t nPowTargetSpacing)
{
- if (initialDownloadCheck()) return;
+ if (bestHeader == NULL || initialDownloadCheck()) return;
static int64_t lastAlertTime = 0;
int64_t now = GetAdjustedTime();
@@ -1729,10 +1617,13 @@ void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const
int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;
LOCK(cs);
- int h = chain.Height();
- while (h > 0 && chain[h]->GetBlockTime() >= startTime)
- --h;
- int nBlocks = chain.Height()-h;
+ const CBlockIndex* i = bestHeader;
+ int nBlocks = 0;
+ while (i->GetBlockTime() >= startTime) {
+ ++nBlocks;
+ i = i->pprev;
+ if (i == NULL) return; // Ran out of chain, we must not be fully sync'ed
+ }
// How likely is it to find that many by chance?
double p = boost::math::pdf(poisson, nBlocks);
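For intuition about the probability computed above, a stand-alone sketch (the 4-hour window and 10-minute target spacing are assumptions for illustration; only the Poisson model and boost::math::pdf come from the surrounding code):

    #include <boost/math/distributions/poisson.hpp>
    #include <cstdio>

    int main()
    {
        const double blocksExpected = 4.0 * 60 * 60 / 600;                 // 24 blocks expected in 4 hours
        boost::math::poisson_distribution<double> poisson(blocksExpected);
        // Roughly 2.5e-6 and 3e-10 respectively: both counts are far from the
        // expected 24, which is the kind of deviation this check looks for.
        std::printf("P(exactly  5 blocks) = %g\n", boost::math::pdf(poisson, 5.0));
        std::printf("P(exactly 60 blocks) = %g\n", boost::math::pdf(poisson, 60.0));
        return 0;
    }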
@@ -1790,7 +1681,14 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
return true;
}
- bool fScriptChecks = (!fCheckpointsEnabled || pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints()));
+ bool fScriptChecks = true;
+ if (fCheckpointsEnabled) {
+ CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
+ if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
+ // This block is an ancestor of a checkpoint: disable script checks
+ fScriptChecks = false;
+ }
+ }
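The ancestry test above can be read through a hypothetical helper (not in the patch): GetAncestor() walks back from the checkpointed block to pindex's height, so the comparison asks whether pindex lies on the path leading to the last checkpoint, in which case its scripts are trusted and skipped.

    // Sketch only: true when pindex is an ancestor of (or equal to) the checkpoint.
    static bool IsAncestorOfCheckpoint(const CBlockIndex* pindex, const CBlockIndex* pindexCheckpoint)
    {
        return pindexCheckpoint != NULL &&
               pindexCheckpoint->GetAncestor(pindex->nHeight) == pindex;
    }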
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
@@ -2148,7 +2046,7 @@ static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
-/**
+/**
* Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*/
@@ -2169,7 +2067,6 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *
LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
- CInv inv(MSG_BLOCK, pindexNew->GetBlockHash());
bool rv = ConnectBlock(*pblock, state, pindexNew, view);
GetMainSignals().BlockChecked(*pblock, state);
if (!rv) {
@@ -2177,7 +2074,7 @@ bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *
InvalidBlockFound(pindexNew, state);
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
}
- mapBlockSource.erase(inv.hash);
+ mapBlockSource.erase(pindexNew->GetBlockHash());
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
assert(view.Flush());
@@ -2712,18 +2609,23 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
return true;
}
-bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
+static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidationState& state, const CChainParams& chainparams, const uint256& hash)
{
- const CChainParams& chainParams = Params();
- const Consensus::Params& consensusParams = chainParams.GetConsensus();
- uint256 hash = block.GetHash();
- if (hash == consensusParams.hashGenesisBlock)
+ if (*pindexPrev->phashBlock == chainparams.GetConsensus().hashGenesisBlock)
return true;
- assert(pindexPrev);
-
int nHeight = pindexPrev->nHeight+1;
+ // Don't accept any forks from the main chain prior to last checkpoint
+ CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
+ if (pcheckpoint && nHeight < pcheckpoint->nHeight)
+ return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
+
+ return true;
+}
+bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
+{
+ const Consensus::Params& consensusParams = Params().GetConsensus();
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
return state.DoS(100, error("%s: incorrect proof of work", __func__),
@@ -2734,19 +2636,6 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
return state.Invalid(error("%s: block's timestamp is too early", __func__),
REJECT_INVALID, "time-too-old");
- if(fCheckpointsEnabled)
- {
- // Check that the block chain matches the known block chain up to a checkpoint
- if (!Checkpoints::CheckBlock(chainParams.Checkpoints(), nHeight, hash))
- return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight),
- REJECT_CHECKPOINT, "checkpoint mismatch");
-
- // Don't accept any forks from the main chain prior to last checkpoint
- CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
- if (pcheckpoint && nHeight < pcheckpoint->nHeight)
- return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
- }
-
// Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
if (block.nVersion < 2 && IsSuperMajority(2, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
return state.Invalid(error("%s: rejected nVersion=1 block", __func__),
@@ -2793,33 +2682,37 @@ bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBloc
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = NULL;
- if (miSelf != mapBlockIndex.end()) {
- // Block header is already known.
- pindex = miSelf->second;
- if (ppindex)
- *ppindex = pindex;
- if (pindex->nStatus & BLOCK_FAILED_MASK)
- return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
- return true;
- }
+ if (hash != chainparams.GetConsensus().hashGenesisBlock) {
- if (!CheckBlockHeader(block, state))
- return false;
+ if (miSelf != mapBlockIndex.end()) {
+ // Block header is already known.
+ pindex = miSelf->second;
+ if (ppindex)
+ *ppindex = pindex;
+ if (pindex->nStatus & BLOCK_FAILED_MASK)
+ return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
+ return true;
+ }
- // Get prev block index
- CBlockIndex* pindexPrev = NULL;
- if (hash != chainparams.GetConsensus().hashGenesisBlock) {
+ if (!CheckBlockHeader(block, state))
+ return false;
+
+ // Get prev block index
+ CBlockIndex* pindexPrev = NULL;
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
- }
- if (!ContextualCheckBlockHeader(block, state, pindexPrev))
- return false;
+ assert(pindexPrev);
+ if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
+ return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
+ if (!ContextualCheckBlockHeader(block, state, pindexPrev))
+ return false;
+ }
if (pindex == NULL)
pindex = AddToBlockIndex(block);
@@ -2938,8 +2831,11 @@ bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, bool
bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
+ const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
- assert(pindexPrev == chainActive.Tip());
+ assert(pindexPrev && pindexPrev == chainActive.Tip());
+ if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
+ return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
@@ -3047,9 +2943,9 @@ void FindFilesToPrune(std::set<int>& setFilesToPrune)
if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
break;
- // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip
+ // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
- break;
+ continue;
PruneOneBlockFile(fileNumber);
// Queue up the files for removal
@@ -3703,7 +3599,7 @@ void static CheckBlockIndex()
// CAlert
//
-string GetWarnings(string strFor)
+std::string GetWarnings(const std::string& strFor)
{
int nPriority = 0;
string strStatusBar;
@@ -3768,7 +3664,7 @@ string GetWarnings(string strFor)
//
-bool static AlreadyHave(const CInv& inv)
+bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
@@ -4327,7 +4223,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
mempool.check(pcoinsTip);
RelayTransaction(tx);
vWorkQueue.push_back(inv.hash);
- vEraseQueue.push_back(inv.hash);
LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s: accepted %s (poolsz %u)\n",
pfrom->id, pfrom->cleanSubVer,
@@ -4354,7 +4249,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// anyone relaying LegitTxX banned)
CValidationState stateDummy;
- vEraseQueue.push_back(orphanHash);
if (setMisbehaving.count(fromPeer))
continue;
@@ -4363,6 +4257,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx);
vWorkQueue.push_back(orphanHash);
+ vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
@@ -4374,8 +4269,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
setMisbehaving.insert(fromPeer);
LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
}
- // too-little-fee orphan
+ // Has inputs but not accepted to mempool
+ // Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
+ vEraseQueue.push_back(orphanHash);
}
mempool.check(pcoinsTip);
}
@@ -4615,7 +4512,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
- else if (strCommand == "alert")
+ else if (fAlerts && strCommand == "alert")
{
CAlert alert;
vRecv >> alert;
@@ -4945,7 +4842,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
else
{
- CNode::Ban(pto->addr);
+ CNode::Ban(pto->addr, BanReasonNodeMisbehaving);
}
}
state.fShouldBan = false;