Diffstat (limited to 'src/main.cpp'):
 src/main.cpp | 507
 1 file changed, 391 insertions(+), 116 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 26a22ae6fd..ceb5cb66f3 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -74,8 +74,8 @@ size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
bool fAlerts = DEFAULT_ALERTS;
-/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
-CFeeRate minRelayTxFee = CFeeRate(1000);
+/** Fees smaller than this (in satoshi) are considered zero fee (for relaying, mining and transaction creation) */
+CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CTxMemPool mempool(::minRelayTxFee);
@@ -92,7 +92,7 @@ void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
* in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
*/
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
-static void CheckBlockIndex();
+static void CheckBlockIndex(const Consensus::Params& consensusParams);
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
@@ -650,10 +650,36 @@ bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
return true;
}
-bool CheckFinalTx(const CTransaction &tx)
+bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
- return IsFinalTx(tx, chainActive.Height() + 1, GetAdjustedTime());
+
+ // By convention a negative value for flags indicates that the
+ // current network-enforced consensus rules should be used. In
+ // a future soft-fork scenario that would mean checking which
+ // rules would be enforced for the next block and setting the
+ // appropriate flags. At the present time no soft-forks are
+ // scheduled, so no flags are set.
+ flags = std::max(flags, 0);
+
+ // CheckFinalTx() uses chainActive.Height()+1 to evaluate
+ // nLockTime because when IsFinalTx() is called within
+ // CBlock::AcceptBlock(), the height of the block *being*
+ // evaluated is what is used. Thus if we want to know if a
+ // transaction can be part of the *next* block, we need to call
+ // IsFinalTx() with one more than chainActive.Height().
+ const int nBlockHeight = chainActive.Height() + 1;
+
+ // BIP113 will require that time-locked transactions have nLockTime set to
+ // less than the median time of the previous block they're contained in.
+ // When the next block is created its previous block will be the current
+ // chain tip, so we use that to calculate the median time passed to
+ // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set.
+ const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
+ ? chainActive.Tip()->GetMedianTimePast()
+ : GetAdjustedTime();
+
+ return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
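
For context on the hunk above: CheckFinalTx() now picks either a height-based or a median-time-based cutoff before delegating to IsFinalTx(). The sketch below is a simplified restatement, not part of this patch; LOCKTIME_THRESHOLD and the final-sequence opt-out are assumptions taken from the surrounding codebase.

// Hedged sketch only -- the real IsFinalTx() lives elsewhere in main.cpp.
// Values of nLockTime below LOCKTIME_THRESHOLD are block heights; values at or
// above it are UNIX timestamps.
bool IsFinalTxSketch(const CTransaction& tx, int nBlockHeight, int64_t nBlockTime)
{
    if (tx.nLockTime == 0)
        return true;
    const int64_t nCutoff = ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD)
                                ? (int64_t)nBlockHeight   // height-encoded lock time
                                : nBlockTime;             // time-encoded lock time
    if ((int64_t)tx.nLockTime < nCutoff)
        return true;
    // A transaction whose lock time has not yet passed is still final if every
    // input carries the maximal nSequence, i.e. lock-time enforcement is disabled.
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
        if (txin.nSequence != std::numeric_limits<unsigned int>::max())
            return false;
    return true;
}
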
@@ -797,7 +823,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
- if (!CheckFinalTx(tx))
+ if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
@@ -806,15 +832,42 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
return state.Invalid(false, REJECT_ALREADY_KNOWN, "txn-already-in-mempool");
// Check for conflicts with in-memory transactions
+ set<uint256> setConflicts;
{
LOCK(pool.cs); // protect pool.mapNextTx
- for (unsigned int i = 0; i < tx.vin.size(); i++)
+ BOOST_FOREACH(const CTxIn &txin, tx.vin)
{
- COutPoint outpoint = tx.vin[i].prevout;
- if (pool.mapNextTx.count(outpoint))
+ if (pool.mapNextTx.count(txin.prevout))
{
- // Disable replacement feature for now
- return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");
+ const CTransaction *ptxConflicting = pool.mapNextTx[txin.prevout].ptx;
+ if (!setConflicts.count(ptxConflicting->GetHash()))
+ {
+ // Allow opt-out of transaction replacement by setting
+ // nSequence >= maxint-1 on all inputs.
+ //
+ // maxint-1 is picked to still allow use of nLockTime by
+ // non-replaceable transactions. All inputs rather than just one
+ // are checked for the sake of multi-party protocols, where we
+ // don't want a single party to be able to disable replacement.
+ //
+ // The opt-out ignores descendants as anyone relying on
+ // first-seen mempool behavior should be checking all
+ // unconfirmed ancestors anyway; doing otherwise is hopelessly
+ // insecure.
+ bool fReplacementOptOut = true;
+ BOOST_FOREACH(const CTxIn &txin, ptxConflicting->vin)
+ {
+ if (txin.nSequence < std::numeric_limits<unsigned int>::max()-1)
+ {
+ fReplacementOptOut = false;
+ break;
+ }
+ }
+ if (fReplacementOptOut)
+ return state.Invalid(false, REJECT_CONFLICT, "txn-mempool-conflict");
+
+ setConflicts.insert(ptxConflicting->GetHash());
+ }
}
}
}
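
The opt-out rule added above is easier to see in isolation. The helper below is a hedged sketch (it does not exist in this patch): a conflicting mempool transaction may only be replaced if at least one of its inputs signals replaceability with an nSequence below maxint-1.

// Sketch of the replaceability signal checked above, assuming the CTransaction
// and CTxIn types from this file. Returns true if the transaction has opted in
// to being replaced while unconfirmed.
static bool SignalsOptInReplacementSketch(const CTransaction& tx)
{
    BOOST_FOREACH(const CTxIn& txin, tx.vin) {
        if (txin.nSequence < std::numeric_limits<unsigned int>::max() - 1)
            return true;  // at least one replaceable input: opted in
    }
    return false;         // every input opted out of replacement
}
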
@@ -932,6 +985,160 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
return state.DoS(0, false, REJECT_NONSTANDARD, "too-long-mempool-chain", false, errString);
}
+ // A transaction that spends outputs that would be replaced by it is invalid. Now
+ // that we have the set of all ancestors we can detect this
+ // pathological case by making sure setConflicts and setAncestors don't
+ // intersect.
+ BOOST_FOREACH(CTxMemPool::txiter ancestorIt, setAncestors)
+ {
+ const uint256 &hashAncestor = ancestorIt->GetTx().GetHash();
+ if (setConflicts.count(hashAncestor))
+ {
+ return state.DoS(10, error("AcceptToMemoryPool: %s spends conflicting transaction %s",
+ hash.ToString(),
+ hashAncestor.ToString()),
+ REJECT_INVALID, "bad-txns-spends-conflicting-tx");
+ }
+ }
+
+ // Check if it's economically rational to mine this transaction rather
+ // than the ones it replaces.
+ CAmount nConflictingFees = 0;
+ size_t nConflictingSize = 0;
+ uint64_t nConflictingCount = 0;
+ CTxMemPool::setEntries allConflicting;
+
+ // If we don't hold the lock allConflicting might be incomplete; the
+ // subsequent RemoveStaged() and addUnchecked() calls don't guarantee
+ // mempool consistency for us.
+ LOCK(pool.cs);
+ if (setConflicts.size())
+ {
+ CFeeRate newFeeRate(nFees, nSize);
+ set<uint256> setConflictsParents;
+ const int maxDescendantsToVisit = 100;
+ CTxMemPool::setEntries setIterConflicting;
+ BOOST_FOREACH(const uint256 &hashConflicting, setConflicts)
+ {
+ CTxMemPool::txiter mi = pool.mapTx.find(hashConflicting);
+ if (mi == pool.mapTx.end())
+ continue;
+
+ // Save these to avoid repeated lookups
+ setIterConflicting.insert(mi);
+
+ // If this entry is "dirty", then we don't have descendant
+ // state for this transaction, which means we probably have
+ // lots of in-mempool descendants.
+ // Don't allow replacements of dirty transactions, to ensure
+ // that we don't spend too much time walking descendants.
+ // This should be rare.
+ if (mi->IsDirty()) {
+ return state.DoS(0,
+ error("AcceptToMemoryPool: rejecting replacement %s; cannot replace tx %s with untracked descendants",
+ hash.ToString(),
+ mi->GetTx().GetHash().ToString()),
+ REJECT_NONSTANDARD, "too many potential replacements");
+ }
+
+ // Don't allow the replacement to reduce the feerate of the
+ // mempool.
+ //
+ // We usually don't want to accept replacements with lower
+ // feerates than what they replaced as that would lower the
+ // feerate of the next block. Requiring that the feerate always
+ // be increased is also an easy-to-reason about way to prevent
+ // DoS attacks via replacements.
+ //
+ // The mining code doesn't (currently) take children into
+ // account (CPFP) so we only consider the feerates of
+ // transactions being directly replaced, not their indirect
+ // descendants. While that does mean high feerate children are
+ // ignored when deciding whether or not to replace, we do
+ // require the replacement to pay more overall fees too,
+ // mitigating most cases.
+ CFeeRate oldFeeRate(mi->GetFee(), mi->GetTxSize());
+ if (newFeeRate <= oldFeeRate)
+ {
+ return state.DoS(0,
+ error("AcceptToMemoryPool: rejecting replacement %s; new feerate %s <= old feerate %s",
+ hash.ToString(),
+ newFeeRate.ToString(),
+ oldFeeRate.ToString()),
+ REJECT_INSUFFICIENTFEE, "insufficient fee");
+ }
+
+ BOOST_FOREACH(const CTxIn &txin, mi->GetTx().vin)
+ {
+ setConflictsParents.insert(txin.prevout.hash);
+ }
+
+ nConflictingCount += mi->GetCountWithDescendants();
+ }
+ // This potentially overestimates the number of actual descendants
+ // but we just want to be conservative to avoid doing too much
+ // work.
+ if (nConflictingCount <= maxDescendantsToVisit) {
+ // If not too many to replace, then calculate the set of
+ // transactions that would have to be evicted
+ BOOST_FOREACH(CTxMemPool::txiter it, setIterConflicting) {
+ pool.CalculateDescendants(it, allConflicting);
+ }
+ BOOST_FOREACH(CTxMemPool::txiter it, allConflicting) {
+ nConflictingFees += it->GetFee();
+ nConflictingSize += it->GetTxSize();
+ }
+ } else {
+ return state.DoS(0,
+ error("AcceptToMemoryPool: rejecting replacement %s; too many potential replacements (%d > %d)\n",
+ hash.ToString(),
+ nConflictingCount,
+ maxDescendantsToVisit),
+ REJECT_NONSTANDARD, "too many potential replacements");
+ }
+
+ for (unsigned int j = 0; j < tx.vin.size(); j++)
+ {
+ // We don't want to accept replacements that require low
+ // feerate junk to be mined first. Ideally we'd keep track of
+ // the ancestor feerates and make the decision based on that,
+ // but for now requiring all new inputs to be confirmed works.
+ if (!setConflictsParents.count(tx.vin[j].prevout.hash))
+ {
+ // Rather than check the UTXO set - potentially expensive -
+ // it's cheaper to just check if the new input refers to a
+ // tx that's in the mempool.
+ if (pool.mapTx.find(tx.vin[j].prevout.hash) != pool.mapTx.end())
+ return state.DoS(0, error("AcceptToMemoryPool: replacement %s adds unconfirmed input, idx %d",
+ hash.ToString(), j),
+ REJECT_NONSTANDARD, "replacement-adds-unconfirmed");
+ }
+ }
+
+ // The replacement must pay greater fees than the transactions it
+ // replaces - otherwise the bandwidth used by those conflicting
+ // transactions would not be paid for.
+ if (nFees < nConflictingFees)
+ {
+ return state.DoS(0, error("AcceptToMemoryPool: rejecting replacement %s, less fees than conflicting txs; %s < %s",
+ hash.ToString(), FormatMoney(nFees), FormatMoney(nConflictingFees)),
+ REJECT_INSUFFICIENTFEE, "insufficient fee");
+ }
+
+ // Finally, in addition to paying more fees than the conflicts, the
+ // new transaction must pay for its own bandwidth.
+ CAmount nDeltaFees = nFees - nConflictingFees;
+ if (nDeltaFees < ::minRelayTxFee.GetFee(nSize))
+ {
+ return state.DoS(0,
+ error("AcceptToMemoryPool: rejecting replacement %s, not enough additional fees to relay; %s < %s",
+ hash.ToString(),
+ FormatMoney(nDeltaFees),
+ FormatMoney(::minRelayTxFee.GetFee(nSize))),
+ REJECT_INSUFFICIENTFEE, "insufficient fee");
+ }
+ }
+
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true))
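
Taken together, the replacement checks in the hunk above enforce three fee conditions (alongside the no-new-unconfirmed-inputs rule). The condensed sketch below is not a function in this patch; it restates the policy for a single directly conflicting feerate, with illustrative names.

// Hedged summary of the replacement fee policy:
//   (1) the replacement's feerate must exceed the feerate of each transaction
//       it directly conflicts with,
//   (2) its absolute fee must cover the fees of everything it would evict, and
//   (3) the fee delta must additionally pay for the replacement's own relay
//       bandwidth at the minimum relay feerate.
static bool PaysForReplacementSketch(CAmount nFees, size_t nSize,
                                     CAmount nConflictingFees,
                                     const CFeeRate& oldFeeRate)
{
    CFeeRate newFeeRate(nFees, nSize);
    if (newFeeRate <= oldFeeRate)
        return false;                                        // rule (1)
    if (nFees < nConflictingFees)
        return false;                                        // rule (2)
    CAmount nDeltaFees = nFees - nConflictingFees;
    return nDeltaFees >= ::minRelayTxFee.GetFee(nSize);      // rule (3)
}
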
@@ -952,6 +1159,17 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
__func__, hash.ToString(), FormatStateMessage(state));
}
+ // Remove conflicting transactions from the mempool
+ BOOST_FOREACH(const CTxMemPool::txiter it, allConflicting)
+ {
+ LogPrint("mempool", "replacing tx %s with %s for %s BTC additional fees, %d delta bytes\n",
+ it->GetTx().GetHash().ToString(),
+ hash.ToString(),
+ FormatMoney(nFees - nConflictingFees),
+ (int)nSize - (int)nConflictingSize);
+ }
+ pool.RemoveStaged(allConflicting);
+
// Store transaction in memory
pool.addUnchecked(hash, entry, setAncestors, !IsInitialBlockDownload());
@@ -973,7 +1191,7 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
}
/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
-bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
+bool GetTransaction(const uint256 &hash, CTransaction &txOut, const Consensus::Params& consensusParams, uint256 &hashBlock, bool fAllowSlow)
{
CBlockIndex *pindexSlow = NULL;
@@ -1019,7 +1237,7 @@ bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock
if (pindexSlow) {
CBlock block;
- if (ReadBlockFromDisk(block, pindexSlow)) {
+ if (ReadBlockFromDisk(block, pindexSlow, consensusParams)) {
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
if (tx.GetHash() == hash) {
txOut = tx;
@@ -1064,7 +1282,7 @@ bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHea
return true;
}
-bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
+bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
@@ -1082,15 +1300,15 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
}
// Check the header
- if (!CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))
+ if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
return true;
}
-bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
+bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
- if (!ReadBlockFromDisk(block, pindex->GetBlockPos()))
+ if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams))
return false;
if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
@@ -1285,10 +1503,17 @@ void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCach
undo.nVersion = coins->nVersion;
}
}
+ // add outputs
+ inputs.ModifyNewCoins(tx.GetHash())->FromTx(tx, nHeight);
+ }
+ else {
+ // add outputs for coinbase tx
+ // In this case call the full ModifyCoins which will do a database
+ // lookup to be sure the coins do not already exist; otherwise we do not
+ // know whether to mark them fresh or not. We want the duplicate coinbases
+ // before BIP30 to still be properly overwritten.
+ inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight);
}
-
- // add outputs
- inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight);
}
void UpdateCoins(const CTransaction& tx, CValidationState &state, CCoinsViewCache &inputs, int nHeight)
@@ -1686,6 +1911,8 @@ void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const
}
}
+static int64_t nTimeCheck = 0;
+static int64_t nTimeForks = 0;
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
@@ -1696,6 +1923,9 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
{
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
+
+ int64_t nTimeStart = GetTimeMicros();
+
// Check it again in case a previous version let a bad block in
if (!CheckBlock(block, state, !fJustCheck, !fJustCheck))
return false;
@@ -1721,6 +1951,9 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
}
}
+ int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
+ LogPrint("bench", " - Sanity checks: %.2fms [%.2fs]\n", 0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
+
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
@@ -1736,6 +1969,17 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock invocations which don't have a hash.
!((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
+
+ // Once BIP34 activated it was not possible to create new duplicate coinbases, and thus, other than starting
+ // with the 2 existing duplicate coinbase pairs, it was not possible to create overwriting txs. But by the
+ // time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
+ // before the first had been spent. Since those coinbases are sufficiently buried, it's no longer possible to create further
+ // duplicate transactions descending from the known pairs either.
+ // If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
+ CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
+ // Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
+ fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
+
if (fEnforceBIP30) {
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
const CCoins* coins = view.AccessCoins(tx.GetHash());
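
The BIP34-based shortcut above boils down to one condition. As a sketch (a hypothetical helper, assuming the Consensus::Params fields referenced in the hunk): once the ancestor at BIP34Height matches the known BIP34Hash, the per-transaction BIP30 database lookups can be skipped.

// Hedged restatement of the condition computed above; not a function in this patch.
static bool StillNeedsBIP30CheckSketch(const CBlockIndex* pindex,
                                       const Consensus::Params& params,
                                       bool fEnforceBIP30)
{
    const CBlockIndex* pindexBIP34height = pindex->pprev->GetAncestor(params.BIP34Height);
    const bool fOnKnownBIP34Chain =
        pindexBIP34height && pindexBIP34height->GetBlockHash() == params.BIP34Hash;
    // Keep enforcing BIP30 only while we cannot prove we are past BIP34
    // activation on the expected chain.
    return fEnforceBIP30 && !fOnKnownBIP34Chain;
}
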
@@ -1763,11 +2007,13 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
+ int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
+ LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
+
CBlockUndo blockundo;
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
- int64_t nTimeStart = GetTimeMicros();
CAmount nFees = 0;
int nInputs = 0;
unsigned int nSigOps = 0;
@@ -1805,7 +2051,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
nFees += view.GetValueIn(tx)-tx.GetValueOut();
std::vector<CScriptCheck> vChecks;
- if (!CheckInputs(tx, state, view, fScriptChecks, flags, false, nScriptCheckThreads ? &vChecks : NULL))
+ bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
+ if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, nScriptCheckThreads ? &vChecks : NULL))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
@@ -1820,8 +2067,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
- int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
- LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
+ int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
+ LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2), 0.001 * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * 0.000001);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0].GetValueOut() > blockReward)
@@ -1832,8 +2079,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
if (!control.Wait())
return state.DoS(100, false);
- int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart;
- LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001);
+ int64_t nTime4 = GetTimeMicros(); nTimeVerify += nTime4 - nTime2;
+ LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime4 - nTime2), nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs-1), nTimeVerify * 0.000001);
if (fJustCheck)
return true;
@@ -1864,16 +2111,16 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
- int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2;
- LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001);
+ int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
+ LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
// Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase;
GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = block.vtx[0].GetHash();
- int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3;
- LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001);
+ int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
+ LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
return true;
}
@@ -1892,6 +2139,7 @@ enum FlushStateMode {
* or always and in all cases if we're in prune mode and are deleting files.
*/
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
+ const CChainParams& chainparams = Params();
LOCK2(cs_main, cs_LastBlockFile);
static int64_t nLastWrite = 0;
static int64_t nLastFlush = 0;
@@ -1900,7 +2148,7 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
bool fFlushForPrune = false;
try {
if (fPruneMode && fCheckForPruning && !fReindex) {
- FindFilesToPrune(setFilesToPrune);
+ FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight());
fCheckForPruning = false;
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
@@ -2039,13 +2287,14 @@ void static UpdateTip(CBlockIndex *pindexNew) {
}
/** Disconnect chainActive's tip. You want to manually re-limit mempool size after this */
-bool static DisconnectTip(CValidationState &state) {
+bool static DisconnectTip(CValidationState& state, const Consensus::Params& consensusParams)
+{
CBlockIndex *pindexDelete = chainActive.Tip();
assert(pindexDelete);
mempool.check(pcoinsTip);
// Read block from disk.
CBlock block;
- if (!ReadBlockFromDisk(block, pindexDelete))
+ if (!ReadBlockFromDisk(block, pindexDelete, consensusParams))
return AbortNode(state, "Failed to read block");
// Apply the block atomically to the chain state.
int64_t nStart = GetTimeMicros();
@@ -2099,14 +2348,15 @@ static int64_t nTimePostConnect = 0;
* Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
*/
-bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, const CBlock *pblock) {
+bool static ConnectTip(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexNew, const CBlock* pblock)
+{
assert(pindexNew->pprev == chainActive.Tip());
mempool.check(pcoinsTip);
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
CBlock block;
if (!pblock) {
- if (!ReadBlockFromDisk(block, pindexNew))
+ if (!ReadBlockFromDisk(block, pindexNew, chainparams.GetConsensus()))
return AbortNode(state, "Failed to read block");
pblock = &block;
}
@@ -2231,7 +2481,8 @@ static void PruneBlockIndexCandidates() {
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
*/
-static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMostWork, const CBlock *pblock) {
+static bool ActivateBestChainStep(CValidationState& state, const CChainParams& chainparams, CBlockIndex* pindexMostWork, const CBlock* pblock)
+{
AssertLockHeld(cs_main);
bool fInvalidFound = false;
const CBlockIndex *pindexOldTip = chainActive.Tip();
@@ -2240,7 +2491,7 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
- if (!DisconnectTip(state))
+ if (!DisconnectTip(state, chainparams.GetConsensus()))
return false;
fBlocksDisconnected = true;
}
@@ -2264,7 +2515,7 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo
// Connect new blocks.
BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
- if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
+ if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (!state.CorruptionPossible())
@@ -2305,10 +2556,10 @@ static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMo
* or an activated best chain. pblock is either NULL or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*/
-bool ActivateBestChain(CValidationState &state, const CBlock *pblock) {
+bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock)
+{
CBlockIndex *pindexNewTip = NULL;
CBlockIndex *pindexMostWork = NULL;
- const CChainParams& chainParams = Params();
do {
boost::this_thread::interruption_point();
@@ -2321,7 +2572,7 @@ bool ActivateBestChain(CValidationState &state, const CBlock *pblock) {
if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
return true;
- if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
+ if (!ActivateBestChainStep(state, chainparams, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
return false;
pindexNewTip = chainActive.Tip();
@@ -2335,7 +2586,7 @@ bool ActivateBestChain(CValidationState &state, const CBlock *pblock) {
// Relay inventory, but don't relay old inventory during initial block download.
int nBlockEstimate = 0;
if (fCheckpointsEnabled)
- nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints());
+ nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints());
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
@@ -2347,7 +2598,7 @@ bool ActivateBestChain(CValidationState &state, const CBlock *pblock) {
uiInterface.NotifyBlockTip(hashNewTip);
}
} while(pindexMostWork != chainActive.Tip());
- CheckBlockIndex();
+ CheckBlockIndex(chainparams.GetConsensus());
// Write changes periodically to disk, after relay.
if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
@@ -2357,7 +2608,8 @@ bool ActivateBestChain(CValidationState &state, const CBlock *pblock) {
return true;
}
-bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
+bool InvalidateBlock(CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex *pindex)
+{
AssertLockHeld(cs_main);
// Mark the block itself as invalid.
@@ -2372,7 +2624,7 @@ bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
setBlockIndexCandidates.erase(pindexWalk);
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
- if (!DisconnectTip(state)) {
+ if (!DisconnectTip(state, consensusParams)) {
return false;
}
}
@@ -2517,8 +2769,6 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
if (!fKnown) {
while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
- LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
- FlushBlockFile(true);
nFile++;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
@@ -2528,7 +2778,14 @@ bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAdd
pos.nPos = vinfoBlockFile[nFile].nSize;
}
- nLastBlockFile = nFile;
+ if ((int)nFile != nLastBlockFile) {
+ if (!fKnown) {
+ LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ }
+ FlushBlockFile(!fKnown);
+ nLastBlockFile = nFile;
+ }
+
vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (fKnown)
vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
@@ -2723,10 +2980,15 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
const Consensus::Params& consensusParams = Params().GetConsensus();
// Check that all transactions are finalized
- BOOST_FOREACH(const CTransaction& tx, block.vtx)
- if (!IsFinalTx(tx, nHeight, block.GetBlockTime())) {
+ BOOST_FOREACH(const CTransaction& tx, block.vtx) {
+ int nLockTimeFlags = 0;
+ int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
+ ? pindexPrev->GetMedianTimePast()
+ : block.GetBlockTime();
+ if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal");
}
+ }
// Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
// if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
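
The nLockTimeCutoff introduced a few lines above depends on the median-time-past of the previous block when LOCKTIME_MEDIAN_TIME_PAST is set. The sketch below restates how that value is commonly computed; the real implementation is CBlockIndex::GetMedianTimePast() in chain.h, and the window size of 11 is an assumption drawn from that code.

// Hedged sketch of median-time-past: the median of the timestamps of the last
// (up to) 11 blocks ending at pindex.
static int64_t MedianTimePastSketch(const CBlockIndex* pindex)
{
    std::vector<int64_t> vTimes;
    for (int i = 0; i < 11 && pindex != NULL; i++, pindex = pindex->pprev)
        vTimes.push_back(pindex->GetBlockTime());
    std::sort(vTimes.begin(), vTimes.end());
    return vTimes[vTimes.size() / 2];
}
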
@@ -2742,9 +3004,8 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
return true;
}
-bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
+static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex=NULL)
{
- const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
// Check for duplicate
uint256 hash = block.GetHash();
@@ -2790,14 +3051,14 @@ bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBloc
return true;
}
-bool AcceptBlock(const CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
+/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
+static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
{
- const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
CBlockIndex *&pindex = *ppindex;
- if (!AcceptBlockHeader(block, state, &pindex))
+ if (!AcceptBlockHeader(block, state, chainparams, &pindex))
return false;
// Try to process all requested blocks that we don't have, but only
@@ -2867,7 +3128,7 @@ static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned
}
-bool ProcessNewBlock(CValidationState &state, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp)
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, CDiskBlockPos* dbp)
{
// Preliminary checks
bool checked = CheckBlock(*pblock, state);
@@ -2882,24 +3143,23 @@ bool ProcessNewBlock(CValidationState &state, const CNode* pfrom, const CBlock*
// Store to disk
CBlockIndex *pindex = NULL;
- bool ret = AcceptBlock(*pblock, state, &pindex, fRequested, dbp);
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
}
- CheckBlockIndex();
+ CheckBlockIndex(chainparams.GetConsensus());
if (!ret)
return error("%s: AcceptBlock FAILED", __func__);
}
- if (!ActivateBestChain(state, pblock))
+ if (!ActivateBestChain(state, chainparams, pblock))
return error("%s: ActivateBestChain failed", __func__);
return true;
}
-bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
+bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
- const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
assert(pindexPrev && pindexPrev == chainActive.Tip());
if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, block.GetHash()))
@@ -2982,13 +3242,13 @@ void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
}
/* Calculate the block/rev files that should be deleted to remain under target*/
-void FindFilesToPrune(std::set<int>& setFilesToPrune)
+void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight)
{
LOCK2(cs_main, cs_LastBlockFile);
if (chainActive.Tip() == NULL || nPruneTarget == 0) {
return;
}
- if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
+ if (chainActive.Tip()->nHeight <= nPruneAfterHeight) {
return;
}
@@ -3216,7 +3476,7 @@ CVerifyDB::~CVerifyDB()
uiInterface.ShowProgress("", 100);
}
-bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
+bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
@@ -3242,7 +3502,7 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth
break;
CBlock block;
// check level 0: read from disk
- if (!ReadBlockFromDisk(block, pindex))
+ if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state))
@@ -3282,7 +3542,7 @@ bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth
uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
pindex = chainActive.Next(pindex);
CBlock block;
- if (!ReadBlockFromDisk(block, pindex))
+ if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!ConnectBlock(block, state, pindex, coins))
return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
@@ -3333,9 +3593,8 @@ bool LoadBlockIndex()
return true;
}
-
-bool InitBlockIndex() {
- const CChainParams& chainparams = Params();
+bool InitBlockIndex(const CChainParams& chainparams)
+{
LOCK(cs_main);
// Initialize global variables that cannot be constructed at startup.
@@ -3353,7 +3612,7 @@ bool InitBlockIndex() {
// Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
if (!fReindex) {
try {
- CBlock &block = const_cast<CBlock&>(Params().GenesisBlock());
+ CBlock &block = const_cast<CBlock&>(chainparams.GenesisBlock());
// Start new block file
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
@@ -3365,7 +3624,7 @@ bool InitBlockIndex() {
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted");
- if (!ActivateBestChain(state, &block))
+ if (!ActivateBestChain(state, chainparams, &block))
return error("LoadBlockIndex(): genesis block cannot be activated");
// Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
@@ -3377,11 +3636,8 @@ bool InitBlockIndex() {
return true;
}
-
-
-bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
+bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskBlockPos *dbp)
{
- const CChainParams& chainparams = Params();
// Map of disk positions for blocks with unknown parent (only used for reindex)
static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
@@ -3401,10 +3657,10 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
try {
// locate a header
unsigned char buf[MESSAGE_START_SIZE];
- blkdat.FindByte(Params().MessageStart()[0]);
+ blkdat.FindByte(chainparams.MessageStart()[0]);
nRewind = blkdat.GetPos()+1;
blkdat >> FLATDATA(buf);
- if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE))
+ if (memcmp(buf, chainparams.MessageStart(), MESSAGE_START_SIZE))
continue;
// read size
blkdat >> nSize;
@@ -3438,7 +3694,7 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
// process in case the block isn't known yet
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
- if (ProcessNewBlock(state, NULL, &block, true, dbp))
+ if (ProcessNewBlock(state, chainparams, NULL, &block, true, dbp))
nLoaded++;
if (state.IsError())
break;
@@ -3455,12 +3711,12 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
- if (ReadBlockFromDisk(block, it->second))
+ if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()))
{
LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
head.ToString());
CValidationState dummy;
- if (ProcessNewBlock(dummy, NULL, &block, true, &it->second))
+ if (ProcessNewBlock(dummy, chainparams, NULL, &block, true, &it->second))
{
nLoaded++;
queue.push_back(block.GetHash());
@@ -3482,9 +3738,8 @@ bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
return nLoaded > 0;
}
-void static CheckBlockIndex()
+void static CheckBlockIndex(const Consensus::Params& consensusParams)
{
- const Consensus::Params& consensusParams = Params().GetConsensus();
if (!fCheckBlockIndex) {
return;
}
@@ -3766,7 +4021,7 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
return true;
}
-void static ProcessGetData(CNode* pfrom)
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
@@ -3799,15 +4054,16 @@ void static ProcessGetData(CNode* pfrom)
// best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
- (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
+ (GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
+ // never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
- if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) )
+ if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
@@ -3821,7 +4077,7 @@ void static ProcessGetData(CNode* pfrom)
{
// Send block from disk
CBlock block;
- if (!ReadBlockFromDisk(block, (*mi).second))
+ if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
pfrom->PushMessage("block", block);
@@ -3921,6 +4177,19 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
+ if (!(nLocalServices & NODE_BLOOM) &&
+ (strCommand == "filterload" ||
+ strCommand == "filteradd" ||
+ strCommand == "filterclear"))
+ {
+ if (pfrom->nVersion >= NO_BLOOM_VERSION) {
+ Misbehaving(pfrom->GetId(), 100);
+ return false;
+ } else if (GetBoolArg("-enforcenodebloom", false)) {
+ pfrom->fDisconnect = true;
+ return false;
+ }
+ }
if (strCommand == "version")
@@ -3998,9 +4267,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
CAddress addr = GetLocalAddress(&pfrom->addr);
if (addr.IsRoutable())
{
+ LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString());
pfrom->PushAddress(addr);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(pfrom->addrLocal);
+ LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString());
pfrom->PushAddress(addr);
}
}
@@ -4141,6 +4412,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return error("message inv size() = %u", vInv.size());
}
+ bool fBlocksOnly = GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
+
+ // Allow whitelisted peers to send data other than blocks in blocks-only mode if whitelistalwaysrelay is true
+ if (pfrom->fWhitelisted && GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY))
+ fBlocksOnly = false;
+
LOCK(cs_main);
std::vector<CInv> vToFetch;
@@ -4155,9 +4432,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
- if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK)
- pfrom->AskFor(inv);
-
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
@@ -4181,6 +4455,13 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
}
}
+ else
+ {
+ if (fBlocksOnly)
+ LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
+ else if (!fAlreadyHave && !fImporting && !fReindex)
+ pfrom->AskFor(inv);
+ }
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
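
The -blocksonly handling added above changes what a "tx" inv leads to. A condensed sketch of the resulting decision follows (option names as used in the hunk; the DEFAULT_* constants and the fImporting/fReindex globals are defined elsewhere in the tree).

// Hedged sketch: should a transaction announced via inv be requested?
static bool ShouldFetchTxInvSketch(const CNode* pfrom, bool fAlreadyHave)
{
    bool fBlocksOnly = GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
    // Whitelisted peers may still relay transactions when -whitelistalwaysrelay is set.
    if (pfrom->fWhitelisted && GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY))
        fBlocksOnly = false;
    if (fBlocksOnly)
        return false;  // the inv is logged as a protocol violation and ignored
    return !fAlreadyHave && !fImporting && !fReindex;
}
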
@@ -4213,7 +4494,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom);
+ ProcessGetData(pfrom, chainparams.GetConsensus());
}
@@ -4268,10 +4549,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
vRecv >> locator >> hashStop;
LOCK(cs_main);
-
- if (IsInitialBlockDownload())
+ if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
+ LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
return true;
-
+ }
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
@@ -4305,6 +4586,14 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
else if (strCommand == "tx")
{
+ // Stop processing the transaction early if we are in blocks-only mode
+ // and the peer is either not whitelisted or whitelistalwaysrelay is off
+ if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY) && (!pfrom->fWhitelisted || !GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY)))
+ {
+ LogPrint("net", "transaction sent in violation of protocol peer=%d\n", pfrom->id);
+ return true;
+ }
+
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CTransaction tx;
@@ -4402,7 +4691,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
assert(recentRejects);
recentRejects->insert(tx.GetHash());
- if (pfrom->fWhitelisted) {
+ if (pfrom->fWhitelisted && GetBoolArg("-whitelistalwaysrelay", DEFAULT_WHITELISTALWAYSRELAY)) {
// Always relay transactions received from whitelisted peers, even
// if they were rejected from the mempool, allowing the node to
// function as a gateway for nodes hidden behind it.
@@ -4458,7 +4747,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
Misbehaving(pfrom->GetId(), 20);
return error("non-continuous headers sequence");
}
- if (!AcceptBlockHeader(header, state, &pindexLast)) {
+ if (!AcceptBlockHeader(header, state, chainparams, &pindexLast)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
if (nDoS > 0)
@@ -4479,7 +4768,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
}
- CheckBlockIndex();
+ CheckBlockIndex(chainparams.GetConsensus());
}
else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
@@ -4498,7 +4787,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
- ProcessNewBlock(state, pfrom, &block, forceProcessing, NULL);
+ ProcessNewBlock(state, chainparams, pfrom, &block, forceProcessing, NULL);
int nDoS;
if (state.IsInvalid(nDoS)) {
assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
@@ -4639,7 +4928,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
uint256 alertHash = alert.GetHash();
if (pfrom->setKnown.count(alertHash) == 0)
{
- if (alert.ProcessAlert(Params().AlertKey()))
+ if (alert.ProcessAlert(chainparams.AlertKey()))
{
// Relay
pfrom->setKnown.insert(alertHash);
@@ -4662,21 +4951,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
- else if (!(nLocalServices & NODE_BLOOM) &&
- (strCommand == "filterload" ||
- strCommand == "filteradd" ||
- strCommand == "filterclear") &&
- //TODO: Remove this line after reasonable network upgrade
- pfrom->nVersion >= NO_BLOOM_VERSION)
- {
- if (pfrom->nVersion >= NO_BLOOM_VERSION)
- Misbehaving(pfrom->GetId(), 100);
- //TODO: Enable this after reasonable network upgrade
- //else
- // pfrom->fDisconnect = true;
- }
-
-
else if (strCommand == "filterload")
{
CBloomFilter filter;
@@ -4763,6 +5037,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom)
{
+ const CChainParams& chainparams = Params();
//if (fDebug)
// LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
@@ -4777,7 +5052,7 @@ bool ProcessMessages(CNode* pfrom)
bool fOk = true;
if (!pfrom->vRecvGetData.empty())
- ProcessGetData(pfrom);
+ ProcessGetData(pfrom, chainparams.GetConsensus());
// this maintains the order of responses
if (!pfrom->vRecvGetData.empty()) return fOk;
@@ -4804,7 +5079,7 @@ bool ProcessMessages(CNode* pfrom)
it++;
// Scan for message start
- if (memcmp(msg.hdr.pchMessageStart, Params().MessageStart(), MESSAGE_START_SIZE) != 0) {
+ if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
fOk = false;
break;
@@ -4812,7 +5087,7 @@ bool ProcessMessages(CNode* pfrom)
// Read header
CMessageHeader& hdr = msg.hdr;
- if (!hdr.IsValid(Params().MessageStart()))
+ if (!hdr.IsValid(chainparams.MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
continue;