Diffstat (limited to 'src/main.cpp')
-rw-r--r--  src/main.cpp  971
1 file changed, 820 insertions(+), 151 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 91da3f47a5..b86bbda1b8 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -7,6 +7,7 @@
#include "addrman.h"
#include "arith_uint256.h"
+#include "blockencodings.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
@@ -88,12 +89,22 @@ CAmount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
CTxMemPool mempool(::minRelayTxFee);
FeeFilterRounder filterRounder(::minRelayTxFee);
+struct IteratorComparator
+{
+ template<typename I>
+ bool operator()(const I& a, const I& b)
+ {
+ return &(*a) < &(*b);
+ }
+};
+
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
+ int64_t nTimeExpire;
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
-map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
+map<COutPoint, set<map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
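As an aside, a minimal standalone sketch (not part of this patch) of why IteratorComparator compares element addresses: std::map iterators have no operator<, but the address of the element an iterator points to is stable for that element's lifetime, so it supplies the strict weak ordering that the std::set inside mapOrphanTransactionsByPrev needs.

#include <iterator>
#include <map>
#include <set>
#include <string>

// Same idea as IteratorComparator above: order iterators by where they point.
struct ByAddress {
    template <typename I>
    bool operator()(const I& a, const I& b) const { return &(*a) < &(*b); }
};

int main() {
    std::map<int, std::string> m{{1, "a"}, {2, "b"}};
    std::set<std::map<int, std::string>::iterator, ByAddress> s;
    s.insert(m.begin());
    s.insert(std::next(m.begin()));
    return s.size() == 2 ? 0 : 1;  // both iterators stored, no duplicates
}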
/**
@@ -197,11 +208,15 @@ namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
- CBlockIndex* pindex; //!< Optional.
- bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
+ CBlockIndex* pindex; //!< Optional.
+ bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
+ std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
+ /** List of nodes which we have set to announce using compact blocks */
+ list<NodeId> lNodesAnnouncingHeaderAndIDs;
+
/** Number of preferable block download peers. */
int nPreferredDownload = 0;
@@ -274,6 +289,12 @@ struct CNodeState {
bool fPreferredDownload;
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders;
+ //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
+ bool fPreferHeaderAndIDs;
+ //! Whether this peer will send us cmpctblocks if we request them
+ bool fProvidesHeaderAndIDs;
+ //! Whether this peer can give us witnesses
+ bool fHaveWitness;
CNodeState() {
fCurrentlyConnected = false;
@@ -290,6 +311,9 @@ struct CNodeState {
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
fPreferHeaders = false;
+ fPreferHeaderAndIDs = false;
+ fProvidesHeaderAndIDs = false;
+ fHaveWitness = false;
}
};
@@ -358,6 +382,7 @@ void FinalizeNode(NodeId nodeid) {
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
+// Also used if a block was /not/ received and timed out or started with another peer
bool MarkBlockAsReceived(const uint256& hash) {
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
@@ -381,17 +406,26 @@ bool MarkBlockAsReceived(const uint256& hash) {
}
// Requires cs_main.
-void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
+// returns false, still setting pit, if the block was already in flight from the same peer
+// pit will only be valid as long as the same cs_main lock is being held
+bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
+ // Short-circuit most stuff in case it's from the same node
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
+ if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
+ *pit = &itInFlight->second.second;
+ return false;
+ }
+
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
- QueuedBlock newentry = {hash, pindex, pindex != NULL};
- list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
+ list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
+ {hash, pindex, pindex != NULL, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : NULL)});
state->nBlocksInFlight++;
- state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
+ state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
state->nDownloadingSince = GetTimeMicros();
@@ -399,7 +433,10 @@ void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Pa
if (state->nBlocksInFlightValidHeaders == 1 && pindex != NULL) {
nPeersWithValidatedDownloads++;
}
- mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
+ itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
+ if (pit)
+ *pit = &itInFlight->second.second;
+ return true;
}
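A standalone sketch (simplified, not from the patch) of the insert-or-reuse contract the new MarkBlockAsInFlight follows: it returns true when a fresh in-flight entry is created and false when one already exists for the same peer, and either way the out-parameter points into the map, so it is only valid while the map is not modified (cs_main held, in the real code).

#include <map>
#include <string>

bool MarkInFlight(std::map<int, std::string>& inflight, int key,
                  const std::string& value, std::string** out) {
    auto it = inflight.find(key);
    if (it != inflight.end()) {
        if (out) *out = &it->second;   // already present: hand back the existing slot
        return false;
    }
    it = inflight.emplace(key, value).first;
    if (out) *out = &it->second;       // newly inserted slot
    return true;
}

int main() {
    std::map<int, std::string> inflight;
    std::string* slot = nullptr;
    bool fresh = MarkInFlight(inflight, 7, "blockhash", &slot);   // true
    bool again = MarkInFlight(inflight, 7, "blockhash", &slot);   // false, slot reused
    return (fresh && !again && slot != nullptr) ? 0 : 1;
}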
/** Check whether the last unknown block a peer advertised is not yet known. */
@@ -435,6 +472,32 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
}
}
+void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom) {
+ if (nLocalServices & NODE_WITNESS) {
+ // Don't ever request compact blocks when segwit is enabled.
+ return;
+ }
+ if (nodestate->fProvidesHeaderAndIDs) {
+ BOOST_FOREACH(const NodeId nodeid, lNodesAnnouncingHeaderAndIDs)
+ if (nodeid == pfrom->GetId())
+ return;
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = 1;
+ if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
+ // As per BIP152, we only get 3 of our peers to announce
+ // blocks using compact encodings.
+ CNode* pnodeStop = FindNode(lNodesAnnouncingHeaderAndIDs.front());
+ if (pnodeStop) {
+ pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ lNodesAnnouncingHeaderAndIDs.pop_front();
+ }
+ }
+ fAnnounceUsingCMPCTBLOCK = true;
+ pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
+ }
+}
+
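The rotation above keeps at most three peers announcing blocks via compact encodings, as BIP152 suggests. A standalone sketch (simplified, not from the patch) of that bookkeeping:

#include <cstdio>
#include <initializer_list>
#include <list>

static std::list<int> announcers;   // oldest announcer at the front

void AddAnnouncer(int nodeid) {
    for (int id : announcers)
        if (id == nodeid) return;                        // already announcing
    if (announcers.size() >= 3) {
        std::printf("ask node %d to stop announcing\n", announcers.front());
        announcers.pop_front();                          // demote the oldest
    }
    std::printf("ask node %d to start announcing\n", nodeid);
    announcers.push_back(nodeid);
}

int main() {
    for (int id : {10, 11, 12, 13}) AddAnnouncer(id);    // adding 13 demotes 10
}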
// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
@@ -599,6 +662,9 @@ CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& loc
CBlockIndex* pindex = (*mi).second;
if (chain.Contains(pindex))
return pindex;
+ if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
+ return chain.Tip();
+ }
}
}
return chain.Genesis();
@@ -623,40 +689,42 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(c
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
- // 10,000 orphans, each of which is at most 5,000 bytes big is
- // at most 500 megabytes of orphans:
- unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
- if (sz > 5000)
+ // 100 orphans, each of which is at most 99,999 bytes big is
+ // at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
+ unsigned int sz = GetTransactionCost(tx);
+ if (sz >= MAX_STANDARD_TX_COST)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
- mapOrphanTransactions[hash].tx = tx;
- mapOrphanTransactions[hash].fromPeer = peer;
- BOOST_FOREACH(const CTxIn& txin, tx.vin)
- mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
+ auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
+ assert(ret.second);
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
+ }
- LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
+ LogPrint("mempool", "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
-void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
- return;
+ return 0;
BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
{
- map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
+ auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
- itPrev->second.erase(hash);
+ itPrev->second.erase(it);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}
mapOrphanTransactions.erase(it);
+ return 1;
}
void EraseOrphansFor(NodeId peer)
@@ -668,8 +736,7 @@ void EraseOrphansFor(NodeId peer)
map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
- EraseOrphanTx(maybeErase->second.tx.GetHash());
- ++nErased;
+ nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
}
}
if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
@@ -679,6 +746,26 @@ void EraseOrphansFor(NodeId peer)
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
+ static int64_t nNextSweep;
+ int64_t nNow = GetTime();
+ if (nNextSweep <= nNow) {
+ // Sweep out expired orphan pool entries:
+ int nErased = 0;
+ int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
+ map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
+ while (iter != mapOrphanTransactions.end())
+ {
+ map<uint256, COrphanTx>::iterator maybeErase = iter++;
+ if (maybeErase->second.nTimeExpire <= nNow) {
+ nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
+ } else {
+ nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
+ }
+ }
+ // Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
+ nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
+ if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx due to expiration\n", nErased);
+ }
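A standalone sketch (simplified, not from the patch) of the batching above: the expiry scan only runs once nNextSweep has passed, and the next sweep is scheduled an interval after the earliest remaining expiry, so the linear scan is not repeated on every call.

#include <algorithm>
#include <cstdint>
#include <map>

static const int64_t EXPIRE_TIME = 20 * 60;    // assumed per-entry lifetime, seconds
static const int64_t SWEEP_INTERVAL = 5 * 60;  // assumed batching interval, seconds

void MaybeSweep(std::map<int, int64_t>& expiries, int64_t now, int64_t& nextSweep) {
    if (nextSweep > now) return;               // not due yet
    int64_t minExpiry = now + EXPIRE_TIME - SWEEP_INTERVAL;
    for (auto it = expiries.begin(); it != expiries.end();) {
        if (it->second <= now) it = expiries.erase(it);               // expired
        else { minExpiry = std::min(minExpiry, it->second); ++it; }   // still live
    }
    nextSweep = minExpiry + SWEEP_INTERVAL;    // batch the next scan
}

int main() {
    std::map<int, int64_t> expiries{{1, 100}, {2, 500}};
    int64_t nextSweep = 0;
    MaybeSweep(expiries, /*now=*/200, nextSweep);   // entry 1 expired, entry 2 kept
    return expiries.size() == 1 ? 0 : 1;
}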
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
@@ -938,8 +1025,24 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
return nSigOps;
}
+int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& inputs, int flags)
+{
+ int64_t nSigOps = GetLegacySigOpCount(tx) * WITNESS_SCALE_FACTOR;
+
+ if (tx.IsCoinBase())
+ return nSigOps;
+ if (flags & SCRIPT_VERIFY_P2SH) {
+ nSigOps += GetP2SHSigOpCount(tx, inputs) * WITNESS_SCALE_FACTOR;
+ }
+ for (unsigned int i = 0; i < tx.vin.size(); i++)
+ {
+ const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
+ nSigOps += CountWitnessSigOps(tx.vin[i].scriptSig, prevout.scriptPubKey, i < tx.wit.vtxinwit.size() ? &tx.wit.vtxinwit[i].scriptWitness : NULL, flags);
+ }
+ return nSigOps;
+}
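A standalone sketch (not from the patch; the constant mirrors WITNESS_SCALE_FACTOR = 4) of how sigop cost weights the categories counted above: legacy and P2SH sigops are scaled by four while witness-program sigops count once, matching the 4x weighting of base bytes versus witness bytes in block cost.

#include <cstdint>
#include <cstdio>

static const int64_t SCALE = 4;   // assumed to mirror WITNESS_SCALE_FACTOR

int64_t SigOpCost(int64_t legacy, int64_t p2sh, int64_t witness) {
    return (legacy + p2sh) * SCALE + witness;
}

int main() {
    // 2 legacy + 1 P2SH + 3 witness sigops -> cost (2 + 1) * 4 + 3 = 15
    std::printf("%lld\n", (long long)SigOpCost(2, 1, 3));
}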
@@ -952,8 +1055,8 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state)
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
if (tx.vout.empty())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
- // Size limits
- if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
+ // Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
+ if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
// Check for negative or overflow output values
@@ -1042,6 +1145,11 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
return state.DoS(0, false, REJECT_NONSTANDARD, "premature-version2-tx");
}
+ // Reject transactions with witness before segregated witness activates (override with -prematurewitness)
+ if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus())) {
+ return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
+ }
+
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
@@ -1154,8 +1262,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
if (fRequireStandard && !AreInputsStandard(tx, view))
return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
- unsigned int nSigOps = GetLegacySigOpCount(tx);
- nSigOps += GetP2SHSigOpCount(tx, view);
+ int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
CAmount nValueOut = tx.GetValueOut();
CAmount nFees = nValueIn-nValueOut;
@@ -1178,7 +1285,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
}
}
- CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOps, lp);
+ CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), pool.HasNoInputsOf(tx), inChainInputValue, fSpendsCoinbase, nSigOpsCost, lp);
unsigned int nSize = entry.GetTxSize();
// Check that the transaction doesn't have an excessive number of
@@ -1186,9 +1293,9 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
// itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
// MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
// merely non-standard transaction.
- if ((nSigOps > MAX_STANDARD_TX_SIGOPS) || (nBytesPerSigOp && nSigOps > nSize / nBytesPerSigOp))
+ if ((nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST) || (nBytesPerSigOp && nSigOpsCost > nSize * WITNESS_SCALE_FACTOR / nBytesPerSigOp))
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
- strprintf("%d", nSigOps));
+ strprintf("%d", nSigOpsCost));
CAmount mempoolRejectFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFee(nSize);
if (mempoolRejectFee > 0 && nModifiedFees < mempoolRejectFee) {
@@ -1380,10 +1487,24 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
}
}
+ unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
+ if (!Params().RequireStandard()) {
+ scriptVerifyFlags = GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
+ }
+
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
- if (!CheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true))
- return false; // state filled in by CheckInputs
+ if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true)) {
+ // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
+ // need to turn both off, and compare against just turning off CLEANSTACK
+ // to see if the failure is specifically due to witness validation.
+ if (CheckInputs(tx, state, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true) &&
+ !CheckInputs(tx, state, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true)) {
+ // Only the witness is wrong, so the transaction itself may be fine.
+ state.SetCorruptionPossible();
+ }
+ return false;
+ }
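A standalone sketch (simplified, not from the patch) of the flag masking used above: CLEANSTACK implies WITNESS, so the retry that disables witness validation has to clear both bits, and a failure is attributed to the witness only when that retry passes while clearing CLEANSTACK alone still fails.

#include <cstdio>

enum : unsigned { F_WITNESS = 1u << 0, F_CLEANSTACK = 1u << 1 };

// Hypothetical verifier that fails whenever witness validation is requested.
bool Verify(unsigned flags) { return !(flags & F_WITNESS); }

int main() {
    unsigned flags = F_WITNESS | F_CLEANSTACK;
    if (!Verify(flags)) {
        bool okWithoutWitness   = Verify(flags & ~(F_WITNESS | F_CLEANSTACK));
        bool okWithoutCleanOnly = Verify(flags & ~F_CLEANSTACK);
        if (okWithoutWitness && !okWithoutCleanOnly)
            std::printf("failure is specific to witness validation\n");
    }
}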
// Check again against just the consensus-critical mandatory script
// verification flags, in case of bugs in the standard flags that cause
@@ -1790,7 +1911,8 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
- if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, cacheStore), &error)) {
+ const CScriptWitness *witness = (nIn < ptxTo->wit.vtxinwit.size()) ? &ptxTo->wit.vtxinwit[nIn].scriptWitness : NULL;
+ if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore), &error)) {
return false;
}
return true;
@@ -2179,7 +2301,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
- if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime(), !fJustCheck, !fJustCheck))
+ if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
// verify that the view's current state corresponds to the previous block
@@ -2266,6 +2388,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
}
+ // Start enforcing WITNESS rules using versionbits logic.
+ if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
+ flags |= SCRIPT_VERIFY_WITNESS;
+ }
+
int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
LogPrint("bench", " - Fork checks: %.2fms [%.2fs]\n", 0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
@@ -2273,10 +2400,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
+ std::vector<uint256> vOrphanErase;
std::vector<int> prevheights;
CAmount nFees = 0;
int nInputs = 0;
- unsigned int nSigOps = 0;
+ int64_t nSigOpsCost = 0;
CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
std::vector<std::pair<uint256, CDiskTxPos> > vPos;
vPos.reserve(block.vtx.size());
@@ -2286,10 +2414,6 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
const CTransaction &tx = block.vtx[i];
nInputs += tx.vin.size();
- nSigOps += GetLegacySigOpCount(tx);
- if (nSigOps > MAX_BLOCK_SIGOPS)
- return state.DoS(100, error("ConnectBlock(): too many sigops"),
- REJECT_INVALID, "bad-blk-sigops");
if (!tx.IsCoinBase())
{
@@ -2305,22 +2429,34 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
prevheights[j] = view.AccessCoins(tx.vin[j].prevout.hash)->nHeight;
}
+ // Which orphan pool entries must we evict?
+ for (size_t j = 0; j < tx.vin.size(); j++) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
+ if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
+ for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
+ const CTransaction& orphanTx = (*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ vOrphanErase.push_back(orphanHash);
+ }
+ }
+
if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
return state.DoS(100, error("%s: contains a non-BIP68-final transaction", __func__),
REJECT_INVALID, "bad-txns-nonfinal");
}
+ }
- if (fStrictPayToScriptHash)
- {
- // Add in sigops done by pay-to-script-hash inputs;
- // this is to prevent a "rogue miner" from creating
- // an incredibly-expensive-to-validate block.
- nSigOps += GetP2SHSigOpCount(tx, view);
- if (nSigOps > MAX_BLOCK_SIGOPS)
- return state.DoS(100, error("ConnectBlock(): too many sigops"),
- REJECT_INVALID, "bad-blk-sigops");
- }
+ // GetTransactionSigOpCost counts 3 types of sigops:
+ // * legacy (always)
+ // * p2sh (when P2SH enabled in flags and excludes coinbase)
+ // * witness (when witness enabled in flags and excludes coinbase)
+ nSigOpsCost += GetTransactionSigOpCost(tx, view, flags);
+ if (nSigOpsCost > MAX_BLOCK_SIGOPS_COST)
+ return state.DoS(100, error("ConnectBlock(): too many sigops"),
+ REJECT_INVALID, "bad-blk-sigops");
+ if (!tx.IsCoinBase())
+ {
nFees += view.GetValueIn(tx)-tx.GetValueOut();
std::vector<CScriptCheck> vChecks;
@@ -2392,6 +2528,15 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = block.vtx[0].GetHash();
+ // Erase orphan transactions included or precluded by this block
+ if (vOrphanErase.size()) {
+ int nErased = 0;
+ BOOST_FOREACH(uint256 &orphanHash, vOrphanErase) {
+ nErased += EraseOrphanTx(orphanHash);
+ }
+ LogPrint("mempool", "Erased %d orphan tx included or conflicted by block\n", nErased);
+ }
+
int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5;
LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
@@ -2582,7 +2727,7 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
}
/** Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and manually re-limit mempool size after this, with cs_main held. */
-bool static DisconnectTip(CValidationState& state, const CChainParams& chainparams)
+bool static DisconnectTip(CValidationState& state, const CChainParams& chainparams, bool fBare = false)
{
CBlockIndex *pindexDelete = chainActive.Tip();
assert(pindexDelete);
@@ -2602,24 +2747,28 @@ bool static DisconnectTip(CValidationState& state, const CChainParams& chainpara
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
- // Resurrect mempool transactions from the disconnected block.
- std::vector<uint256> vHashUpdate;
- BOOST_FOREACH(const CTransaction &tx, block.vtx) {
- // ignore validation errors in resurrected transactions
- list<CTransaction> removed;
- CValidationState stateDummy;
- if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
- mempool.removeRecursive(tx, removed);
- } else if (mempool.exists(tx.GetHash())) {
- vHashUpdate.push_back(tx.GetHash());
- }
- }
- // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
- // no in-mempool children, which is generally not true when adding
- // previously-confirmed transactions back to the mempool.
- // UpdateTransactionsFromBlock finds descendants of any transactions in this
- // block that were added back and cleans up the mempool state.
- mempool.UpdateTransactionsFromBlock(vHashUpdate);
+
+ if (!fBare) {
+ // Resurrect mempool transactions from the disconnected block.
+ std::vector<uint256> vHashUpdate;
+ BOOST_FOREACH(const CTransaction &tx, block.vtx) {
+ // ignore validation errors in resurrected transactions
+ list<CTransaction> removed;
+ CValidationState stateDummy;
+ if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL, true)) {
+ mempool.removeRecursive(tx, removed);
+ } else if (mempool.exists(tx.GetHash())) {
+ vHashUpdate.push_back(tx.GetHash());
+ }
+ }
+ // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
+ // no in-mempool children, which is generally not true when adding
+ // previously-confirmed transactions back to the mempool.
+ // UpdateTransactionsFromBlock finds descendants of any transactions in this
+ // block that were added back and cleans up the mempool state.
+ mempool.UpdateTransactionsFromBlock(vHashUpdate);
+ }
+
// Update chainActive and related variables.
UpdateTip(pindexDelete->pprev, chainparams);
// Let wallets know transactions went from 1-confirmed to
@@ -3071,6 +3220,9 @@ bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBl
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus |= BLOCK_HAVE_DATA;
+ if (IsWitnessEnabled(pindexNew->pprev, Params().GetConsensus())) {
+ pindexNew->nStatus |= BLOCK_OPT_WITNESS;
+ }
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
@@ -3196,20 +3348,16 @@ bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigne
return true;
}
-bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW)
+bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW)
{
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
- // Check timestamp
- if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
- return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
-
return true;
}
-bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, int64_t nAdjustedTime, bool fCheckPOW, bool fCheckMerkleRoot)
+bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
@@ -3218,7 +3366,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
- if (!CheckBlockHeader(block, state, consensusParams, nAdjustedTime, fCheckPOW))
+ if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
return false;
// Check the merkle root.
@@ -3238,9 +3386,11 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.
+ // Note that witness malleability is checked in ContextualCheckBlock, so no
+ // checks that use witness data may be performed here.
// Size limits
- if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
+ if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_BASE_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_BLOCK_BASE_SIZE)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false, "size limits failed");
// First transaction must be coinbase, the rest must not be
@@ -3261,7 +3411,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
{
nSigOps += GetLegacySigOpCount(tx);
}
- if (nSigOps > MAX_BLOCK_SIGOPS)
+ if (nSigOps * WITNESS_SCALE_FACTOR > MAX_BLOCK_SIGOPS_COST)
return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops", false, "out-of-bounds SigOpCount");
if (fCheckPOW && fCheckMerkleRoot)
@@ -3284,7 +3434,72 @@ static bool CheckIndexAgainstCheckpoint(const CBlockIndex* pindexPrev, CValidati
return true;
}
-bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex * const pindexPrev)
+bool IsWitnessEnabled(const CBlockIndex* pindexPrev, const Consensus::Params& params)
+{
+ LOCK(cs_main);
+ return (VersionBitsState(pindexPrev, params, Consensus::DEPLOYMENT_SEGWIT, versionbitscache) == THRESHOLD_ACTIVE);
+}
+
+// Compute at which vout of the block's coinbase transaction the witness
+// commitment occurs, or -1 if not found.
+static int GetWitnessCommitmentIndex(const CBlock& block)
+{
+ int commitpos = -1;
+ for (size_t o = 0; o < block.vtx[0].vout.size(); o++) {
+ if (block.vtx[0].vout[o].scriptPubKey.size() >= 38 && block.vtx[0].vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0].vout[o].scriptPubKey[1] == 0x24 && block.vtx[0].vout[o].scriptPubKey[2] == 0xaa && block.vtx[0].vout[o].scriptPubKey[3] == 0x21 && block.vtx[0].vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0].vout[o].scriptPubKey[5] == 0xed) {
+ commitpos = o;
+ }
+ }
+ return commitpos;
+}
+
+void UpdateUncommittedBlockStructures(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
+{
+ int commitpos = GetWitnessCommitmentIndex(block);
+ static const std::vector<unsigned char> nonce(32, 0x00);
+ if (commitpos != -1 && IsWitnessEnabled(pindexPrev, consensusParams) && block.vtx[0].wit.IsEmpty()) {
+ block.vtx[0].wit.vtxinwit.resize(1);
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.resize(1);
+ block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0] = nonce;
+ }
+}
+
+std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBlockIndex* pindexPrev, const Consensus::Params& consensusParams)
+{
+ std::vector<unsigned char> commitment;
+ int commitpos = GetWitnessCommitmentIndex(block);
+ bool fHaveWitness = false;
+ for (size_t t = 1; t < block.vtx.size(); t++) {
+ if (!block.vtx[t].wit.IsNull()) {
+ fHaveWitness = true;
+ break;
+ }
+ }
+ std::vector<unsigned char> ret(32, 0x00);
+ if (fHaveWitness && IsWitnessEnabled(pindexPrev, consensusParams)) {
+ if (commitpos == -1) {
+ uint256 witnessroot = BlockWitnessMerkleRoot(block, NULL);
+ CHash256().Write(witnessroot.begin(), 32).Write(&ret[0], 32).Finalize(witnessroot.begin());
+ CTxOut out;
+ out.nValue = 0;
+ out.scriptPubKey.resize(38);
+ out.scriptPubKey[0] = OP_RETURN;
+ out.scriptPubKey[1] = 0x24;
+ out.scriptPubKey[2] = 0xaa;
+ out.scriptPubKey[3] = 0x21;
+ out.scriptPubKey[4] = 0xa9;
+ out.scriptPubKey[5] = 0xed;
+ memcpy(&out.scriptPubKey[6], witnessroot.begin(), 32);
+ commitment = std::vector<unsigned char>(out.scriptPubKey.begin(), out.scriptPubKey.end());
+ const_cast<std::vector<CTxOut>*>(&block.vtx[0].vout)->push_back(out);
+ block.vtx[0].UpdateHash();
+ }
+ }
+ UpdateUncommittedBlockStructures(block, pindexPrev, consensusParams);
+ return commitment;
+}
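A standalone sketch (not from the patch) of the commitment output shape built above: a 38-byte scriptPubKey made of OP_RETURN (0x6a), a 36-byte push (0x24), the 4-byte tag aa21a9ed, and the 32-byte double-SHA256 of the witness merkle root concatenated with the 32-byte witness nonce (the hash itself is taken as an input here).

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> BuildCommitmentScript(const std::vector<uint8_t>& commitment /* 32 bytes */) {
    std::vector<uint8_t> spk = {0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed};   // OP_RETURN, push 36, tag
    spk.insert(spk.end(), commitment.begin(), commitment.end());       // 6 + 32 = 38 bytes
    return spk;
}

bool LooksLikeCommitment(const std::vector<uint8_t>& spk) {
    // Mirrors GetWitnessCommitmentIndex above: size >= 38 and the fixed 6-byte prefix.
    return spk.size() >= 38 && spk[0] == 0x6a && spk[1] == 0x24 &&
           spk[2] == 0xaa && spk[3] == 0x21 && spk[4] == 0xa9 && spk[5] == 0xed;
}

int main() {
    std::vector<uint8_t> spk = BuildCommitmentScript(std::vector<uint8_t>(32, 0x00));
    std::printf("%zu bytes, matches: %d\n", spk.size(), (int)LooksLikeCommitment(spk));
}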
+
+bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, CBlockIndex * const pindexPrev, int64_t nAdjustedTime)
{
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
@@ -3294,6 +3509,10 @@ bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& sta
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(false, REJECT_INVALID, "time-too-old", "block's timestamp is too early");
+ // Check timestamp
+ if (block.GetBlockTime() > nAdjustedTime + 2 * 60 * 60)
+ return state.Invalid(false, REJECT_INVALID, "time-too-new", "block timestamp too far in the future");
+
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
for (int32_t version = 2; version < 5; ++version) // check for version 2, 3 and 4 upgrades
if (block.nVersion < version && IsSuperMajority(version, pindexPrev, consensusParams.nMajorityRejectBlockOutdated, consensusParams))
@@ -3336,6 +3555,53 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
}
}
+ // Validation for witness commitments.
+ // * We compute the witness hash (which is the hash including witnesses) of all the block's transactions, except the
+ // coinbase (where 0x0000....0000 is used instead).
+ // * The coinbase scriptWitness is a stack of a single 32-byte vector, containing a witness nonce (unconstrained).
+ // * We build a merkle tree with all those witness hashes as leaves (similar to the hashMerkleRoot in the block header).
+ // * There must be at least one output whose scriptPubKey is a single 36-byte push, the first 4 bytes of which are
+ // {0xaa, 0x21, 0xa9, 0xed}, and the following 32 bytes are SHA256^2(witness root, witness nonce). In case there are
+ // multiple, the last one is used.
+ bool fHaveWitness = false;
+ if (IsWitnessEnabled(pindexPrev, consensusParams)) {
+ int commitpos = GetWitnessCommitmentIndex(block);
+ if (commitpos != -1) {
+ bool malleated = false;
+ uint256 hashWitness = BlockWitnessMerkleRoot(block, &malleated);
+ // The malleation check is ignored; as the transaction tree itself
+ // already does not permit it, it is impossible to trigger in the
+ // witness tree.
+ if (block.vtx[0].wit.vtxinwit.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.size() != 1 || block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0].size() != 32) {
+ return state.DoS(100, error("%s : invalid witness nonce size", __func__), REJECT_INVALID, "bad-witness-nonce-size", true);
+ }
+ CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0].wit.vtxinwit[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin());
+ if (memcmp(hashWitness.begin(), &block.vtx[0].vout[commitpos].scriptPubKey[6], 32)) {
+ return state.DoS(100, error("%s : witness merkle commitment mismatch", __func__), REJECT_INVALID, "bad-witness-merkle-match", true);
+ }
+ fHaveWitness = true;
+ }
+ }
+
+ // No witness data is allowed in blocks that don't commit to witness data, as this would otherwise leave room for spam
+ if (!fHaveWitness) {
+ for (size_t i = 0; i < block.vtx.size(); i++) {
+ if (!block.vtx[i].wit.IsNull()) {
+ return state.DoS(100, error("%s : unexpected witness data found", __func__), REJECT_INVALID, "unexpected-witness", true);
+ }
+ }
+ }
+
+ // After the coinbase witness nonce and commitment are verified,
+ // we can check if the block cost passes (before we've checked the
+ // coinbase witness, it would be possible for the cost to be too
+ // large by filling up the coinbase witness, which doesn't change
+ // the block hash, so we couldn't mark the block as permanently
+ // failed).
+ if (GetBlockCost(block) > MAX_BLOCK_COST) {
+ return state.DoS(100, error("ContextualCheckBlock(): cost limit failed"), REJECT_INVALID, "bad-blk-cost");
+ }
+
return true;
}
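A standalone sketch (not from the patch; the cap mirrors MAX_BLOCK_COST = 4,000,000) of the cost check above: block cost is base size times three plus total size, so a block with no witness data tops out at 1,000,000 base bytes while witness bytes are charged only once.

#include <cstdint>
#include <cstdio>

static const int64_t MAX_COST = 4000000;   // assumed to mirror MAX_BLOCK_COST

int64_t BlockCost(int64_t baseSize, int64_t totalSize) {
    return baseSize * 3 + totalSize;       // base bytes weighted 4x overall
}

int main() {
    int64_t cost = BlockCost(1000000, 1000000);   // legacy 1 MB block, no witness data
    std::printf("cost=%lld within limit: %d\n", (long long)cost, (int)(cost <= MAX_COST));
}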
@@ -3358,7 +3624,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return true;
}
- if (!CheckBlockHeader(block, state, chainparams.GetConsensus(), GetAdjustedTime()))
+ if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
// Get prev block index
@@ -3374,7 +3640,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
if (fCheckpointsEnabled && !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams, hash))
return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__, state.GetRejectReason().c_str());
- if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev))
+ if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
}
if (pindex == NULL)
@@ -3387,8 +3653,9 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
}
/** Store block on disk. If dbp is non-NULL, the file is known to already reside on disk */
-static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp)
+static bool AcceptBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
{
+ if (fNewBlock) *fNewBlock = false;
AssertLockHeld(cs_main);
CBlockIndex *pindexDummy = NULL;
@@ -3417,6 +3684,7 @@ static bool AcceptBlock(const CBlock& block, CValidationState& state, const CCha
if (!fHasMoreWork) return true; // Don't process less-work chains
if (fTooFarAhead) return true; // Block height is too high
}
+ if (fNewBlock) *fNewBlock = true;
if ((!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime())) || !ContextualCheckBlock(block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
@@ -3464,7 +3732,7 @@ static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned
}
-bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, const CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
+bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, CNode* pfrom, const CBlock* pblock, bool fForceProcessing, const CDiskBlockPos* dbp)
{
{
LOCK(cs_main);
@@ -3473,9 +3741,11 @@ bool ProcessNewBlock(CValidationState& state, const CChainParams& chainparams, c
// Store to disk
CBlockIndex *pindex = NULL;
- bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp);
+ bool fNewBlock = false;
+ bool ret = AcceptBlock(*pblock, state, chainparams, &pindex, fRequested, dbp, &fNewBlock);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
+ if (fNewBlock) pfrom->nLastBlockTime = GetTime();
}
CheckBlockIndex(chainparams.GetConsensus());
if (!ret)
@@ -3503,9 +3773,9 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
indexDummy.nHeight = pindexPrev->nHeight + 1;
// NOTE: CheckBlockHeader is called by CheckBlock
- if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev))
+ if (!ContextualCheckBlockHeader(block, state, chainparams.GetConsensus(), pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
- if (!CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime(), fCheckPOW, fCheckMerkleRoot))
+ if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
if (!ContextualCheckBlock(block, state, pindexPrev))
return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
@@ -3850,7 +4120,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
- if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus(), GetAdjustedTime()))
+ if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
// check level 2: verify undo validity
@@ -3901,6 +4171,90 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
return true;
}
+bool RewindBlockIndex(const CChainParams& params)
+{
+ LOCK(cs_main);
+
+ int nHeight = 1;
+ while (nHeight <= chainActive.Height()) {
+ if (IsWitnessEnabled(chainActive[nHeight - 1], params.GetConsensus()) && !(chainActive[nHeight]->nStatus & BLOCK_OPT_WITNESS)) {
+ break;
+ }
+ nHeight++;
+ }
+
+ // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
+ CValidationState state;
+ CBlockIndex* pindex = chainActive.Tip();
+ while (chainActive.Height() >= nHeight) {
+ if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
+ // If pruning, don't try rewinding past the HAVE_DATA point;
+ // since older blocks can't be served anyway, there's
+ // no need to walk further, and trying to DisconnectTip()
+ // will fail (and require a needless reindex/redownload
+ // of the blockchain).
+ break;
+ }
+ if (!DisconnectTip(state, params, true)) {
+ return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
+ }
+ // Occasionally flush state to disk.
+ if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
+ return false;
+ }
+
+ // Reduce validity flag and have-data flags.
+ // We do this after actual disconnecting, otherwise we'll end up writing the lack of data
+ // to disk before writing the chainstate, resulting in a failure to continue if interrupted.
+ for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
+ CBlockIndex* pindexIter = it->second;
+
+ // Note: If we encounter an insufficiently validated block that
+ // is on chainActive, it must be because we are a pruning node, and
+ // this block or some successor doesn't HAVE_DATA, so we were unable to
+ // rewind all the way. Blocks remaining on chainActive at this point
+ // must not have their validity reduced.
+ if (IsWitnessEnabled(pindexIter->pprev, params.GetConsensus()) && !(pindexIter->nStatus & BLOCK_OPT_WITNESS) && !chainActive.Contains(pindexIter)) {
+ // Reduce validity
+ pindexIter->nStatus = std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | (pindexIter->nStatus & ~BLOCK_VALID_MASK);
+ // Remove have-data flags.
+ pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
+ // Remove storage location.
+ pindexIter->nFile = 0;
+ pindexIter->nDataPos = 0;
+ pindexIter->nUndoPos = 0;
+ // Remove various other things
+ pindexIter->nTx = 0;
+ pindexIter->nChainTx = 0;
+ pindexIter->nSequenceId = 0;
+ // Make sure it gets written.
+ setDirtyBlockIndex.insert(pindexIter);
+ // Update indexes
+ setBlockIndexCandidates.erase(pindexIter);
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
+ while (ret.first != ret.second) {
+ if (ret.first->second == pindexIter) {
+ mapBlocksUnlinked.erase(ret.first++);
+ } else {
+ ++ret.first;
+ }
+ }
+ } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
+ setBlockIndexCandidates.insert(pindexIter);
+ }
+ }
+
+ PruneBlockIndexCandidates();
+
+ CheckBlockIndex(params.GetConsensus());
+
+ if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
+ return false;
+ }
+
+ return true;
+}
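A standalone sketch (simplified, not from the patch) of the first loop in RewindBlockIndex: walk up the active chain and stop at the first block that was connected without a rule that is now required, here modeled as a per-block flag; everything from that height upward is what gets disconnected.

#include <vector>

struct Blk { bool ruleRequired; bool validatedWithRule; };

int FirstInsufficientHeight(const std::vector<Blk>& chain) {
    int h = 1;
    while (h < (int)chain.size() &&
           !(chain[h].ruleRequired && !chain[h].validatedWithRule))
        ++h;
    return h;   // equals chain.size() when every block is sufficiently validated
}

int main() {
    std::vector<Blk> chain = {{false, false}, {false, false}, {true, true}, {true, false}};
    return FirstInsufficientHeight(chain) == 3 ? 0 : 1;   // rewind from height 3
}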
+
void UnloadBlockIndex()
{
LOCK(cs_main);
@@ -3995,7 +4349,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
- CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
+ CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SERIALIZED_SIZE, MAX_BLOCK_SERIALIZED_SIZE+8, SER_DISK, CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
boost::this_thread::interruption_point();
@@ -4014,7 +4368,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
continue;
// read size
blkdat >> nSize;
- if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
+ if (nSize < 80 || nSize > MAX_BLOCK_SERIALIZED_SIZE)
continue;
} catch (const std::exception&) {
// no valid block header found; don't complain
@@ -4045,7 +4399,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
LOCK(cs_main);
CValidationState state;
- if (AcceptBlock(block, state, chainparams, NULL, true, dbp))
+ if (AcceptBlock(block, state, chainparams, NULL, true, dbp, NULL))
nLoaded++;
if (state.IsError())
break;
@@ -4078,7 +4432,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
head.ToString());
LOCK(cs_main);
CValidationState dummy;
- if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second))
+ if (AcceptBlock(block, dummy, chainparams, NULL, true, &it->second, NULL))
{
nLoaded++;
queue.push_back(block.GetHash());
@@ -4344,6 +4698,7 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
switch (inv.type)
{
case MSG_TX:
+ case MSG_WITNESS_TX:
{
assert(recentRejects);
if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
@@ -4364,6 +4719,7 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
pcoinsTip->HaveCoinsInCache(inv.hash);
}
case MSG_BLOCK:
+ case MSG_WITNESS_BLOCK:
return mapBlockIndex.count(inv.hash);
}
// Don't know what it is, just say we already got one
@@ -4388,7 +4744,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
boost::this_thread::interruption_point();
it++;
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
@@ -4429,8 +4785,10 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
+ pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+ else if (inv.type == MSG_WITNESS_BLOCK)
pfrom->PushMessage(NetMsgType::BLOCK, block);
- else // MSG_FILTERED_BLOCK)
+ else if (inv.type == MSG_FILTERED_BLOCK)
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
@@ -4445,11 +4803,23 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
- pfrom->PushMessage(NetMsgType::TX, block.vtx[pair.first]);
+ pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
}
// else
// no response
}
+ else if (inv.type == MSG_CMPCT_BLOCK)
+ {
+ // If a peer is asking for old blocks, we're almost guaranteed
+ // they wont have a useful mempool to match against a compact block,
+ // and we dont feel like constructing the object for them, so
+ // instead we respond with the full, non-compact block.
+ if (mi->second->nHeight >= chainActive.Height() - 10) {
+ CBlockHeaderAndShortTxIDs cmpctblock(block);
+ pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+ } else
+ pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+ }
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
@@ -4464,20 +4834,20 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
}
}
}
- else if (inv.type == MSG_TX)
+ else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
{
// Send stream from relay memory
bool push = false;
auto mi = mapRelay.find(inv.hash);
if (mi != mapRelay.end()) {
- pfrom->PushMessage(NetMsgType::TX, *mi->second);
+ pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
- pfrom->PushMessage(NetMsgType::TX, *txinfo.tx);
+ pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
push = true;
}
}
@@ -4489,7 +4859,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
- if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
+ if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
break;
}
}
@@ -4508,6 +4878,14 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
}
}
+uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
+ uint32_t nFetchFlags = 0;
+ if (IsWitnessEnabled(pprev, chainparams) && State(pfrom->GetId())->fHaveWitness) {
+ nFetchFlags |= MSG_WITNESS_FLAG;
+ }
+ return nFetchFlags;
+}
+
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams)
{
LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
@@ -4614,6 +4992,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
+ if((pfrom->nServices & NODE_WITNESS))
+ {
+ LOCK(cs_main);
+ State(pfrom->GetId())->fHaveWitness = true;
+ }
+
// Potentially mark this peer as a preferred download peer.
{
LOCK(cs_main);
@@ -4699,6 +5083,16 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// nodes)
pfrom->PushMessage(NetMsgType::SENDHEADERS);
}
+ if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
+ // Tell our peer we are willing to provide version-1 cmpctblocks
+ // However, we do not request new block announcements using
+ // cmpctblock messages.
+ // We send this to non-NODE NETWORK peers as well, because
+ // they may wish to request compact blocks from us
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = 1;
+ pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+ }
}
@@ -4773,6 +5167,18 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
State(pfrom->GetId())->fPreferHeaders = true;
}
+ else if (strCommand == NetMsgType::SENDCMPCT)
+ {
+ bool fAnnounceUsingCMPCTBLOCK = false;
+ uint64_t nCMPCTBLOCKVersion = 1;
+ vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
+ if (nCMPCTBLOCKVersion == 1) {
+ LOCK(cs_main);
+ State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
+ State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+ }
+ }
+
else if (strCommand == NetMsgType::INV)
{
@@ -4793,17 +5199,23 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LOCK(cs_main);
+ uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
+
std::vector<CInv> vToFetch;
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
- const CInv &inv = vInv[nInv];
+ CInv &inv = vInv[nInv];
boost::this_thread::interruption_point();
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
+ if (inv.type == MSG_TX) {
+ inv.type |= nFetchFlags;
+ }
+
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
@@ -4818,8 +5230,13 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());
if (CanDirectFetch(chainparams.GetConsensus()) &&
- nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- vToFetch.push_back(inv);
+ nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
+ (!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
+ inv.type |= nFetchFlags;
+ if (nodestate->fProvidesHeaderAndIDs && !(nLocalServices & NODE_WITNESS))
+ vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
+ else
+ vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
@@ -4916,6 +5333,39 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
+ else if (strCommand == NetMsgType::GETBLOCKTXN)
+ {
+ BlockTransactionsRequest req;
+ vRecv >> req;
+
+ BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
+ if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
+ return true;
+ }
+
+ if (it->second->nHeight < chainActive.Height() - 15) {
+ LogPrint("net", "Peer %d sent us a getblocktxn for a block > 15 deep", pfrom->id);
+ return true;
+ }
+
+ CBlock block;
+ assert(ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()));
+
+ BlockTransactions resp(req);
+ for (size_t i = 0; i < req.indexes.size(); i++) {
+ if (req.indexes[i] >= block.vtx.size()) {
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
+ return true;
+ }
+ resp.txn[i] = block.vtx[req.indexes[i]];
+ }
+ pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
+ }
+
+
else if (strCommand == NetMsgType::GETHEADERS)
{
CBlockLocator locator;
@@ -4975,7 +5425,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
- vector<uint256> vWorkQueue;
+ deque<COutPoint> vWorkQueue;
vector<uint256> vEraseQueue;
CTransaction tx;
vRecv >> tx;
@@ -4994,7 +5444,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
mempool.check(pcoinsTip);
RelayTransaction(tx);
- vWorkQueue.push_back(inv.hash);
+ for (unsigned int i = 0; i < tx.vout.size(); i++) {
+ vWorkQueue.emplace_back(inv.hash, i);
+ }
+
+ pfrom->nLastTXTime = GetTime();
LogPrint("mempool", "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->id,
@@ -5003,18 +5457,18 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Recursively process any orphan transactions that depended on this one
set<NodeId> setMisbehaving;
- for (unsigned int i = 0; i < vWorkQueue.size(); i++)
- {
- map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
+ while (!vWorkQueue.empty()) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
+ vWorkQueue.pop_front();
if (itByPrev == mapOrphanTransactionsByPrev.end())
continue;
- for (set<uint256>::iterator mi = itByPrev->second.begin();
+ for (auto mi = itByPrev->second.begin();
mi != itByPrev->second.end();
++mi)
{
- const uint256& orphanHash = *mi;
- const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
- NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
+ const CTransaction& orphanTx = (*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ NodeId fromPeer = (*mi)->second.fromPeer;
bool fMissingInputs2 = false;
// Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
@@ -5027,13 +5481,15 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx);
- vWorkQueue.push_back(orphanHash);
+ for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
+ vWorkQueue.emplace_back(orphanHash, i);
+ }
vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
int nDos = 0;
- if (stateDummy.IsInvalid(nDos) && nDos > 0)
+ if (stateDummy.IsInvalid(nDos) && nDos > 0 && (!state.CorruptionPossible() || State(fromPeer)->fHaveWitness))
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
@@ -5044,8 +5500,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
- assert(recentRejects);
- recentRejects->insert(orphanHash);
+ if (!stateDummy.CorruptionPossible()) {
+ assert(recentRejects);
+ recentRejects->insert(orphanHash);
+ }
}
mempool.check(pcoinsTip);
}
@@ -5056,16 +5514,34 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
else if (fMissingInputs)
{
- AddOrphanTx(tx, pfrom->GetId());
+ bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ if (recentRejects->contains(txin.prevout.hash)) {
+ fRejectedParents = true;
+ break;
+ }
+ }
+ if (!fRejectedParents) {
+ BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ CInv inv(MSG_TX, txin.prevout.hash);
+ pfrom->AddInventoryKnown(inv);
+ if (!AlreadyHave(inv)) pfrom->AskFor(inv);
+ }
+ AddOrphanTx(tx, pfrom->GetId());
- // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
- unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
- unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
- if (nEvicted > 0)
- LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
+ // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
+ unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
+ unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
+ if (nEvicted > 0)
+ LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
+ } else {
+ LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
+ }
} else {
- assert(recentRejects);
- recentRejects->insert(tx.GetHash());
+ if (!state.CorruptionPossible()) {
+ assert(recentRejects);
+ recentRejects->insert(tx.GetHash());
+ }
if (pfrom->fWhitelisted && GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
@@ -5094,13 +5570,184 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
- if (nDoS > 0)
+ if (nDoS > 0 && (!state.CorruptionPossible() || State(pfrom->id)->fHaveWitness)) {
+ // When a non-witness-supporting peer gives us a transaction that would
+ // be accepted if witness validation were off, we can't blame them for it.
Misbehaving(pfrom->GetId(), nDoS);
+ }
}
FlushStateToDisk(state, FLUSH_STATE_PERIODIC);
}
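
Two guards above are worth calling out: an orphan whose parents are already in recentRejects is dropped immediately instead of being stored, and transactions whose failure may stem from stripped witness data (CorruptionPossible) are kept out of recentRejects so they can still be fetched from a witness-capable peer. A small sketch of the rejected-parents filter, assuming an ordinary std::set stands in for the rolling bloom filter behind recentRejects:

// Sketch: drop orphans whose parents were already rejected (a std::set stands
// in for the rolling bloom filter; all names are illustrative).
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct TxIn { std::string prevoutHash; };
struct Tx { std::string hash; std::vector<TxIn> vin; };

static bool HasRejectedParent(const Tx& tx, const std::set<std::string>& recentRejects) {
    for (const TxIn& in : tx.vin)
        if (recentRejects.count(in.prevoutHash))
            return true;
    return false;
}

int main() {
    std::set<std::string> recentRejects{"badparent"};
    Tx orphan{"orphan1", {{"badparent"}, {"goodparent"}}};

    if (HasRejectedParent(orphan, recentRejects)) {
        // Mirrors the fRejectedParents branch: don't store it, don't ask for parents.
        std::cout << "not keeping orphan with rejected parents " << orphan.hash << "\n";
    } else {
        // Otherwise the real code stores the orphan and AskFor()s each missing parent.
        std::cout << "keeping orphan " << orphan.hash << " and requesting its parents\n";
    }
    return 0;
}
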
+ else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
+ {
+ CBlockHeaderAndShortTxIDs cmpctblock;
+ vRecv >> cmpctblock;
+
+ LOCK(cs_main);
+
+ if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
+ // Doesn't connect (or is genesis); instead of DoSing in AcceptBlockHeader, request deeper headers
+ if (!IsInitialBlockDownload())
+ pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
+ return true;
+ }
+
+ CBlockIndex *pindex = NULL;
+ CValidationState state;
+ if (!AcceptBlockHeader(cmpctblock.header, state, chainparams, &pindex)) {
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ if (nDoS > 0)
+ Misbehaving(pfrom->GetId(), nDoS);
+ LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id);
+ return true;
+ }
+ }
+
+ // If AcceptBlockHeader returned true, it set pindex
+ assert(pindex);
+ UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
+
+ std::map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
+ bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
+
+ if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
+ return true;
+
+ if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
+ pindex->nTx != 0) { // We had this block at some point, but pruned it
+ if (fAlreadyInFlight) {
+ // We requested this block for some reason, but our mempool will probably be useless,
+ // so we just grab the block via normal getdata
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+ pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ return true;
+ }
+ }
+
+ // If we're not close to tip yet, give up and let parallel block fetch work its magic
+ if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
+ return true;
+
+ CNodeState *nodestate = State(pfrom->GetId());
+
+ // We want to be a bit conservative just to be extra careful about DoS
+ // possibilities in compact block processing...
+ if (pindex->nHeight <= chainActive.Height() + 2) {
+ if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
+ (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
+ list<QueuedBlock>::iterator *queuedBlockIt = NULL;
+ if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) {
+ if (!(*queuedBlockIt)->partialBlock)
+ (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
+ else {
+ // The block was already in flight using compact blocks from the same peer
+ LogPrint("net", "Peer sent us compact block we were already syncing!\n");
+ return true;
+ }
+ }
+
+ PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
+ ReadStatus status = partialBlock.InitData(cmpctblock);
+ if (status == READ_STATUS_INVALID) {
+ MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id);
+ return true;
+ } else if (status == READ_STATUS_FAILED) {
+ // Duplicate txindexes; the block is now in-flight, so just request it
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+ pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ return true;
+ }
+
+ BlockTransactionsRequest req;
+ for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
+ if (!partialBlock.IsTxAvailable(i))
+ req.indexes.push_back(i);
+ }
+ if (req.indexes.empty()) {
+ // Dirty hack to jump to BLOCKTXN code (TODO: move each message handler into its own function)
+ BlockTransactions txn;
+ txn.blockhash = cmpctblock.header.GetHash();
+ CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
+ blockTxnMsg << txn;
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams);
+ } else {
+ req.blockhash = pindex->GetBlockHash();
+ pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req);
+ }
+ }
+ } else {
+ if (fAlreadyInFlight) {
+ // We requested this block, but it's far into the future, so our
+ // mempool will probably be useless - request the block normally
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
+ pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+ return true;
+ } else {
+ // If this was an announce-cmpctblock, we want the same treatment as a header message
+ // Dirty hack to process as if it were just a headers message (TODO: move each message handler into its own function)
+ std::vector<CBlock> headers;
+ headers.push_back(cmpctblock.header);
+ CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
+ vHeadersMsg << headers;
+ return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams);
+ }
+ }
+
+ CheckBlockIndex(chainparams.GetConsensus());
+ }
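
After InitData prefills the reconstruction from the mempool, the handler above only asks the peer for the transaction positions it could not fill, and skips the round trip entirely when nothing is missing. A self-contained sketch of that "request only the missing indexes" step, with plain STL containers standing in for the BIP 152 structures:

// Sketch: request only the transaction positions we could not reconstruct
// (plain STL stand-ins for the BIP 152 structures; names are illustrative).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct BlockTxnRequest {
    std::string blockhash;
    std::vector<uint16_t> indexes;  // positions of missing transactions
};

int main() {
    // State after prefilling from the mempool: some slots are still empty.
    std::vector<std::optional<std::string>> txSlots = {
        std::string("coinbase"), std::nullopt, std::string("tx2"), std::nullopt};

    BlockTxnRequest req;
    req.blockhash = "blockhash";
    for (std::size_t i = 0; i < txSlots.size(); i++)
        if (!txSlots[i])
            req.indexes.push_back(static_cast<uint16_t>(i));  // missing -> ask the peer

    if (req.indexes.empty())
        std::cout << "block fully reconstructed from mempool, no GETBLOCKTXN needed\n";
    else
        std::cout << "requesting " << req.indexes.size()
                  << " missing transactions via GETBLOCKTXN\n";
    return 0;
}
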
+
+ else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
+ {
+ BlockTransactions resp;
+ vRecv >> resp;
+
+ LOCK(cs_main);
+
+ map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
+ if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
+ it->second.first != pfrom->GetId()) {
+ LogPrint("net", "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->id);
+ return true;
+ }
+
+ PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
+ CBlock block;
+ ReadStatus status = partialBlock.FillBlock(block, resp.txn);
+ if (status == READ_STATUS_INVALID) {
+ MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->id);
+ return true;
+ } else if (status == READ_STATUS_FAILED) {
+ // Might have collided, fall back to getdata now :(
+ std::vector<CInv> invs;
+ invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
+ pfrom->PushMessage(NetMsgType::GETDATA, invs);
+ } else {
+ CValidationState state;
+ ProcessNewBlock(state, chainparams, pfrom, &block, false, NULL);
+ int nDoS;
+ if (state.IsInvalid(nDoS)) {
+ assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
+ pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+ state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
+ if (nDoS > 0) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), nDoS);
+ }
+ }
+ }
+ }
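
The BLOCKTXN path distinguishes three outcomes of FillBlock: provably bad data (clear the in-flight entry and penalize the peer), a short-ID collision or similar benign failure (quietly fall back to a full-block getdata), and success (hand the reconstructed block to ProcessNewBlock). A compact sketch of that three-way decision, with an enum standing in for ReadStatus:

// Sketch: three-way handling of compact-block reconstruction results
// (the enum and function names are illustrative, not the real main.cpp API).
#include <iostream>
#include <string>

enum class ReadStatus { Ok, Invalid, Failed };

void HandleFillResult(ReadStatus status, const std::string& blockhash) {
    switch (status) {
    case ReadStatus::Invalid:
        // Data that cannot be an honest mistake: drop in-flight state, penalize the peer.
        std::cout << "misbehaving peer, invalid block transactions for " << blockhash << "\n";
        break;
    case ReadStatus::Failed:
        // e.g. a short-ID collision; not the peer's fault, so request the full block.
        std::cout << "falling back to GETDATA for " << blockhash << "\n";
        break;
    case ReadStatus::Ok:
        std::cout << "reconstructed " << blockhash << ", handing it to block validation\n";
        break;
    }
}

int main() {
    HandleFillResult(ReadStatus::Failed, "somehash");
    return 0;
}
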
+
+
else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
@@ -5126,13 +5773,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
- // If we already know the last header in the message, then it contains
- // no new information for us. In this case, we do not request
- // more headers later. This prevents multiple chains of redundant
- // getheader requests from running in parallel if triggered by incoming
- // blocks while the node is still in initial headers sync.
- const bool hasNewHeaders = (mapBlockIndex.count(headers.back().GetHash()) == 0);
-
CBlockIndex *pindexLast = NULL;
BOOST_FOREACH(const CBlockHeader& header, headers) {
CValidationState state;
@@ -5150,10 +5790,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
}
- if (pindexLast)
- UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
+ assert(pindexLast);
+ UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
- if (nCount == MAX_HEADERS_RESULTS && pindexLast && hasNewHeaders) {
+ if (nCount == MAX_HEADERS_RESULTS) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
@@ -5171,7 +5811,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
- !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
+ !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
+ (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
// We don't have this block, and it's not yet in flight.
vToFetch.push_back(pindexWalk);
}
@@ -5193,7 +5834,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Can't download any more from this peer
break;
}
- vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
+ uint32_t nFetchFlags = GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus());
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
LogPrint("net", "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->id);
@@ -5203,6 +5845,13 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
+ if (nodestate->fProvidesHeaderAndIDs && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN) && !(nLocalServices & NODE_WITNESS)) {
+ // We seem to be rather well-synced, so it appears pfrom was the first to provide us
+ // with this block! Let's get them to announce using compact blocks in the future.
+ MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom);
+ // In any case, we want to download using a compact block, not a regular one
+ vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+ }
pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
}
}
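
When the direct-fetch pass above ends up with exactly one block to request, the peer can serve header-and-ids, that block is the only one in flight, and its parent is already validated, the getdata entry is rewritten to MSG_CMPCT_BLOCK and the peer becomes a candidate for compact-block announcements. A minimal sketch of that rewrite (the struct and field names are stand-ins for the state actually checked):

// Sketch: upgrade a lone near-tip block request to a compact-block request
// (struct and field names are illustrative stand-ins for CNodeState et al.).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

const uint32_t MSG_BLOCK_TYPE = 2;
const uint32_t MSG_CMPCT_BLOCK_TYPE = 4;

struct Inv { uint32_t type; std::string hash; };
struct Peer { bool providesHeaderAndIDs; };

void MaybeUpgradeToCompact(std::vector<Inv>& vGetData, const Peer& peer,
                           std::size_t blocksInFlight, bool prevBlockValidated) {
    // Only when this is the sole block in flight and its parent is already
    // validated do we expect our mempool to be useful for reconstruction.
    if (peer.providesHeaderAndIDs && vGetData.size() == 1 &&
        blocksInFlight == 1 && prevBlockValidated) {
        vGetData[0].type = MSG_CMPCT_BLOCK_TYPE;
        std::cout << "requesting " << vGetData[0].hash << " as a compact block\n";
        // The real code would also consider switching this peer to compact-block
        // announcements here (MaybeSetPeerAsAnnouncingHeaderAndIDs).
    }
}

int main() {
    std::vector<Inv> vGetData{{MSG_BLOCK_TYPE, "tiphash"}};
    MaybeUpgradeToCompact(vGetData, Peer{true}, 1, true);
    return 0;
}
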
@@ -5756,7 +6405,9 @@ bool SendMessages(CNode* pto)
// add all to the inv queue.
LOCK(pto->cs_inventory);
vector<CBlock> vHeaders;
- bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
+ bool fRevertToInv = ((!state.fPreferHeaders &&
+ (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
+ pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
CBlockIndex *pBestIndex = NULL; // last header queued for delivery
ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
@@ -5808,6 +6459,33 @@ bool SendMessages(CNode* pto)
}
}
}
+ if (!fRevertToInv && !vHeaders.empty()) {
+ if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
+ // We only send up to 1 block as header-and-ids, as sending more
+ // probably means we're doing an initial-ish sync or they're slow
+ LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
+ vHeaders.front().GetHash().ToString(), pto->id);
+ // TODO: Shouldn't need to reload block from disk, but requires refactor
+ CBlock block;
+ assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
+ CBlockHeaderAndShortTxIDs cmpctblock(block);
+ pto->PushMessage(NetMsgType::CMPCTBLOCK, cmpctblock);
+ state.pindexBestHeaderSent = pBestIndex;
+ } else if (state.fPreferHeaders) {
+ if (vHeaders.size() > 1) {
+ LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
+ vHeaders.size(),
+ vHeaders.front().GetHash().ToString(),
+ vHeaders.back().GetHash().ToString(), pto->id);
+ } else {
+ LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
+ vHeaders.front().GetHash().ToString(), pto->id);
+ }
+ pto->PushMessage(NetMsgType::HEADERS, vHeaders);
+ state.pindexBestHeaderSent = pBestIndex;
+ } else
+ fRevertToInv = true;
+ }
if (fRevertToInv) {
// If falling back to using an inv, just try to inv the tip.
// The last entry in vBlockHashesToAnnounce was our tip at some point
@@ -5833,18 +6511,6 @@ bool SendMessages(CNode* pto)
pto->id, hashToAnnounce.ToString());
}
}
- } else if (!vHeaders.empty()) {
- if (vHeaders.size() > 1) {
- LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
- vHeaders.size(),
- vHeaders.front().GetHash().ToString(),
- vHeaders.back().GetHash().ToString(), pto->id);
- } else {
- LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
- vHeaders.front().GetHash().ToString(), pto->id);
- }
- pto->PushMessage(NetMsgType::HEADERS, vHeaders);
- state.pindexBestHeaderSent = pBestIndex;
}
pto->vBlockHashesToAnnounce.clear();
}
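
Announcement now has three tiers: a single fresh block goes out as a CMPCTBLOCK to peers that asked for header-and-ids, header-preferring peers get a HEADERS batch, and everything else falls back to inv'ing the tip. A simplified sketch of that selection (it ignores the connectivity checks the real code also performs; the names are illustrative):

// Sketch: choosing how to announce new blocks to a peer (simplified; the real
// code also checks that the headers connect, names here are illustrative).
#include <cstddef>
#include <iostream>

enum class Announce { CompactBlock, Headers, Inv };

Announce ChooseAnnouncement(std::size_t nNewBlocks, bool peerPrefersHeaderAndIDs,
                            bool peerPrefersHeaders, std::size_t maxBlocksToAnnounce) {
    if (nNewBlocks > maxBlocksToAnnounce)
        return Announce::Inv;                  // peer is far behind; just inv the tip
    if (nNewBlocks == 1 && peerPrefersHeaderAndIDs)
        return Announce::CompactBlock;         // one fresh block: push it whole as cmpctblock
    if (peerPrefersHeaders)
        return Announce::Headers;              // otherwise a headers batch if they asked for it
    return Announce::Inv;
}

int main() {
    switch (ChooseAnnouncement(1, true, true, 8)) {
    case Announce::CompactBlock: std::cout << "send CMPCTBLOCK\n"; break;
    case Announce::Headers:      std::cout << "send HEADERS\n";    break;
    case Announce::Inv:          std::cout << "send INV\n";        break;
    }
    return 0;
}
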
@@ -6015,10 +6681,13 @@ bool SendMessages(CNode* pto)
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
- vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
- MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
- LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
- pindex->nHeight, pto->id);
+ if (State(pto->GetId())->fHaveWitness || !IsWitnessEnabled(pindex->pprev, consensusParams)) {
+ uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
+ vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+ MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
+ LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
+ pindex->nHeight, pto->id);
+ }
}
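
Block download scheduling now refuses to request a segwit-era block from a peer that cannot serve witnesses, and tags requests with the appropriate fetch flag otherwise. A small sketch of that filter, with the helper names and flag values as illustrative stand-ins:

// Sketch: only schedule block downloads from peers that can serve what
// validation needs (helper names and flag values are illustrative stand-ins).
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

const uint32_t MSG_BLOCK_TYPE   = 2;
const uint32_t MSG_WITNESS_FLAG = 1u << 30;

struct BlockCandidate { std::string hash; bool witnessEnabled; };
struct Peer { bool haveWitness; };

int main() {
    Peer peer{false};
    std::vector<BlockCandidate> toDownload{{"block-a", true}, {"block-b", false}};

    for (const BlockCandidate& b : toDownload) {
        if (b.witnessEnabled && !peer.haveWitness) {
            // The peer cannot give us witness data we must validate, so leave
            // this block for a better-equipped peer instead of wasting the slot.
            continue;
        }
        uint32_t invType = MSG_BLOCK_TYPE | (b.witnessEnabled ? MSG_WITNESS_FLAG : 0u);
        std::cout << "request " << b.hash << " with inv type " << invType << "\n";
    }
    return 0;
}
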
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {