Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp  378
1 file changed, 254 insertions(+), 124 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 42c46b7384..945a97909d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -51,7 +51,7 @@ struct IteratorComparator
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
- CTransaction tx;
+ CTransactionRef tx;
NodeId fromPeer;
int64_t nTimeExpire;
};
@@ -101,7 +101,7 @@ namespace {
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
- CBlockIndex* pindex; //!< Optional.
+ const CBlockIndex* pindex; //!< Optional.
bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
};
@@ -156,13 +156,13 @@ struct CNodeState {
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
- CBlockIndex *pindexBestKnownBlock;
+ const CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
- CBlockIndex *pindexLastCommonBlock;
+ const CBlockIndex *pindexLastCommonBlock;
//! The best header we have sent our peer.
- CBlockIndex *pindexBestHeaderSent;
+ const CBlockIndex *pindexBestHeaderSent;
//! Length of current-streak of unconnecting headers announcements
int nUnconnectingHeaders;
//! Whether we've started headers synchronization with this peer.
@@ -331,7 +331,7 @@ bool MarkBlockAsReceived(const uint256& hash) {
// Requires cs_main.
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
-bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
+bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, const CBlockIndex *pindex = NULL, list<QueuedBlock>::iterator **pit = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
@@ -437,7 +437,7 @@ bool CanDirectFetch(const Consensus::Params &consensusParams)
}
// Requires cs_main
-bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
+bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
{
if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
return true;
@@ -448,7 +448,7 @@ bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
-CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
+const CBlockIndex* LastCommonAncestor(const CBlockIndex* pa, const CBlockIndex* pb) {
if (pa->nHeight > pb->nHeight) {
pa = pa->GetAncestor(pb->nHeight);
} else if (pb->nHeight > pa->nHeight) {
@@ -467,7 +467,7 @@ CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
-void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
+void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
if (count == 0)
return;
@@ -495,8 +495,8 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
- std::vector<CBlockIndex*> vToFetch;
- CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
+ std::vector<const CBlockIndex*> vToFetch;
+ const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
@@ -519,7 +519,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
- BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
+ BOOST_FOREACH(const CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
@@ -591,9 +591,9 @@ void UnregisterNodeSignals(CNodeSignals& nodeSignals)
// mapOrphanTransactions
//
-bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
- uint256 hash = tx.GetHash();
+ const uint256& hash = tx->GetHash();
if (mapOrphanTransactions.count(hash))
return false;
@@ -604,7 +604,7 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(c
// have been mined or received.
// 100 orphans, each of which is at most 99,999 bytes big is
// at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
- unsigned int sz = GetTransactionWeight(tx);
+ unsigned int sz = GetTransactionWeight(*tx);
if (sz >= MAX_STANDARD_TX_WEIGHT)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
@@ -613,7 +613,7 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(c
auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
assert(ret.second);
- BOOST_FOREACH(const CTxIn& txin, tx.vin) {
+ BOOST_FOREACH(const CTxIn& txin, tx->vin) {
mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
}
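
The hunk above switches COrphanTx to hold a CTransactionRef (a std::shared_ptr<const CTransaction>), so the orphan map, its by-prevout index, and the relay code can share one immutable transaction object instead of copying it around. Below is a minimal standalone sketch of that shared-ownership layout; Tx, OutPoint, and the map names are simplified stand-ins, not Bitcoin Core's types.

```cpp
// Sketch of an orphan pool storing transactions by shared pointer,
// mirroring the CTransaction -> CTransactionRef change above.
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

using OutPoint = std::pair<std::string, uint32_t>;   // (parent txid, output index)

struct Tx {
    std::string id;                  // stand-in for GetHash()
    std::vector<OutPoint> vin;       // parent outputs this tx spends
};
using TxRef = std::shared_ptr<const Tx>;             // analogous to CTransactionRef

struct OrphanTx { TxRef tx; int64_t fromPeer; };

std::map<std::string, OrphanTx> mapOrphans;                  // txid -> orphan entry
std::map<OutPoint, std::set<std::string>> mapOrphansByPrev;  // missing prevout -> dependent txids

bool AddOrphan(const TxRef& tx, int64_t peer)
{
    if (!mapOrphans.emplace(tx->id, OrphanTx{tx, peer}).second)
        return false;                                // already tracked
    for (const OutPoint& in : tx->vin)
        mapOrphansByPrev[in].insert(tx->id);         // index by the parent we are missing
    return true;
}
```
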
@@ -627,7 +627,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return 0;
- BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
+ BOOST_FOREACH(const CTxIn& txin, it->second.tx->vin)
{
auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itPrev == mapOrphanTransactionsByPrev.end())
@@ -649,10 +649,10 @@ void EraseOrphansFor(NodeId peer)
map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
- nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
+ nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
}
}
- if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
+ if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer=%d\n", nErased, peer);
}
@@ -670,7 +670,7 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRE
{
map<uint256, COrphanTx>::iterator maybeErase = iter++;
if (maybeErase->second.nTimeExpire <= nNow) {
- nErased += EraseOrphanTx(maybeErase->second.tx.GetHash());
+ nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
} else {
nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
}
@@ -741,7 +741,7 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
- const CTransaction& orphanTx = (*mi)->second.tx;
+ const CTransaction& orphanTx = *(*mi)->second.tx;
const uint256& orphanHash = orphanTx.GetHash();
vOrphanErase.push_back(orphanHash);
}
@@ -757,6 +757,51 @@ void PeerLogicValidation::SyncTransaction(const CTransaction& tx, const CBlockIn
}
}
+static CCriticalSection cs_most_recent_block;
+static std::shared_ptr<const CBlock> most_recent_block;
+static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block;
+static uint256 most_recent_block_hash;
+
+void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
+ std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
+ CNetMsgMaker msgMaker(PROTOCOL_VERSION);
+
+ LOCK(cs_main);
+
+ static int nHighestFastAnnounce = 0;
+ if (pindex->nHeight <= nHighestFastAnnounce)
+ return;
+ nHighestFastAnnounce = pindex->nHeight;
+
+ bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
+ uint256 hashBlock(pblock->GetHash());
+
+ {
+ LOCK(cs_most_recent_block);
+ most_recent_block_hash = hashBlock;
+ most_recent_block = pblock;
+ most_recent_compact_block = pcmpctblock;
+ }
+
+ connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
+ // TODO: Avoid the repeated-serialization here
+ if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
+ return;
+ ProcessBlockAvailability(pnode->GetId());
+ CNodeState &state = *State(pnode->GetId());
+ // If the peer has, or we announced to them the previous block already,
+ // but we don't think they have this one, go ahead and announce it
+ if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
+ !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
+
+ LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
+ hashBlock.ToString(), pnode->id);
+ connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
+ state.pindexBestHeaderSent = pindex;
+ }
+ });
+}
+
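
NewPoWValidBlock above caches the newly validated block and its pre-built compact-block form behind a dedicated lock (cs_most_recent_block), so later handlers can serve it without re-reading from disk and without holding the cache lock while doing further work under cs_main. A minimal sketch of that pattern follows, with a placeholder Block type and std::mutex standing in for the project's classes.

```cpp
// Sketch of a "most recent block" cache: written once per new tip, read by
// copying the shared_ptr out under the cache lock, which is then released
// before any heavy work (avoiding inversion with the main validation lock).
#include <memory>
#include <mutex>
#include <string>

struct Block { std::string hash; /* ... */ };

static std::mutex g_cache_mutex;
static std::shared_ptr<const Block> g_most_recent_block;
static std::string g_most_recent_block_hash;

void StoreMostRecentBlock(const std::shared_ptr<const Block>& block)
{
    std::lock_guard<std::mutex> lock(g_cache_mutex);
    g_most_recent_block_hash = block->hash;
    g_most_recent_block = block;
}

std::shared_ptr<const Block> GetRecentBlockIfMatch(const std::string& want_hash)
{
    std::shared_ptr<const Block> result;
    {
        std::lock_guard<std::mutex> lock(g_cache_mutex);
        if (g_most_recent_block_hash == want_hash)
            result = g_most_recent_block;   // cheap refcount copy
    }   // cache lock released before the caller does anything expensive
    return result;
}
```
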
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
const int nNewHeight = pindexNew->nHeight;
connman->SetBestHeight(nNewHeight);
@@ -896,22 +941,23 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman& connma
connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
-void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman)
+void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
vector<CInv> vNotFound;
CNetMsgMaker msgMaker(pfrom->GetSendVersion());
LOCK(cs_main);
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
- if (pfrom->nSendSize >= nMaxSendBufferSize)
+ if (pfrom->fPauseSend)
break;
const CInv &inv = *it;
{
- boost::this_thread::interruption_point();
+ if (interruptMsgProc)
+ return;
+
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
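
The boost::this_thread::interruption_point() calls are replaced here (and in the hunks below) by polling a std::atomic<bool> (interruptMsgProc) set by the shutdown path, so long-running loops exit cooperatively instead of via thread-interruption exceptions. A standalone sketch of that polling style, using an illustrative work loop rather than the real getdata processing:

```cpp
// Sketch of cooperative interruption via an atomic flag.
#include <atomic>
#include <vector>

std::atomic<bool> interruptMsgProc{false};   // flipped to true by the shutdown path

bool ProcessWorkQueue(const std::vector<int>& work)
{
    for (int item : work) {
        if (interruptMsgProc)     // checked each iteration, no exceptions involved
            return false;         // bail out promptly; caller handles cleanup
        (void)item;               // ... process one unit of work here ...
    }
    return true;
}
```
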
@@ -920,6 +966,21 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
+ if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
+ mi->second->IsValid(BLOCK_VALID_TREE)) {
+ // If we have the block and all of its parents, but have not yet validated it,
+ // we might be in the middle of connecting it (ie in the unlock of cs_main
+ // before ActivateBestChain but after AcceptBlock).
+ // In this case, we need to run ActivateBestChain prior to checking the relay
+ // conditions below.
+ std::shared_ptr<const CBlock> a_recent_block;
+ {
+ LOCK(cs_most_recent_block);
+ a_recent_block = most_recent_block;
+ }
+ CValidationState dummy;
+ ActivateBestChain(dummy, Params(), a_recent_block);
+ }
if (chainActive.Contains(mi->second)) {
send = true;
} else {
@@ -1057,7 +1118,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
}
}
-uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
+uint32_t GetFetchFlags(CNode* pfrom, const CBlockIndex* pprev, const Consensus::Params& chainparams) {
uint32_t nFetchFlags = 0;
if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
nFetchFlags |= MSG_WITNESS_FLAG;
@@ -1065,12 +1126,27 @@ uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params
return nFetchFlags;
}
-bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman)
-{
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
+inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman& connman) {
+ BlockTransactions resp(req);
+ for (size_t i = 0; i < req.indexes.size(); i++) {
+ if (req.indexes[i] >= block.vtx.size()) {
+ LOCK(cs_main);
+ Misbehaving(pfrom->GetId(), 100);
+ LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
+ return;
+ }
+ resp.txn[i] = block.vtx[req.indexes[i]];
+ }
+ LOCK(cs_main);
+ CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+ int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
+ connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
+}
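
SendBlockTransactions factors the getblocktxn response path into one helper so both the new recent-block cache hit and the existing read-from-disk path can share the bounds check and reply construction. A simplified sketch of the same shape, with stand-in types and an optional return in place of Misbehaving/PushMessage:

```cpp
// Sketch of a bounds-checked response builder for a getblocktxn-style request.
#include <cstddef>
#include <optional>
#include <vector>

struct Txn { /* ... */ };
struct BlockTxns { std::vector<Txn> vtx; };
struct TxnRequest { std::vector<size_t> indexes; };

// Returns the requested transactions, or nullopt if any index is out of range
// (the caller would then penalize the peer, as Misbehaving(..., 100) does above).
std::optional<std::vector<Txn>> BuildBlockTxnResponse(const BlockTxns& block,
                                                      const TxnRequest& req)
{
    std::vector<Txn> resp;
    resp.reserve(req.indexes.size());
    for (size_t idx : req.indexes) {
        if (idx >= block.vtx.size())
            return std::nullopt;           // out-of-bounds request
        resp.push_back(block.vtx[idx]);
    }
    return resp;
}
```
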
+bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman& connman, std::atomic<bool>& interruptMsgProc)
+{
LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
- if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
+ if (IsArgSet("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 0)) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
@@ -1305,7 +1381,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
int64_t nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
- boost::this_thread::interruption_point();
+ if (interruptMsgProc)
+ return true;
if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
continue;
@@ -1387,7 +1464,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
{
CInv &inv = vInv[nInv];
- boost::this_thread::interruption_point();
+ if (interruptMsgProc)
+ return true;
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
@@ -1419,11 +1497,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
-
- if (pfrom->nSendSize > (nMaxSendBufferSize * 2)) {
- Misbehaving(pfrom->GetId(), 50);
- return error("send buffer size() = %u", pfrom->nSendSize);
- }
}
if (!vToFetch.empty())
@@ -1449,7 +1522,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
}
@@ -1459,10 +1532,27 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
uint256 hashStop;
vRecv >> locator >> hashStop;
+ // We might have announced the currently-being-connected tip using a
+ // compact block, which resulted in the peer sending a getblocks
+ // request, which we would otherwise respond to without the new block.
+ // To avoid this situation we simply verify that we are on our best
+ // known chain now. This is super overkill, but we handle it better
+ // for getheaders requests, and there are no known nodes which support
+ // compact blocks but still use getblocks to request blocks.
+ {
+ std::shared_ptr<const CBlock> a_recent_block;
+ {
+ LOCK(cs_most_recent_block);
+ a_recent_block = most_recent_block;
+ }
+ CValidationState dummy;
+ ActivateBestChain(dummy, Params(), a_recent_block);
+ }
+
LOCK(cs_main);
// Find the last block the caller has in the main chain
- CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
+ const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
@@ -1502,6 +1592,18 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
BlockTransactionsRequest req;
vRecv >> req;
+ std::shared_ptr<const CBlock> recent_block;
+ {
+ LOCK(cs_most_recent_block);
+ if (most_recent_block_hash == req.blockhash)
+ recent_block = most_recent_block;
+ // Unlock cs_most_recent_block to avoid cs_main lock inversion
+ }
+ if (recent_block) {
+ SendBlockTransactions(*recent_block, req, pfrom, connman);
+ return true;
+ }
+
LOCK(cs_main);
BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
@@ -1523,7 +1625,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
pfrom->vRecvGetData.push_back(inv);
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
return true;
}
@@ -1531,17 +1633,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus());
assert(ret);
- BlockTransactions resp(req);
- for (size_t i = 0; i < req.indexes.size(); i++) {
- if (req.indexes[i] >= block.vtx.size()) {
- Misbehaving(pfrom->GetId(), 100);
- LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id);
- return true;
- }
- resp.txn[i] = block.vtx[req.indexes[i]];
- }
- int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
- connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
+ SendBlockTransactions(block, req, pfrom, connman);
}
@@ -1558,7 +1650,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
CNodeState *nodestate = State(pfrom->GetId());
- CBlockIndex* pindex = NULL;
+ const CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
@@ -1589,6 +1681,14 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// if our peer has chainActive.Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
+ //
+ // It is important that we simply reset the BestHeaderSent value here,
+ // and not max(BestHeaderSent, newHeaderSent). We might have announced
+ // the currently-being-connected tip using a compact block, which
+ // resulted in the peer sending a headers request, which we respond to
+ // without the new block. By resetting the BestHeaderSent, we ensure we
+ // will re-announce the new block via headers (or compact blocks again)
+ // in the SendMessages logic.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
}
@@ -1606,7 +1706,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
deque<COutPoint> vWorkQueue;
vector<uint256> vEraseQueue;
- CTransaction tx(deserialize, vRecv);
+ CTransactionRef ptx;
+ vRecv >> ptx;
+ const CTransaction& tx = *ptx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
@@ -1619,7 +1721,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->setAskFor.erase(inv.hash);
mapAlreadyAskedFor.erase(inv.hash);
- if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs)) {
+ if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, ptx, true, &fMissingInputs)) {
mempool.check(pcoinsTip);
RelayTransaction(tx, connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
@@ -1644,7 +1746,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
mi != itByPrev->second.end();
++mi)
{
- const CTransaction& orphanTx = (*mi)->second.tx;
+ const CTransactionRef& porphanTx = (*mi)->second.tx;
+ const CTransaction& orphanTx = *porphanTx;
const uint256& orphanHash = orphanTx.GetHash();
NodeId fromPeer = (*mi)->second.fromPeer;
bool fMissingInputs2 = false;
@@ -1656,7 +1759,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (setMisbehaving.count(fromPeer))
continue;
- if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2)) {
+ if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, true, &fMissingInputs2)) {
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx, connman);
for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
@@ -1709,7 +1812,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->AddInventoryKnown(_inv);
if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
}
- AddOrphanTx(tx, pfrom->GetId());
+ AddOrphanTx(ptx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
@@ -1718,6 +1821,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
} else {
LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
+ // We will continue to reject this tx since it has rejected
+ // parents so avoid re-requesting it from other peers.
+ recentRejects->insert(tx.GetHash());
}
} else {
if (!tx.HasWitness() && !state.CorruptionPossible()) {
@@ -1778,7 +1884,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
}
- CBlockIndex *pindex = NULL;
+ const CBlockIndex *pindex = NULL;
CValidationState state;
if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
int nDoS;
@@ -1792,11 +1898,24 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
}
+ // When we succeed in decoding a block's txids from a cmpctblock
+ // message we typically jump to the BLOCKTXN handling code, with a
+ // dummy (empty) BLOCKTXN message, to re-use the logic there in
+ // completing processing of the putative block (without cs_main).
+ bool fProcessBLOCKTXN = false;
+ CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
+
+ // If we end up treating this as a plain headers message, call that as well
+ // without cs_main.
+ bool fRevertToHeaderProcessing = false;
+ CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
+
// Keep a CBlock for "optimistic" compactblock reconstructions (see
// below)
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
bool fBlockReconstructed = false;
+ {
LOCK(cs_main);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
@@ -1872,9 +1991,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
BlockTransactions txn;
txn.blockhash = cmpctblock.header.GetHash();
- CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
blockTxnMsg << txn;
- return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
+ fProcessBLOCKTXN = true;
} else {
req.blockhash = pindex->GetBlockHash();
connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
@@ -1910,11 +2028,17 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
std::vector<CBlock> headers;
headers.push_back(cmpctblock.header);
- CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
vHeadersMsg << headers;
- return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman);
+ fRevertToHeaderProcessing = true;
}
}
+ } // cs_main
+
+ if (fProcessBLOCKTXN)
+ return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
+
+ if (fRevertToHeaderProcessing)
+ return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
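
Rather than recursing into ProcessMessage while cs_main is held (the old early returns), the compact-block handler now records its decision in fProcessBLOCKTXN or fRevertToHeaderProcessing plus a prepared CDataStream, closes the lock scope, and only then re-dispatches. A small standalone sketch of that defer-until-unlock pattern, with an illustrative handler and std::mutex standing in for cs_main:

```cpp
// Sketch: prepare the follow-up call while locked, make it after unlocking.
#include <mutex>
#include <string>

std::mutex g_main_mutex;   // stand-in for cs_main

bool HandleMessage(const std::string& type, const std::string& payload)
{
    (void)type; (void)payload;   // placeholder; the real code re-enters ProcessMessage()
    return true;
}

bool HandleCompactBlock(const std::string& payload)
{
    bool deferToBlockTxn = false;
    std::string deferredPayload;
    {
        std::lock_guard<std::mutex> lock(g_main_mutex);
        // ... header/index work that needs the lock ...
        deferToBlockTxn = true;        // decide to finish via the BLOCKTXN path
        deferredPayload = payload;     // prepare the follow-up message while locked
    }                                  // lock released here
    if (deferToBlockTxn)
        return HandleMessage("blocktxn", deferredPayload);   // call made with no lock held
    return true;
}
```
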
if (fBlockReconstructed) {
// If we got here, we were able to optimistically reconstruct a
@@ -2030,7 +2154,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
- CBlockIndex *pindexLast = NULL;
+ const CBlockIndex *pindexLast = NULL;
{
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
@@ -2107,8 +2231,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
- vector<CBlockIndex *> vToFetch;
- CBlockIndex *pindexWalk = pindexLast;
+ vector<const CBlockIndex *> vToFetch;
+ const CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
@@ -2130,7 +2254,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
} else {
vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
- BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
+ BOOST_REVERSE_FOREACH(const CBlockIndex *pindex, vToFetch) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
@@ -2423,14 +2547,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
return true;
}
-// requires LOCK(cs_vRecvMsg)
-bool ProcessMessages(CNode* pfrom, CConnman& connman)
+bool ProcessMessages(CNode* pfrom, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainparams = Params();
- unsigned int nMaxSendBufferSize = connman.GetSendBufferSize();
- //if (fDebug)
- // LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
-
//
// Message format
// (4) message start
@@ -2439,40 +2558,40 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
// (4) checksum
// (x) data
//
- bool fOk = true;
+ bool fMoreWork = false;
if (!pfrom->vRecvGetData.empty())
- ProcessGetData(pfrom, chainparams.GetConsensus(), connman);
+ ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
+
+ if (pfrom->fDisconnect)
+ return false;
// this maintains the order of responses
- if (!pfrom->vRecvGetData.empty()) return fOk;
+ if (!pfrom->vRecvGetData.empty()) return true;
- std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
- while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
// Don't bother if send buffer is too full to respond anyway
- if (pfrom->nSendSize >= nMaxSendBufferSize)
- break;
-
- // get next message
- CNetMessage& msg = *it;
-
- //if (fDebug)
- // LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
- // msg.hdr.nMessageSize, msg.vRecv.size(),
- // msg.complete() ? "Y" : "N");
-
- // end, if an incomplete message is found
- if (!msg.complete())
- break;
-
- // at this point, any failure means we can delete the current message
- it++;
+ if (pfrom->fPauseSend)
+ return false;
+ std::list<CNetMessage> msgs;
+ {
+ LOCK(pfrom->cs_vProcessMsg);
+ if (pfrom->vProcessMsg.empty())
+ return false;
+ // Just take one message
+ msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
+ pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
+ pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman.GetReceiveFloodSize();
+ fMoreWork = !pfrom->vProcessMsg.empty();
+ }
+ CNetMessage& msg(msgs.front());
+
+ msg.SetVersion(pfrom->GetRecvVersion());
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
- fOk = false;
- break;
+ pfrom->fDisconnect = true;
+ return false;
}
// Read header
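
The ProcessMessages changes above make it pull exactly one message per call: it splices the front element out of vProcessMsg under cs_vProcessMsg, adjusts nProcessQueueSize, toggles fPauseRecv as backpressure, and reports via fMoreWork whether it should be called again. A standalone sketch of that take-one-and-report pattern with simplified types:

```cpp
// Sketch: dequeue one message from a lock-guarded list with size accounting.
#include <cstddef>
#include <list>
#include <mutex>
#include <string>
#include <utility>

struct Msg { std::string payload; size_t size() const { return payload.size(); } };

struct Peer {
    std::mutex cs_queue;            // stand-in for cs_vProcessMsg
    std::list<Msg> processQueue;
    size_t queueBytes = 0;
    bool pauseRecv = false;
};

// Dequeues at most one message into 'out'; returns true on success and reports
// via 'moreWork' whether the caller should come back for another round.
bool TakeOneMessage(Peer& peer, Msg& out, bool& moreWork, size_t floodLimitBytes)
{
    moreWork = false;
    std::list<Msg> one;
    {
        std::lock_guard<std::mutex> lock(peer.cs_queue);
        if (peer.processQueue.empty())
            return false;
        // splice() moves the list node; no copy of the message payload is made.
        one.splice(one.begin(), peer.processQueue, peer.processQueue.begin());
        peer.queueBytes -= one.front().size();
        peer.pauseRecv = peer.queueBytes > floodLimitBytes;   // backpressure toggle
        moreWork = !peer.processQueue.empty();
    }
    out = std::move(one.front());
    return true;
}
```
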
@@ -2480,7 +2599,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
if (!hdr.IsValid(chainparams.MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
- continue;
+ return fMoreWork;
}
string strCommand = hdr.GetCommand();
@@ -2496,15 +2615,18 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
SanitizeString(strCommand), nMessageSize,
HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
- continue;
+ return fMoreWork;
}
// Process message
bool fRet = false;
try
{
- fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman);
- boost::this_thread::interruption_point();
+ fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc);
+ if (interruptMsgProc)
+ return false;
+ if (!pfrom->vRecvGetData.empty())
+ fMoreWork = true;
}
catch (const std::ios_base::failure& e)
{
@@ -2529,9 +2651,6 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
- catch (const boost::thread_interrupted&) {
- throw;
- }
catch (const std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
@@ -2541,14 +2660,7 @@ bool ProcessMessages(CNode* pfrom, CConnman& connman)
if (!fRet)
LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
- break;
- }
-
- // In case the connection got shut down, its receive buffer was wiped
- if (!pfrom->fDisconnect)
- pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
-
- return fOk;
+ return fMoreWork;
}
class CompareInvMempoolOrder
@@ -2568,7 +2680,7 @@ public:
}
};
-bool SendMessages(CNode* pto, CConnman& connman)
+bool SendMessages(CNode* pto, CConnman& connman, std::atomic<bool>& interruptMsgProc)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
@@ -2622,6 +2734,8 @@ bool SendMessages(CNode* pto, CConnman& connman)
state.fShouldBan = false;
if (pto->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
+ else if (pto->fAddnode)
+ LogPrintf("Warning: not punishing addnoded peer %s!\n", pto->addr.ToString());
else {
pto->fDisconnect = true;
if (pto->addr.IsLocal())
@@ -2718,7 +2832,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
bool fRevertToInv = ((!state.fPreferHeaders &&
(!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
- CBlockIndex *pBestIndex = NULL; // last header queued for delivery
+ const CBlockIndex *pBestIndex = NULL; // last header queued for delivery
ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
@@ -2729,7 +2843,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
assert(mi != mapBlockIndex.end());
- CBlockIndex *pindex = mi->second;
+ const CBlockIndex *pindex = mi->second;
if (chainActive[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
@@ -2773,15 +2887,31 @@ bool SendMessages(CNode* pto, CConnman& connman)
if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
// We only send up to 1 block as header-and-ids, as otherwise
// probably means we're doing an initial-ish-sync or they're slow
- LogPrint("net", "%s sending header-and-ids %s to peer %d\n", __func__,
+ LogPrint("net", "%s sending header-and-ids %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->id);
- //TODO: Shouldn't need to reload block from disk, but requires refactor
- CBlock block;
- bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
- assert(ret);
- CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
+
int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
- connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+
+ bool fGotBlockFromCache = false;
+ {
+ LOCK(cs_most_recent_block);
+ if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
+ if (state.fWantsCmpctWitness)
+ connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
+ else {
+ CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
+ connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ }
+ fGotBlockFromCache = true;
+ }
+ }
+ if (!fGotBlockFromCache) {
+ CBlock block;
+ bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
+ assert(ret);
+ CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
+ connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
+ }
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
if (vHeaders.size() > 1) {
@@ -2806,7 +2936,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
assert(mi != mapBlockIndex.end());
- CBlockIndex *pindex = mi->second;
+ const CBlockIndex *pindex = mi->second;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
@@ -2991,10 +3121,10 @@ bool SendMessages(CNode* pto, CConnman& connman)
//
vector<CInv> vGetData;
if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
- vector<CBlockIndex*> vToDownload;
+ vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
- BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
+ BOOST_FOREACH(const CBlockIndex *pindex, vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);