Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r-- | src/net_processing.cpp | 517 |
1 file changed, 287 insertions, 230 deletions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 20426eaceb..61e6ae7448 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2016 The Bitcoin Core developers +// Copyright (c) 2009-2017 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -51,12 +51,13 @@ struct COrphanTx { NodeId fromPeer; int64_t nTimeExpire; }; -std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main); -std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main); -void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +static CCriticalSection g_cs_orphans; +std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans); +std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans); +void EraseOrphansFor(NodeId peer); -static size_t vExtraTxnForCompactIt = 0; -static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main); +static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; +static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8] @@ -127,7 +128,7 @@ namespace { int g_outbound_peers_with_protect_from_disconnect = 0; /** When our tip was last updated. */ - int64_t g_last_tip_update = 0; + std::atomic<int64_t> g_last_tip_update(0); /** Relay map, protected by cs_main. */ typedef std::map<uint256, CTransactionRef> MapRelay; @@ -338,7 +339,7 @@ bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* CNodeState *state = State(nodeid); assert(state != nullptr); - // Short-circuit most stuff in case its from the same node + // Short-circuit most stuff in case it is from the same node std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { if (pit) { @@ -373,10 +374,11 @@ void ProcessBlockAvailability(NodeId nodeid) { assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { - BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); - if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { - if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) - state->pindexBestKnownBlock = itOld->second; + const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock); + if (pindex && pindex->nChainWork > 0) { + if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { + state->pindexBestKnownBlock = pindex; + } state->hashLastUnknownBlock.SetNull(); } } @@ -389,17 +391,24 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { ProcessBlockAvailability(nodeid); - BlockMap::iterator it = mapBlockIndex.find(hash); - if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { + const CBlockIndex* pindex = LookupBlockIndex(hash); + if (pindex && pindex->nChainWork > 0) { // An actually better block was announced. 
- if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) - state->pindexBestKnownBlock = it->second; + if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { + state->pindexBestKnownBlock = pindex; + } } else { // An unknown block was announced; just assume that the latest one is the best one. state->hashLastUnknownBlock = hash; } } +/** + * When a peer sends us a valid block, instruct it to announce blocks to us + * using CMPCTBLOCK if possible by adding its nodeid to the end of + * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by + * removing the first element if necessary. + */ void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) { AssertLockHeld(cs_main); CNodeState* nodestate = State(nodeid); @@ -509,7 +518,7 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<con } // Iterate over those blocks in vToFetch (in forward direction), adding the ones that - // are not yet downloaded and not in flight to vBlocks. In the mean time, update + // are not yet downloaded and not in flight to vBlocks. In the meantime, update // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's // already part of our chain (and therefore don't need it even if pruned). for (const CBlockIndex* pindex : vToFetch) { @@ -631,7 +640,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { // mapOrphanTransactions // -void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) @@ -642,7 +651,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_RE vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } -bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { const uint256& hash = tx->GetHash(); if (mapOrphanTransactions.count(hash)) @@ -675,7 +684,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE return true; } -int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash); if (it == mapOrphanTransactions.end()) @@ -695,6 +704,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) void EraseOrphansFor(NodeId peer) { + LOCK(g_cs_orphans); int nErased = 0; std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) @@ -709,8 +719,10 @@ void EraseOrphansFor(NodeId peer) } -unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) { + LOCK(g_cs_orphans); + unsigned int nEvicted = 0; static int64_t nNextSweep; int64_t nNow = GetTime(); @@ -745,8 +757,12 @@ unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRE return nEvicted; } -// Requires cs_main. 
-void Misbehaving(NodeId pnode, int howmuch) +/** + * Mark a misbehaving peer to be banned depending upon the value of `-banscore`. + * + * Requires cs_main. + */ +void Misbehaving(NodeId pnode, int howmuch, const std::string& message) { if (howmuch == 0) return; @@ -757,12 +773,13 @@ void Misbehaving(NodeId pnode, int howmuch) state->nMisbehavior += howmuch; int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); + std::string message_prefixed = message.empty() ? "" : (": " + message); if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) { - LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); + LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed); state->fShouldBan = true; } else - LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior); + LogPrint(BCLog::NET, "%s: %s peer=%d (%d -> %d)%s\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior, message_prefixed); } @@ -803,8 +820,12 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &schedu scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000); } +/** + * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected + * block. Also save the time of the last tip update. + */ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) { - LOCK(cs_main); + LOCK(g_cs_orphans); std::vector<uint256> vOrphanErase; @@ -823,7 +844,7 @@ void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pb } } - // Erase orphan transactions include or precluded by this block + // Erase orphan transactions included or precluded by this block if (vOrphanErase.size()) { int nErased = 0; for (uint256 &orphanHash : vOrphanErase) { @@ -842,6 +863,10 @@ static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_bloc static uint256 most_recent_block_hash; static bool fWitnessesPresentInMostRecentCompactBlock; +/** + * Maintain state about the best-seen block and fast-announce a compact block + * to compatible peers. + */ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) { std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); @@ -883,10 +908,15 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std: }); } +/** + * Update our best height and announce any block hashes which weren't previously + * in chainActive to our peers. + */ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { const int nNewHeight = pindexNew->nHeight; connman->SetBestHeight(nNewHeight); + SetServiceFlagsIBDCache(!fInitialDownload); if (!fInitialDownload) { // Find the hashes of all blocks that weren't previously in the best chain. 
std::vector<uint256> vHashes; @@ -914,6 +944,10 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB nTimeBestReceived = GetTime(); } +/** + * Handle invalid block rejection and consequent peer banning, maintain which + * peers announce compact blocks. + */ void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) { LOCK(cs_main); @@ -971,15 +1005,19 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) recentRejects->reset(); } + { + LOCK(g_cs_orphans); + if (mapOrphanTransactions.count(inv.hash)) return true; + } + return recentRejects->contains(inv.hash) || mempool.exists(inv.hash) || - mapOrphanTransactions.count(inv.hash) || pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1 pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1)); } case MSG_BLOCK: case MSG_WITNESS_BLOCK: - return mapBlockIndex.count(inv.hash); + return LookupBlockIndex(inv.hash) != nullptr; } // Don't know what it is, just say we already got one return true; @@ -1030,180 +1068,197 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } -void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc) +void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc) { - std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin(); - std::vector<CInv> vNotFound; - const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); - LOCK(cs_main); + bool send = false; + std::shared_ptr<const CBlock> a_recent_block; + std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; + bool fWitnessesPresentInARecentCompactBlock; + { + LOCK(cs_most_recent_block); + a_recent_block = most_recent_block; + a_recent_compact_block = most_recent_compact_block; + fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock; + } - while (it != pfrom->vRecvGetData.end()) { - // Don't bother if send buffer is too full to respond anyway - if (pfrom->fPauseSend) - break; + bool need_activate_chain = false; + { + LOCK(cs_main); + const CBlockIndex* pindex = LookupBlockIndex(inv.hash); + if (pindex) { + if (pindex->nChainTx && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && + pindex->IsValid(BLOCK_VALID_TREE)) { + // If we have the block and all of its parents, but have not yet validated it, + // we might be in the middle of connecting it (ie in the unlock of cs_main + // before ActivateBestChain but after AcceptBlock). + // In this case, we need to run ActivateBestChain prior to checking the relay + // conditions below. 
+ need_activate_chain = true; + } + } + } // release cs_main before calling ActivateBestChain + if (need_activate_chain) { + CValidationState dummy; + ActivateBestChain(dummy, Params(), a_recent_block); + } - const CInv &inv = *it; - { - if (interruptMsgProc) - return; + LOCK(cs_main); + const CBlockIndex* pindex = LookupBlockIndex(inv.hash); + if (pindex) { + send = BlockRequestAllowed(pindex, consensusParams); + if (!send) { + LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); + } + } + const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); + // disconnect node in case we have reached the outbound limit for serving historical blocks + // never disconnect whitelisted nodes + if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) + { + LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId()); - it++; + //disconnect node + pfrom->fDisconnect = true; + send = false; + } + // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold + if (send && !pfrom->fWhitelisted && ( + (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) + )) { + LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId()); - if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) + //disconnect node and prevent it from stalling (would otherwise wait for the missing block) + pfrom->fDisconnect = true; + send = false; + } + // Pruned nodes may have deleted the block, so check whether + // it's available before trying to send. 
+ if (send && (pindex->nStatus & BLOCK_HAVE_DATA)) + { + std::shared_ptr<const CBlock> pblock; + if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { + pblock = a_recent_block; + } else { + // Send block from disk + std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); + if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) + assert(!"cannot load block from disk"); + pblock = pblockRead; + } + if (inv.type == MSG_BLOCK) + connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock)); + else if (inv.type == MSG_WITNESS_BLOCK) + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); + else if (inv.type == MSG_FILTERED_BLOCK) + { + bool sendMerkleBlock = false; + CMerkleBlock merkleBlock; { - bool send = false; - BlockMap::iterator mi = mapBlockIndex.find(inv.hash); - std::shared_ptr<const CBlock> a_recent_block; - std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; - bool fWitnessesPresentInARecentCompactBlock; - { - LOCK(cs_most_recent_block); - a_recent_block = most_recent_block; - a_recent_compact_block = most_recent_compact_block; - fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock; + LOCK(pfrom->cs_filter); + if (pfrom->pfilter) { + sendMerkleBlock = true; + merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter); } - if (mi != mapBlockIndex.end()) - { - if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) && - mi->second->IsValid(BLOCK_VALID_TREE)) { - // If we have the block and all of its parents, but have not yet validated it, - // we might be in the middle of connecting it (ie in the unlock of cs_main - // before ActivateBestChain but after AcceptBlock). - // In this case, we need to run ActivateBestChain prior to checking the relay - // conditions below. - CValidationState dummy; - ActivateBestChain(dummy, Params(), a_recent_block); - } - send = BlockRequestAllowed(mi->second, consensusParams); - if (!send) { - LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId()); - } + } + if (sendMerkleBlock) { + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); + // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see + // This avoids hurting performance by pointlessly requiring a round-trip + // Note that there is currently no way for a node to request any single transactions we didn't send here - + // they must either disconnect and retry or request the full block. + // Thus, the protocol spec specified allows for us to provide duplicate txn here, + // however we MUST always provide at least what the remote peer needs + typedef std::pair<unsigned int, uint256> PairType; + for (PairType& pair : merkleBlock.vMatchedTxn) + connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first])); + } + // else + // no response + } + else if (inv.type == MSG_CMPCT_BLOCK) + { + // If a peer is asking for old blocks, we're almost guaranteed + // they won't have a useful mempool to match against a compact block, + // and we don't feel like constructing the object for them, so + // instead we respond with the full, non-compact block. + bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness; + int nSendFlags = fPeerWantsWitness ? 
0 : SERIALIZE_TRANSACTION_NO_WITNESS; + if (CanDirectFetch(consensusParams) && pindex->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { + if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { + connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); + } else { + CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness); + connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } - // disconnect node in case we have reached the outbound limit for serving historical blocks - // never disconnect whitelisted nodes - if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) - { - LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId()); + } else { + connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); + } + } - //disconnect node - pfrom->fDisconnect = true; - send = false; - } - // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold - if (send && !pfrom->fWhitelisted && ( - (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) - )) { - LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId()); + // Trigger the peer node to send a getblocks request for the next batch of inventory + if (inv.hash == pfrom->hashContinue) + { + // Bypass PushInventory, this must send even if redundant, + // and we want it right after the last block so they don't + // wait for other stuff first. + std::vector<CInv> vInv; + vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); + connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv)); + pfrom->hashContinue.SetNull(); + } + } +} - //disconnect node and prevent it from stalling (would otherwise wait for the missing block) - pfrom->fDisconnect = true; - send = false; - } - // Pruned nodes may have deleted the block, so check whether - // it's available before trying to send. 
- if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) - { - std::shared_ptr<const CBlock> pblock; - if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) { - pblock = a_recent_block; - } else { - // Send block from disk - std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); - if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams)) - assert(!"cannot load block from disk"); - pblock = pblockRead; - } - if (inv.type == MSG_BLOCK) - connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock)); - else if (inv.type == MSG_WITNESS_BLOCK) - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); - else if (inv.type == MSG_FILTERED_BLOCK) - { - bool sendMerkleBlock = false; - CMerkleBlock merkleBlock; - { - LOCK(pfrom->cs_filter); - if (pfrom->pfilter) { - sendMerkleBlock = true; - merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter); - } - } - if (sendMerkleBlock) { - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); - // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see - // This avoids hurting performance by pointlessly requiring a round-trip - // Note that there is currently no way for a node to request any single transactions we didn't send here - - // they must either disconnect and retry or request the full block. - // Thus, the protocol spec specified allows for us to provide duplicate txn here, - // however we MUST always provide at least what the remote peer needs - typedef std::pair<unsigned int, uint256> PairType; - for (PairType& pair : merkleBlock.vMatchedTxn) - connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first])); - } - // else - // no response - } - else if (inv.type == MSG_CMPCT_BLOCK) - { - // If a peer is asking for old blocks, we're almost guaranteed - // they won't have a useful mempool to match against a compact block, - // and we don't feel like constructing the object for them, so - // instead we respond with the full, non-compact block. - bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness; - int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS; - if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { - if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) { - connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); - } else { - CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness); - connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); - } - } else { - connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); - } - } +void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc) +{ + AssertLockNotHeld(cs_main); - // Trigger the peer node to send a getblocks request for the next batch of inventory - if (inv.hash == pfrom->hashContinue) - { - // Bypass PushInventory, this must send even if redundant, - // and we want it right after the last block so they don't - // wait for other stuff first. 
- std::vector<CInv> vInv; - vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); - connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv)); - pfrom->hashContinue.SetNull(); - } - } - } - else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX) - { - // Send stream from relay memory - bool push = false; - auto mi = mapRelay.find(inv.hash); - int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); - if (mi != mapRelay.end()) { - connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); + std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin(); + std::vector<CInv> vNotFound; + const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); + { + LOCK(cs_main); + + while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) { + if (interruptMsgProc) + return; + // Don't bother if send buffer is too full to respond anyway + if (pfrom->fPauseSend) + break; + + const CInv &inv = *it; + it++; + + // Send stream from relay memory + bool push = false; + auto mi = mapRelay.find(inv.hash); + int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); + if (mi != mapRelay.end()) { + connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); + push = true; + } else if (pfrom->timeLastMempoolReq) { + auto txinfo = mempool.info(inv.hash); + // To protect privacy, do not answer getdata using the mempool when + // that TX couldn't have been INVed in reply to a MEMPOOL request. + if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) { + connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx)); push = true; - } else if (pfrom->timeLastMempoolReq) { - auto txinfo = mempool.info(inv.hash); - // To protect privacy, do not answer getdata using the mempool when - // that TX couldn't have been INVed in reply to a MEMPOOL request. - if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) { - connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx)); - push = true; - } - } - if (!push) { - vNotFound.push_back(inv); } } + if (!push) { + vNotFound.push_back(inv); + } // Track requests for our stuff. GetMainSignals().Inventory(inv.hash); + } + } // release cs_main - if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) - break; + if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) { + const CInv &inv = *it; + if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) { + it++; + ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc); } } @@ -1234,8 +1289,7 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac for (size_t i = 0; i < req.indexes.size(); i++) { if (req.indexes[i] >= block.vtx.size()) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId()); + Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId())); return; } resp.txn[i] = block.vtx[req.indexes[i]]; @@ -1270,7 +1324,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve // don't connect before giving DoS points // - Once a headers message is received that is valid and does connect, // nUnconnectingHeaders gets reset back to 0. 
- if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { + if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", @@ -1292,15 +1346,15 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve uint256 hashLastBlock; for (const CBlockHeader& header : headers) { if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { - Misbehaving(pfrom->GetId(), 20); - return error("non-continuous headers sequence"); + Misbehaving(pfrom->GetId(), 20, "non-continuous headers sequence"); + return false; } hashLastBlock = header.GetHash(); } // If we don't have the last header, then they'll have given us // something new (if these headers are valid). - if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) { + if (!LookupBlockIndex(hashLastBlock)) { received_new_header = true; } } @@ -1312,9 +1366,11 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve if (state.IsInvalid(nDoS)) { LOCK(cs_main); if (nDoS > 0) { - Misbehaving(pfrom->GetId(), nDoS); + Misbehaving(pfrom->GetId(), nDoS, "invalid header received"); + } else { + LogPrint(BCLog::NET, "peer=%d: invalid header received\n", pfrom->GetId()); } - if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) { + if (punish_duplicate_invalid && LookupBlockIndex(first_invalid_header.GetHash())) { // Goal: don't allow outbound peers to use up our outbound // connection slots if they are on incompatible chains. // @@ -1348,7 +1404,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve // etc), and not just the duplicate-invalid case. pfrom->fDisconnect = true; } - return error("invalid header received"); + return false; } } @@ -1614,7 +1670,13 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr pfrom->cleanSubVer = cleanSubVer; } pfrom->nStartingHeight = nStartingHeight; - pfrom->fClient = !(nServices & NODE_NETWORK); + + // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients" + pfrom->fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED)); + + // set nodes not capable of serving the complete blockchain history as "limited nodes" + pfrom->m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED)); + { LOCK(pfrom->cs_filter); pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message @@ -1757,8 +1819,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (vAddr.size() > 1000) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message addr size() = %u", vAddr.size()); + Misbehaving(pfrom->GetId(), 20, strprintf("message addr size() = %u", vAddr.size())); + return false; } // Store the new addresses @@ -1773,7 +1835,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr // We only bother storing full nodes, though this may include // things which we would not make an outbound connection to, in // part because we may make feeler connections to them. 
- if (!MayHaveUsefulAddressDB(addr.nServices)) + if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) continue; if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) @@ -1833,8 +1895,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message inv size() = %u", vInv.size()); + Misbehaving(pfrom->GetId(), 20, strprintf("message inv size() = %u", vInv.size())); + return false; } bool fBlocksOnly = !fRelayTxes; @@ -1894,8 +1956,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("message getdata size() = %u", vInv.size()); + Misbehaving(pfrom->GetId(), 20, strprintf("message getdata size() = %u", vInv.size())); + return false; } LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId()); @@ -1989,13 +2051,13 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr LOCK(cs_main); - BlockMap::iterator it = mapBlockIndex.find(req.blockhash); - if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) { + const CBlockIndex* pindex = LookupBlockIndex(req.blockhash); + if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have", pfrom->GetId()); return true; } - if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { + if (pindex->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { // If an older block is requested (should never happen in practice, // but can happen in tests) send a block response instead of a // blocktxn response. Sending a full block response instead of a @@ -2008,12 +2070,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? 
MSG_WITNESS_BLOCK : MSG_BLOCK; inv.hash = req.blockhash; pfrom->vRecvGetData.push_back(inv); - ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); + // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main) return true; } CBlock block; - bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus()); + bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()); assert(ret); SendBlockTransactions(block, req, pfrom, connman); @@ -2037,10 +2099,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr if (locator.IsNull()) { // If locator is null, return the hashStop block - BlockMap::iterator mi = mapBlockIndex.find(hashStop); - if (mi == mapBlockIndex.end()) + pindex = LookupBlockIndex(hashStop); + if (!pindex) { return true; - pindex = (*mi).second; + } if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) { LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom->GetId()); @@ -2101,7 +2163,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr CInv inv(MSG_TX, tx.GetHash()); pfrom->AddInventoryKnown(inv); - LOCK(cs_main); + LOCK2(cs_main, g_cs_orphans); bool fMissingInputs = false; CValidationState state; @@ -2279,14 +2341,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr { LOCK(cs_main); - if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) { + if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers if (!IsInitialBlockDownload()) connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); return true; } - if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) { + if (!LookupBlockIndex(cmpctblock.header.GetHash())) { received_new_header = true; } } @@ -2297,9 +2359,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) { - LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId()); LOCK(cs_main); - Misbehaving(pfrom->GetId(), nDoS); + Misbehaving(pfrom->GetId(), nDoS, strprintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId())); } else { LogPrint(BCLog::NET, "Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId()); } @@ -2324,7 +2385,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr bool fBlockReconstructed = false; { - LOCK(cs_main); + LOCK2(cs_main, g_cs_orphans); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); @@ -2385,8 +2446,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us invalid compact block\n", pfrom->GetId()); + Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block\n", pfrom->GetId())); return true; } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so just request it @@ -2513,8 
+2573,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist - Misbehaving(pfrom->GetId(), 100); - LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId()); + Misbehaving(pfrom->GetId(), 100, strprintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId())); return true; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( @@ -2576,8 +2635,8 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { LOCK(cs_main); - Misbehaving(pfrom->GetId(), 20); - return error("headers message size = %u", nCount); + Misbehaving(pfrom->GetId(), 20, strprintf("headers message size = %u", nCount)); + return false; } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { @@ -2838,7 +2897,7 @@ static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman* connman) CNodeState &state = *State(pnode->GetId()); for (const CBlockReject& reject : state.rejects) { - connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock)); + connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK), reject.chRejectCode, reject.strRejectReason, reject.hashBlock)); } state.rejects.clear(); @@ -3271,9 +3330,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptM // then send all headers past that one. If we come across any // headers that aren't on chainActive, give up. for (const uint256 &hash : pto->vBlockHashesToAnnounce) { - BlockMap::iterator mi = mapBlockIndex.find(hash); - assert(mi != mapBlockIndex.end()); - const CBlockIndex *pindex = mi->second; + const CBlockIndex* pindex = LookupBlockIndex(hash); + assert(pindex); if (chainActive[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block fRevertToInv = true; @@ -3364,9 +3422,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptM // in the past. if (!pto->vBlockHashesToAnnounce.empty()) { const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); - BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); - assert(mi != mapBlockIndex.end()); - const CBlockIndex *pindex = mi->second; + const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce); + assert(pindex); // Warn if we're announcing a block that is not on the main chain. // This should be very rare and could be optimized out. @@ -3586,7 +3643,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptM // Message: getdata (blocks) // std::vector<CInv> vGetData; - if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector<const CBlockIndex*> vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); |
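Note on the hunks above that introduce `g_cs_orphans`: the orphan-transaction maps move from `cs_main` to a dedicated lock, and `EraseOrphansFor()`/`LimitOrphanTxSize()` now take that lock internally instead of requiring the caller to hold it, while `AddOrphanTx()`/`EraseOrphanTx()` keep the caller-locking annotation. A minimal standalone sketch of that split, using `std::mutex`/`std::lock_guard` in place of Bitcoin Core's `CCriticalSection`/`LOCK` macros (names below are illustrative, not the real ones):

```cpp
#include <cstdint>
#include <iterator>
#include <map>
#include <mutex>

using NodeId = int64_t;
struct OrphanTx { NodeId fromPeer = 0; /* plus the tx ref and expiry in the real struct */ };

static std::mutex g_orphans_mutex;              // stands in for g_cs_orphans
static std::map<uint64_t, OrphanTx> g_orphans;  // stands in for mapOrphanTransactions

// Callee-locking style: safe to call without holding the lock. This is the new
// shape of EraseOrphansFor()/LimitOrphanTxSize() in the diff above.
void EraseOrphansForPeer(NodeId peer)
{
    std::lock_guard<std::mutex> lock(g_orphans_mutex);
    for (auto it = g_orphans.begin(); it != g_orphans.end();) {
        it = (it->second.fromPeer == peer) ? g_orphans.erase(it) : std::next(it);
    }
}

// Caller-locking style: the caller must already hold the lock. This matches the
// functions re-annotated EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans), e.g. AddOrphanTx().
bool AddOrphanAssumingLockHeld(uint64_t txid, OrphanTx orphan)
{
    return g_orphans.emplace(txid, std::move(orphan)).second;
}
```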
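The `vExtraTxnForCompact`/`vExtraTxnForCompactIt` hunks move the compact-block "extra transactions" buffer and its cursor under `g_cs_orphans`. The container itself is just a bounded keep-the-last-N buffer driven by a modulo cursor; a tiny generic sketch of that idiom (illustrative class, not the real code):

```cpp
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Keep-the-last-N buffer, the same idiom AddToCompactExtraTransactions() uses
// to retain recently evicted transactions for compact-block reconstruction.
class BoundedRing {
public:
    explicit BoundedRing(size_t max) : m_max(max) {}

    void Add(std::string item)
    {
        if (m_max == 0) return;
        if (m_items.size() < m_max) {
            m_items.emplace_back(std::move(item));  // still growing toward the cap
        } else {
            m_items[m_cursor] = std::move(item);    // overwrite the oldest slot
        }
        m_cursor = (m_cursor + 1) % m_max;
    }

    const std::vector<std::string>& Items() const { return m_items; }

private:
    size_t m_max;
    size_t m_cursor = 0;
    std::vector<std::string> m_items;
};
```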
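`g_last_tip_update` becomes `std::atomic<int64_t>` because it is written on the message-handling/validation path but read by the scheduler-driven stale-tip check. A minimal sketch of why the atomic is needed; the threads and the 30-minute threshold here are illustrative, not the real scheduler wiring:

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

static std::atomic<int64_t> g_last_tip_update{0};

int64_t NowSeconds()
{
    using namespace std::chrono;
    return duration_cast<seconds>(system_clock::now().time_since_epoch()).count();
}

int main()
{
    // Writer: updated whenever a block is connected (message-handler thread).
    std::thread writer([] { g_last_tip_update = NowSeconds(); });

    // Reader: periodic stale-tip check (scheduler thread). With a plain
    // int64_t this concurrent read/write would be a data race.
    std::thread reader([] {
        const bool maybe_stale = NowSeconds() - g_last_tip_update.load() > 30 * 60;
        (void)maybe_stale;
    });

    writer.join();
    reader.join();
}
```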
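Many hunks replace `mapBlockIndex.find(hash)` plus iterator checks with `LookupBlockIndex(hash)` and a null check. A rough standalone sketch of that refactor pattern, using a hypothetical `Index` type and a plain `std::map` rather than the actual `BlockMap` helper from validation.h:

```cpp
#include <map>
#include <string>

struct Index { int height = 0; };
static std::map<std::string, Index*> g_index;   // stands in for mapBlockIndex

// Helper in the spirit of LookupBlockIndex(): returns the entry or nullptr.
Index* Lookup(const std::string& hash)
{
    auto it = g_index.find(hash);
    return it == g_index.end() ? nullptr : it->second;
}

bool HaveBlock(const std::string& hash)
{
    // Before: iterator boilerplate at every call site.
    //   auto mi = g_index.find(hash);
    //   if (mi == g_index.end()) return false; /* ... use mi->second ... */
    // After: one call plus a null check.
    const Index* pindex = Lookup(hash);
    return pindex != nullptr;
}
```

The call-site simplification is the whole point: conditions like `mapBlockIndex.count(inv.hash)` become `LookupBlockIndex(inv.hash) != nullptr`, and the pointer can be used directly when non-null.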
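The `Misbehaving()` hunks add an optional human-readable reason, appended to a log line that is now gated behind `BCLog::NET` instead of unconditional `LogPrintf`. A hedged sketch of the new call shape, with the per-peer state and logging stubbed out:

```cpp
#include <cstdio>
#include <cstdint>
#include <string>

static int g_misbehavior_score = 0;   // stands in for per-peer CNodeState::nMisbehavior
static const int BAN_THRESHOLD = 100; // stands in for -banscore

void Misbehaving(int64_t peer, int howmuch, const std::string& message = "")
{
    if (howmuch == 0) return;
    g_misbehavior_score += howmuch;
    const std::string suffix = message.empty() ? "" : (": " + message);
    if (g_misbehavior_score >= BAN_THRESHOLD && g_misbehavior_score - howmuch < BAN_THRESHOLD) {
        std::printf("peer=%lld (%d -> %d) BAN THRESHOLD EXCEEDED%s\n",
                    (long long)peer, g_misbehavior_score - howmuch, g_misbehavior_score, suffix.c_str());
    } else {
        std::printf("peer=%lld (%d -> %d)%s\n",
                    (long long)peer, g_misbehavior_score - howmuch, g_misbehavior_score, suffix.c_str());
    }
}

// Call sites in the patch fold the old separate LogPrintf into the reason, e.g.:
//   Misbehaving(pfrom->GetId(), 20, "non-continuous headers sequence");
```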
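The largest hunk splits `ProcessGetData()`: transaction getdata entries at the front of the queue are answered inside a single `cs_main` scope, and at most one block-type entry is handled per pass via the new `ProcessGetBlockData()`, which may call `ActivateBestChain()` only after `cs_main` has been released. A very rough control-flow sketch with placeholder types, intended only to make the new two-phase shape visible:

```cpp
#include <deque>

enum class InvType { TX, WITNESS_TX, BLOCK, FILTERED_BLOCK, CMPCT_BLOCK, WITNESS_BLOCK };
struct Inv { InvType type; };
struct Peer { std::deque<Inv> getdata_queue; bool pause_send = false; };

bool IsTxType(InvType t)    { return t == InvType::TX || t == InvType::WITNESS_TX; }
bool IsBlockType(InvType t) { return !IsTxType(t); }

void ServeTransaction(Peer&, const Inv&) { /* mapRelay / mempool lookup; needs cs_main */ }
void ServeBlock(Peer&, const Inv&)       { /* ProcessGetBlockData(): may ActivateBestChain first */ }

void ProcessGetData(Peer& peer)
{
    auto it = peer.getdata_queue.begin();

    // Phase 1: answer the run of leading tx requests inside one lock scope
    // (the real code takes cs_main here and releases it before phase 2).
    while (it != peer.getdata_queue.end() && IsTxType(it->type) && !peer.pause_send) {
        ServeTransaction(peer, *it++);
    }

    // Phase 2: handle at most one block request per call, entered without
    // cs_main held, so ActivateBestChain() can run safely if needed.
    if (it != peer.getdata_queue.end() && !peer.pause_send && IsBlockType(it->type)) {
        ServeBlock(peer, *it++);
    }

    // Anything left stays queued; the message-processing loop comes back around,
    // which is also why the getblocktxn fallback no longer recurses into this function.
    peer.getdata_queue.erase(peer.getdata_queue.begin(), it);
}
```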
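Finally, the version-message and getdata hunks classify peers from their service bits: `fClient` now means "serves no blocks at all", while the new `m_limited_node` means "serves only recent blocks" (NODE_NETWORK_LIMITED without NODE_NETWORK). A standalone sketch of that classification and of the resulting block-download gate; the bit values and helper names are illustrative, the real constants live in protocol.h:

```cpp
#include <cstdint>

// Illustrative service-bit values (see protocol.h for the real constants).
static const uint64_t NODE_NETWORK_BIT         = 1ULL << 0;
static const uint64_t NODE_NETWORK_LIMITED_BIT = 1ULL << 10;

struct PeerFlags {
    bool fClient;        // peer serves no blocks at all
    bool m_limited_node; // peer serves only the most recent blocks
};

PeerFlags ClassifyPeer(uint64_t nServices)
{
    PeerFlags f;
    f.fClient        = (nServices & NODE_NETWORK_BIT) == 0 && (nServices & NODE_NETWORK_LIMITED_BIT) == 0;
    f.m_limited_node = (nServices & NODE_NETWORK_BIT) == 0 && (nServices & NODE_NETWORK_LIMITED_BIT) != 0;
    return f;
}

// Roughly mirrors the SendMessages() gate in the diff: only ask a peer for
// blocks if it serves them, and while we still need historical blocks
// (initial block download), only ask preferred, non-limited peers; once near
// the tip, limited peers are fine because only recent blocks are requested.
bool MayRequestBlocksFrom(const PeerFlags& peer, bool preferred_download, bool initial_block_download)
{
    return !peer.fClient &&
           ((preferred_download && !peer.m_limited_node) || !initial_block_download);
}
```

The same limited-node concern shows up on the serving side: the diff keeps the NODE_NETWORK_LIMITED depth check (with a two-block buffer) so a pruned-style peer is never asked to leak how deep its block history goes.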