Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r-- | src/net_processing.cpp | 336
1 file changed, 173 insertions(+), 163 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 50ac76924b..78dc1a752a 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1205,6 +1205,178 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
     connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
+bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams)
+{
+    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
+    size_t nCount = headers.size();
+
+    if (nCount == 0) {
+        // Nothing interesting. Stop asking this peers for more headers.
+        return true;
+    }
+
+    const CBlockIndex *pindexLast = nullptr;
+    {
+        LOCK(cs_main);
+        CNodeState *nodestate = State(pfrom->GetId());
+
+        // If this looks like it could be a block announcement (nCount <
+        // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
+        // don't connect:
+        // - Send a getheaders message in response to try to connect the chain.
+        // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+        //   don't connect before giving DoS points
+        // - Once a headers message is received that is valid and does connect,
+        //   nUnconnectingHeaders gets reset back to 0.
+        if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
+            nodestate->nUnconnectingHeaders++;
+            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+            LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+                    headers[0].GetHash().ToString(),
+                    headers[0].hashPrevBlock.ToString(),
+                    pindexBestHeader->nHeight,
+                    pfrom->GetId(), nodestate->nUnconnectingHeaders);
+            // Set hashLastUnknownBlock for this peer, so that if we
+            // eventually get the headers - even from a different peer -
+            // we can use this peer to download.
+            UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
+
+            if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
+                Misbehaving(pfrom->GetId(), 20);
+            }
+            return true;
+        }
+
+        uint256 hashLastBlock;
+        for (const CBlockHeader& header : headers) {
+            if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
+                Misbehaving(pfrom->GetId(), 20);
+                return error("non-continuous headers sequence");
+            }
+            hashLastBlock = header.GetHash();
+        }
+    }
+
+    CValidationState state;
+    if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
+        int nDoS;
+        if (state.IsInvalid(nDoS)) {
+            if (nDoS > 0) {
+                LOCK(cs_main);
+                Misbehaving(pfrom->GetId(), nDoS);
+            }
+            return error("invalid header received");
+        }
+    }
+
+    {
+        LOCK(cs_main);
+        CNodeState *nodestate = State(pfrom->GetId());
+        if (nodestate->nUnconnectingHeaders > 0) {
+            LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
+        }
+        nodestate->nUnconnectingHeaders = 0;
+
+        assert(pindexLast);
+        UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
+
+        // From here, pindexBestKnownBlock should be guaranteed to be non-null,
+        // because it is set in UpdateBlockAvailability. Some nullptr checks
+        // are still present, however, as belt-and-suspenders.
+
+        if (nCount == MAX_HEADERS_RESULTS) {
+            // Headers message had its maximum size; the peer may have more headers.
+            // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
+            // from there instead.
+            LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
+            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
+        }
+
+        bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
+        // If this set of headers is valid and ends in a block with at least as
+        // much work as our tip, download as much as possible.
+        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
+            std::vector<const CBlockIndex*> vToFetch;
+            const CBlockIndex *pindexWalk = pindexLast;
+            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
+                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
+                        (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
+                    // We don't have this block, and it's not yet in flight.
+                    vToFetch.push_back(pindexWalk);
+                }
+                pindexWalk = pindexWalk->pprev;
+            }
+            // If pindexWalk still isn't on our main chain, we're looking at a
+            // very large reorg at a time we think we're close to caught up to
+            // the main chain -- this shouldn't really happen. Bail out on the
+            // direct fetch and rely on parallel download instead.
+            if (!chainActive.Contains(pindexWalk)) {
+                LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
+                        pindexLast->GetBlockHash().ToString(),
+                        pindexLast->nHeight);
+            } else {
+                std::vector<CInv> vGetData;
+                // Download as much as possible, from earliest to latest.
+                for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
+                    if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+                        // Can't download any more from this peer
+                        break;
+                    }
+                    uint32_t nFetchFlags = GetFetchFlags(pfrom);
+                    vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
+                    MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
+                    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+                            pindex->GetBlockHash().ToString(), pfrom->GetId());
+                }
+                if (vGetData.size() > 1) {
+                    LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
+                            pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+                }
+                if (vGetData.size() > 0) {
+                    if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+                        // In any case, we want to download using a compact block, not a regular one
+                        vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
+                    }
+                    connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
+                }
+            }
+        }
+        // If we're in IBD, we want outbound peers that will serve us a useful
+        // chain. Disconnect peers that are on chains with insufficient work.
+        if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
+            // When nCount < MAX_HEADERS_RESULTS, we know we have no more
+            // headers to fetch from this peer.
+            if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+                // This peer has too little work on their headers chain to help
+                // us sync -- disconnect if using an outbound slot (unless
+                // whitelisted or addnode).
+                // Note: We compare their tip to nMinimumChainWork (rather than
+                // chainActive.Tip()) because we won't start block download
+                // until we have a headers chain that has at least
+                // nMinimumChainWork, even if a peer has a chain past our tip,
+                // as an anti-DoS measure.
+                if (IsOutboundDisconnectionCandidate(pfrom)) {
+                    LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
+                    pfrom->fDisconnect = true;
+                }
+            }
+        }
+
+        if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
+            // If this is an outbound peer, check to see if we should protect
+            // it from the bad/lagging chain logic.
+            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+                nodestate->m_chain_sync.m_protect = true;
+                ++g_outbound_peers_with_protect_from_disconnect;
+            }
+        }
+    }
+
+    return true;
+}
+
 bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
 {
     LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
@@ -2308,169 +2480,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
         }
 
-        if (nCount == 0) {
-            // Nothing interesting. Stop asking this peers for more headers.
-            return true;
-        }
-
-        const CBlockIndex *pindexLast = nullptr;
-        {
-            LOCK(cs_main);
-            CNodeState *nodestate = State(pfrom->GetId());
-
-            // If this looks like it could be a block announcement (nCount <
-            // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
-            // don't connect:
-            // - Send a getheaders message in response to try to connect the chain.
-            // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
-            //   don't connect before giving DoS points
-            // - Once a headers message is received that is valid and does connect,
-            //   nUnconnectingHeaders gets reset back to 0.
-            if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
-                nodestate->nUnconnectingHeaders++;
-                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
-                LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
-                        headers[0].GetHash().ToString(),
-                        headers[0].hashPrevBlock.ToString(),
-                        pindexBestHeader->nHeight,
-                        pfrom->GetId(), nodestate->nUnconnectingHeaders);
-                // Set hashLastUnknownBlock for this peer, so that if we
-                // eventually get the headers - even from a different peer -
-                // we can use this peer to download.
-                UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
-
-                if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
-                    Misbehaving(pfrom->GetId(), 20);
-                }
-                return true;
-            }
-
-            uint256 hashLastBlock;
-            for (const CBlockHeader& header : headers) {
-                if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
-                    Misbehaving(pfrom->GetId(), 20);
-                    return error("non-continuous headers sequence");
-                }
-                hashLastBlock = header.GetHash();
-            }
-        }
-
-        CValidationState state;
-        if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
-            int nDoS;
-            if (state.IsInvalid(nDoS)) {
-                if (nDoS > 0) {
-                    LOCK(cs_main);
-                    Misbehaving(pfrom->GetId(), nDoS);
-                }
-                return error("invalid header received");
-            }
-        }
-
-        {
-            LOCK(cs_main);
-            CNodeState *nodestate = State(pfrom->GetId());
-            if (nodestate->nUnconnectingHeaders > 0) {
-                LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
-            }
-            nodestate->nUnconnectingHeaders = 0;
-
-            assert(pindexLast);
-            UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
-
-            // From here, pindexBestKnownBlock should be guaranteed to be non-null,
-            // because it is set in UpdateBlockAvailability. Some nullptr checks
-            // are still present, however, as belt-and-suspenders.
-
-            if (nCount == MAX_HEADERS_RESULTS) {
-                // Headers message had its maximum size; the peer may have more headers.
-                // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
-                // from there instead.
-                LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
-                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
-            }
-
-            bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
-            // If this set of headers is valid and ends in a block with at least as
-            // much work as our tip, download as much as possible.
-            if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
-                std::vector<const CBlockIndex*> vToFetch;
-                const CBlockIndex *pindexWalk = pindexLast;
-                // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
-                while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
-                    if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
-                            !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
-                            (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
-                        // We don't have this block, and it's not yet in flight.
-                        vToFetch.push_back(pindexWalk);
-                    }
-                    pindexWalk = pindexWalk->pprev;
-                }
-                // If pindexWalk still isn't on our main chain, we're looking at a
-                // very large reorg at a time we think we're close to caught up to
-                // the main chain -- this shouldn't really happen. Bail out on the
-                // direct fetch and rely on parallel download instead.
-                if (!chainActive.Contains(pindexWalk)) {
-                    LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
-                            pindexLast->GetBlockHash().ToString(),
-                            pindexLast->nHeight);
-                } else {
-                    std::vector<CInv> vGetData;
-                    // Download as much as possible, from earliest to latest.
-                    for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
-                        if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
-                            // Can't download any more from this peer
-                            break;
-                        }
-                        uint32_t nFetchFlags = GetFetchFlags(pfrom);
-                        vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
-                        MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
-                        LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
-                                pindex->GetBlockHash().ToString(), pfrom->GetId());
-                    }
-                    if (vGetData.size() > 1) {
-                        LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
-                                pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
-                    }
-                    if (vGetData.size() > 0) {
-                        if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
-                            // In any case, we want to download using a compact block, not a regular one
-                            vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
-                        }
-                        connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
-                    }
-                }
-            }
-            // If we're in IBD, we want outbound peers that will serve us a useful
-            // chain. Disconnect peers that are on chains with insufficient work.
-            if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
-                // When nCount < MAX_HEADERS_RESULTS, we know we have no more
-                // headers to fetch from this peer.
-                if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
-                    // This peer has too little work on their headers chain to help
-                    // us sync -- disconnect if using an outbound slot (unless
-                    // whitelisted or addnode).
-                    // Note: We compare their tip to nMinimumChainWork (rather than
-                    // chainActive.Tip()) because we won't start block download
-                    // until we have a headers chain that has at least
-                    // nMinimumChainWork, even if a peer has a chain past our tip,
-                    // as an anti-DoS measure.
-                    if (IsOutboundDisconnectionCandidate(pfrom)) {
-                        LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
-                        pfrom->fDisconnect = true;
-                    }
-                }
-            }
-
-            if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
-                // If this is an outbound peer, check to see if we should protect
-                // it from the bad/lagging chain logic.
-                if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
-                    nodestate->m_chain_sync.m_protect = true;
-                    ++g_outbound_peers_with_protect_from_disconnect;
-                }
-            }
-        }
+        return ProcessHeadersMessage(pfrom, connman, headers, chainparams);
     }
 
     else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing