about summary refs log tree commit diff
path: root/src/net_processing.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp  138
1 files changed, 116 insertions, 22 deletions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index b046b3ac16..06086d6804 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -18,6 +18,7 @@
#include <index/blockfilterindex.h>
#include <kernel/mempool_entry.h>
#include <logging.h>
+#include <kernel/chain.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
@@ -483,7 +484,7 @@ public:
CTxMemPool& pool, Options opts);
/** Overridden from CValidationInterface. */
- void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
+ void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override
EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
@@ -892,6 +893,38 @@ private:
*/
void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Request blocks for the background chainstate, if one is in use. */
+ void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /**
+ * \brief Find next blocks to download from a peer after a starting block.
+ *
+ * \param vBlocks Vector of blocks to download which will be appended to.
+ * \param peer Peer which blocks will be downloaded from.
+ * \param state Pointer to the state of the peer.
+ * \param pindexWalk Pointer to the starting block to add to vBlocks.
+ * \param count Maximum number of blocks to allow in vBlocks. No more
+ * blocks will be added if it reaches this size.
+ * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No
+ * blocks will be added above this height.
+ * \param activeChain Optional pointer to a chain to compare against. If
+ * provided, any next blocks which are already contained
+ * in this chain will not be appended to vBlocks, but
+ * instead will be used to update the
+ * state->pindexLastCommonBlock pointer.
+ * \param nodeStaller Optional pointer to a NodeId variable that will receive
+ * the ID of another peer that might be causing this peer
+ * to stall. This is set to the ID of the peer which
+ * first requested the first in-flight block in the
+ * download window. It is only set if vBlocks is empty at
+ * the end of this function call and if increasing
+ * nWindowEnd by 1 would cause it to be non-empty (which
+ * indicates the download might be stalled because every
+ * block in the window is in flight and no other peer is
+ * trying to download the next block).
+ */
+ void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
/* Multimap used to preserve insertion order */
typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
@@ -1312,6 +1345,7 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash
}
}
+// Logic for calculating which blocks to download from a given peer, given our current tip.
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
if (count == 0)
@@ -1341,12 +1375,47 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
- std::vector<const CBlockIndex*> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
+
+ FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
+}
+
+void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
+{
+ Assert(from_tip);
+ Assert(target_block);
+
+ if (vBlocks.size() >= count) {
+ return;
+ }
+
+ vBlocks.reserve(count);
+ CNodeState *state = Assert(State(peer.m_id));
+
+ if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
+ // This peer can't provide us the complete series of blocks leading up to the
+ // assumeutxo snapshot base.
+ //
+ // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
+ // will eventually crash when we try to reorg to it. Let other logic
+ // deal with whether we disconnect this peer.
+ //
+ // TODO at some point in the future, we might choose to request what blocks
+ // this peer does have from the historical chain, despite it not having a
+ // complete history beneath the snapshot base.
+ return;
+ }
+
+ FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
+}
+
+void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
+{
+ std::vector<const CBlockIndex*> vToFetch;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
@@ -1374,8 +1443,8 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
// We wouldn't download this block or its descendants from this peer.
return;
}
- if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) {
- if (pindex->HaveTxsDownloaded())
+ if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
+ if (activeChain && pindex->HaveTxsDownloaded())
state->pindexLastCommonBlock = pindex;
} else if (!IsBlockRequested(pindex->GetBlockHash())) {
// The block is not already downloaded, and not yet in flight.
@@ -1383,7 +1452,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
- nodeStaller = waitingfor;
+ if (nodeStaller) *nodeStaller = waitingfor;
}
return;
}
@@ -1843,11 +1912,30 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
* announcements for them. Also save the time of the last tip update and
* possibly reduce dynamic block stalling timeout.
*/
-void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex)
+void PeerManagerImpl::BlockConnected(
+ ChainstateRole role,
+ const std::shared_ptr<const CBlock>& pblock,
+ const CBlockIndex* pindex)
{
- m_orphanage.EraseForBlock(*pblock);
+ // Update this for all chainstate roles so that we don't mistakenly see peers
+ // helping us do background IBD as having a stale tip.
m_last_tip_update = GetTime<std::chrono::seconds>();
+ // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
+ auto stalling_timeout = m_block_stalling_timeout.load();
+ Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
+ const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
+ if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
+ LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
+ }
+ }
+
+ if (role == ChainstateRole::BACKGROUND) {
+ return;
+ }
+ m_orphanage.EraseForBlock(*pblock);
+
{
LOCK(m_recent_confirmed_transactions_mutex);
for (const auto& ptx : pblock->vtx) {
@@ -1864,16 +1952,6 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock
m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
}
}
-
- // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value
- auto stalling_timeout = m_block_stalling_timeout.load();
- Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
- if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
- const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT);
- if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) {
- LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout));
- }
- }
}
void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
@@ -3507,13 +3585,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- if (!pfrom.IsInboundConn()) {
+ // Log successful connections unconditionally for outbound, but not for inbound as those
+ // can be triggered by an attacker at high rate.
+ if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) {
const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)};
- LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s%s (%s)\n",
+ LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n",
+ pfrom.ConnectionTypeAsString(),
+ TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type),
pfrom.nVersion.load(), peer->m_starting_height,
pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""),
- (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""),
- pfrom.ConnectionTypeAsString());
+ (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""));
}
if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
@@ -5847,7 +5928,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
- FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller);
+ auto get_inflight_budget = [&state]() {
+ return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
+ };
+
+ // If a snapshot chainstate is in use, we want to find its next blocks
+ // before the background chainstate to prioritize getting to network tip.
+ FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
+ if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
+ TryDownloadingHistoricalBlocks(
+ *peer,
+ get_inflight_budget(),
+ vToDownload, m_chainman.GetBackgroundSyncTip(),
+ Assert(m_chainman.GetSnapshotBaseBlock()));
+ }
for (const CBlockIndex *pindex : vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(*peer);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));