author    Suhas Daftuar <sdaftuar@chaincode.com>    2023-05-04 14:08:37 -0400
committer James O'Beirne <james.obeirne@pm.me>      2023-09-30 05:45:37 -0400
commit    b73d3bbd23220857bf17cbb6401275bf58013b72 (patch)
tree      c068b3dc31d50bba653c92386b12b2274feed167
parent    5bbf735defac20f58133bea95226e13a5d8209bc (diff)
net_processing: Request assumeutxo background chain blocks
Add new PeerManagerImpl::TryDownloadingHistoricalBlocks method and use it to request background chain blocks in addition to blocks normally requested by FindNextBlocksToDownload.

Co-authored-by: Ryan Ofsky <ryan@ofsky.org>
Co-authored-by: James O'Beirne <james.obeirne@gmail.com>
-rw-r--r--  src/net_processing.cpp  91
-rw-r--r--  src/validation.cpp       6
-rw-r--r--  src/validation.h        16
3 files changed, 105 insertions, 8 deletions
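
Before the patch itself, a minimal, self-contained sketch (not part of the commit) of the two quantities the new SendMessages logic balances: the per-peer in-flight budget that both chainstates share, and the background download window that is clamped at the assumeutxo snapshot base. The constants mirror names used in net_processing.cpp, but their values and the example heights below are invented for illustration; only the two formulas follow the diff.

    // Hedged sketch: how the shared in-flight budget and the clamped
    // background-chain window are computed. All concrete numbers here are
    // hypothetical.
    #include <algorithm>
    #include <cstdio>

    constexpr int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;  // value assumed for illustration
    constexpr int BLOCK_DOWNLOAD_WINDOW = 1024;         // value assumed for illustration

    int main()
    {
        int blocks_in_flight = 10;          // hypothetical: blocks already requested from this peer
        int background_tip_height = 50'000; // hypothetical background (ibd) chain tip height
        int snapshot_base_height = 700'000; // hypothetical assumeutxo snapshot base height

        // Budget left for this peer, as in the patch's get_inflight_budget() lambda.
        int budget = std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - blocks_in_flight);

        // Background downloads never reach past the snapshot base block, matching
        // std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)
        // in TryDownloadingHistoricalBlocks.
        int window_end = std::min(background_tip_height + BLOCK_DOWNLOAD_WINDOW,
                                  snapshot_base_height);

        std::printf("budget=%d window_end=%d\n", budget, window_end);
        return 0;
    }

Because both download paths draw on the same budget and FindNextBlocksToDownload runs first, requests for the snapshot (active) chain are filled before any historical blocks are queued, which is how the patch keeps reaching the network tip as the priority.
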
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index b046b3ac16..4675942366 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -892,6 +892,38 @@ private:
*/
void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Request blocks for the background chainstate, if one is in use. */
+ void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /**
+ * \brief Find next blocks to download from a peer after a starting block.
+ *
+ * \param vBlocks Vector of blocks to download which will be appended to.
+ * \param peer Peer which blocks will be downloaded from.
+ * \param state Pointer to the state of the peer.
+ * \param pindexWalk Pointer to the starting block to add to vBlocks.
+ * \param count Maximum number of blocks to allow in vBlocks. No more
+ * blocks will be added if it reaches this size.
+ * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No
+ * blocks will be added above this height.
+ * \param activeChain Optional pointer to a chain to compare against. If
+ * provided, any next blocks which are already contained
+ * in this chain will not be appended to vBlocks, but
+ * instead will be used to update the
+ * state->pindexLastCommonBlock pointer.
+ * \param nodeStaller Optional pointer to a NodeId variable that will receive
+ * the ID of another peer that might be causing this peer
+ * to stall. This is set to the ID of the peer which
+ * first requested the first in-flight block in the
+ * download window. It is only set if vBlocks is empty at
+ * the end of this function call and if increasing
+ * nWindowEnd by 1 would cause it to be non-empty (which
+ * indicates the download might be stalled because every
+ * block in the window is in flight and no other peer is
+ * trying to download the next block).
+ */
+ void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
/* Multimap used to preserve insertion order */
typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
@@ -1312,6 +1344,7 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash
}
}
+// Logic for calculating which blocks to download from a given peer, given our current tip.
void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller)
{
if (count == 0)
@@ -1341,12 +1374,47 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
- std::vector<const CBlockIndex*> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
+
+ FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller);
+}
+
+void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block)
+{
+ Assert(from_tip);
+ Assert(target_block);
+
+ if (vBlocks.size() >= count) {
+ return;
+ }
+
+ vBlocks.reserve(count);
+ CNodeState *state = Assert(State(peer.m_id));
+
+ if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) {
+ // This peer can't provide us the complete series of blocks leading up to the
+ // assumeutxo snapshot base.
+ //
+ // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we
+ // will eventually crash when we try to reorg to it. Let other logic
+ // deal with whether we disconnect this peer.
+ //
+ // TODO at some point in the future, we might choose to request what blocks
+ // this peer does have from the historical chain, despite it not having a
+ // complete history beneath the snapshot base.
+ return;
+ }
+
+ FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight));
+}
+
+void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller)
+{
+ std::vector<const CBlockIndex*> vToFetch;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
@@ -1374,8 +1442,8 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
// We wouldn't download this block or its descendants from this peer.
return;
}
- if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) {
- if (pindex->HaveTxsDownloaded())
+ if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
+ if (activeChain && pindex->HaveTxsDownloaded())
state->pindexLastCommonBlock = pindex;
} else if (!IsBlockRequested(pindex->GetBlockHash())) {
// The block is not already downloaded, and not yet in flight.
@@ -1383,7 +1451,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != peer.m_id) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
- nodeStaller = waitingfor;
+ if (nodeStaller) *nodeStaller = waitingfor;
}
return;
}
@@ -5847,7 +5915,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
- FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller);
+ auto get_inflight_budget = [&state]() {
+ return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size()));
+ };
+
+ // If a snapshot chainstate is in use, we want to find its next blocks
+ // before the background chainstate to prioritize getting to network tip.
+ FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller);
+ if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) {
+ TryDownloadingHistoricalBlocks(
+ *peer,
+ get_inflight_budget(),
+ vToDownload, m_chainman.GetBackgroundSyncTip(),
+ Assert(m_chainman.GetSnapshotBaseBlock()));
+ }
for (const CBlockIndex *pindex : vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(*peer);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
diff --git a/src/validation.cpp b/src/validation.cpp
index 357b4d422d..a12f121dc3 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -4148,6 +4148,12 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr<const CBlock>& blo
return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString());
}
+ Chainstate* bg_chain{WITH_LOCK(cs_main, return BackgroundSyncInProgress() ? m_ibd_chainstate.get() : nullptr)};
+ BlockValidationState bg_state;
+ if (bg_chain && !bg_chain->ActivateBestChain(bg_state, block)) {
+ return error("%s: [background] ActivateBestChain failed (%s)", __func__, bg_state.ToString());
+ }
+
return true;
}
diff --git a/src/validation.h b/src/validation.h
index 3f0a2312b5..319e40447b 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -881,9 +881,6 @@ private:
/** Most recent headers presync progress update, for rate-limiting. */
std::chrono::time_point<std::chrono::steady_clock> m_last_presync_update GUARDED_BY(::cs_main) {};
- //! Returns nullptr if no snapshot has been loaded.
- const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
-
//! Return the height of the base block of the snapshot in use, if one exists, else
//! nullopt.
std::optional<int> GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
@@ -1034,12 +1031,25 @@ public:
//! Otherwise, revert to using the ibd chainstate and shutdown.
SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ //! Returns nullptr if no snapshot has been loaded.
+ const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
//! The most-work chain.
Chainstate& ActiveChainstate() const;
CChain& ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChainstate().m_chain; }
int ActiveHeight() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Height(); }
CBlockIndex* ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Tip(); }
+ //! The state of a background sync (for net processing)
+ bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
+ return IsUsable(m_snapshot_chainstate.get()) && IsUsable(m_ibd_chainstate.get());
+ }
+
+ //! The tip of the background sync chain
+ const CBlockIndex* GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
+ return BackgroundSyncInProgress() ? m_ibd_chainstate->m_chain.Tip() : nullptr;
+ }
+
node::BlockMap& BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
AssertLockHeld(::cs_main);