Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r-- | src/net_processing.cpp | 90 |
1 file changed, 45 insertions, 45 deletions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 71ebd72b83..b3facdcd3a 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -579,7 +579,7 @@ static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LO
 static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
-    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
+    return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
 }
 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -605,7 +605,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
-    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
         // This peer has nothing interesting.
         return;
     }
@@ -613,7 +613,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking point.
         // Guessing wrong in either direction is not a problem.
-        state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
+        state->pindexLastCommonBlock = ::ChainActive()[std::min(state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
     }
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
@@ -655,7 +655,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
                 // We wouldn't download this block or its descendants from this peer.
                 return;
             }
-            if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
+            if (pindex->nStatus & BLOCK_HAVE_DATA || ::ChainActive().Contains(pindex)) {
                 if (pindex->HaveTxsDownloaded())
                     state->pindexLastCommonBlock = pindex;
             } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
@@ -1071,7 +1071,7 @@ static bool MaybePunishNode(NodeId nodeid, const CValidationState& state, bool v
 static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     AssertLockHeld(cs_main);
-    if (chainActive.Contains(pindex)) return true;
+    if (::ChainActive().Contains(pindex)) return true;
     return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
         (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) &&
         (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT);
@@ -1183,7 +1183,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
 /**
  * Update our best height and announce any block hashes which weren't previously
- * in chainActive to our peers.
+ * in ::ChainActive() to our peers.
  */
 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
     const int nNewHeight = pindexNew->nHeight;
@@ -1264,13 +1264,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
     case MSG_WITNESS_TX:
         {
             assert(recentRejects);
-            if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
+            if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip)
             {
                 // If the chain tip has changed previously rejected transactions
                 // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
                 // or a double-spend. Reset the rejects filter and give those
                 // txs a second chance.
-                hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
+                hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash();
                 recentRejects->reset();
             }
@@ -1395,7 +1395,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
     }
     // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
     if (send && !pfrom->fWhitelisted && (
-            (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
+            (((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
        )) {
         LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
@@ -1465,7 +1465,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
             // instead we respond with the full, non-compact block.
             bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
             int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
-            if (CanDirectFetch(consensusParams) && pindex->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
+            if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
                 if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) {
                     connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
                 } else {
@@ -1485,7 +1485,7 @@ void static ProcessGetBlockData(CNode* pfrom, const CChainParams& chainparams, c
             // and we want it right after the last block so they don't
             // wait for other stuff first.
             std::vector<CInv> vInv;
-            vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
+            vInv.push_back(CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
             pfrom->hashContinue.SetNull();
         }
@@ -1606,7 +1606,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
         // nUnconnectingHeaders gets reset back to 0.
         if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
-            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
             LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                     headers[0].GetHash().ToString(),
                     headers[0].hashPrevBlock.ToString(),
@@ -1663,26 +1663,26 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
         // because it is set in UpdateBlockAvailability. Some nullptr checks
         // are still present, however, as belt-and-suspenders.
-        if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
+        if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
             nodestate->m_last_block_announcement = GetTime();
         }
         if (nCount == MAX_HEADERS_RESULTS) {
             // Headers message had its maximum size; the peer may have more headers.
-            // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
+            // TODO: optimize: if pindexLast is an ancestor of ::ChainActive().Tip or pindexBestHeader, continue
             // from there instead.
             LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
-            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
+            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256()));
         }
         bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
         // If this set of headers is valid and ends in a block with at least as
         // much work as our tip, download as much as possible.
-        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
+        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
             std::vector<const CBlockIndex*> vToFetch;
             const CBlockIndex *pindexWalk = pindexLast;
             // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
-            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+            while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                 if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                         !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                         (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
@@ -1695,7 +1695,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
                 // very large reorg at a time we think we're close to caught up to
                 // the main chain -- this shouldn't really happen. Bail out on the
                 // direct fetch and rely on parallel download instead.
-                if (!chainActive.Contains(pindexWalk)) {
+                if (!::ChainActive().Contains(pindexWalk)) {
                     LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                             pindexLast->GetBlockHash().ToString(),
                             pindexLast->nHeight);
@@ -1736,7 +1736,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
             // us sync -- disconnect if using an outbound slot (unless
             // whitelisted or addnode).
             // Note: We compare their tip to nMinimumChainWork (rather than
-            // chainActive.Tip()) because we won't start block download
+            // ::ChainActive().Tip()) because we won't start block download
             // until we have a headers chain that has at least
             // nMinimumChainWork, even if a peer has a chain past our tip,
             // as an anti-DoS measure.
@@ -1750,7 +1750,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
         if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
             // If this is an outbound peer, check to see if we should protect
             // it from the bad/lagging chain logic.
-            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
                 LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
                 nodestate->m_chain_sync.m_protect = true;
                 ++g_outbound_peers_with_protect_from_disconnect;
@@ -2220,7 +2220,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                 // fell back to inv we probably have a reorg which we should get the headers for first,
                 // we now only provide a getheaders response here. When we receive the headers, we will
                 // then ask for the blocks we need.
-                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
+                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), inv.hash));
                 LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
             }
         }
@@ -2291,14 +2291,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         LOCK(cs_main);
         // Find the last block the caller has in the main chain
-        const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
+        const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator);
         // Send the rest of the chain
         if (pindex)
-            pindex = chainActive.Next(pindex);
+            pindex = ::ChainActive().Next(pindex);
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
-        for (; pindex; pindex = chainActive.Next(pindex))
+        for (; pindex; pindex = ::ChainActive().Next(pindex))
         {
             if (pindex->GetBlockHash() == hashStop)
             {
@@ -2308,7 +2308,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             // If pruning, don't inv blocks unless we have on disk and are likely to still have
             // for some reasonable time window (1 hour) that block relay might require.
             const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
-            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
+            if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave))
             {
                 LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
@@ -2350,7 +2350,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             return true;
         }
-        if (pindex->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
+        if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
            // If an older block is requested (should never happen in practice,
            // but can happen in tests) send a block response instead of a
            // blocktxn response. Sending a full block response instead of a
@@ -2410,23 +2410,23 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         else
         {
             // Find the last block the caller has in the main chain
-            pindex = FindForkInGlobalIndex(chainActive, locator);
+            pindex = FindForkInGlobalIndex(::ChainActive(), locator);
             if (pindex)
-                pindex = chainActive.Next(pindex);
+                pindex = ::ChainActive().Next(pindex);
         }
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
-        for (; pindex; pindex = chainActive.Next(pindex))
+        for (; pindex; pindex = ::ChainActive().Next(pindex))
         {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                 break;
         }
-        // pindex can be nullptr either if we sent chainActive.Tip() OR
-        // if our peer has chainActive.Tip() (and thus we are sending an empty
+        // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
+        // if our peer has ::ChainActive().Tip() (and thus we are sending an empty
         // headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
@@ -2437,7 +2437,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
-        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
+        nodestate->pindexBestHeaderSent = pindex ? pindex : ::ChainActive().Tip();
         connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
         return true;
     }
@@ -2610,7 +2610,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
             // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
             if (!IsInitialBlockDownload())
-                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
+                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256()));
             return true;
         }
@@ -2654,7 +2654,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // If this was a new header with more work than our tip, update the
         // peer's last block announcement time
-        if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
+        if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
             nodestate->m_last_block_announcement = GetTime();
         }
@@ -2664,7 +2664,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
             return true;
-        if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
+        if (pindex->nChainWork <= ::ChainActive().Tip()->nChainWork || // We know something better
                 pindex->nTx != 0) { // We had this block at some point, but pruned it
             if (fAlreadyInFlight) {
                 // We requested this block for some reason, but our mempool will probably be useless
@@ -2688,7 +2688,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         // We want to be a bit conservative just to be extra careful about DoS
         // possibilities in compact block processing...
-        if (pindex->nHeight <= chainActive.Height() + 2) {
+        if (pindex->nHeight <= ::ChainActive().Height() + 2) {
             if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                  (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
                 std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
@@ -3338,7 +3338,7 @@ void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
         // their chain has more work than ours, we should sync to it,
         // unless it's invalid, in which case we should find that out and
         // disconnect from them elsewhere).
-        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
+        if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) {
             if (state.m_chain_sync.m_timeout != 0) {
                 state.m_chain_sync.m_timeout = 0;
                 state.m_chain_sync.m_work_header = nullptr;
@@ -3350,7 +3350,7 @@ void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
             // where we checked against our tip.
             // Either way, set a new timeout based on current tip.
             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
-            state.m_chain_sync.m_work_header = chainActive.Tip();
+            state.m_chain_sync.m_work_header = ::ChainActive().Tip();
             state.m_chain_sync.m_sent_getheaders = false;
         } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
             // No evidence yet that our peer has synced to a chain with work equal to that
@@ -3363,7 +3363,7 @@ void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
             } else {
                 assert(state.m_chain_sync.m_work_header);
                 LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
-                connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
+                connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
                 state.m_chain_sync.m_sent_getheaders = true;
                 constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
                 // Bump the timeout to allow a response, which could clear the timeout
@@ -3561,7 +3561,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
         // Start block sync
         if (pindexBestHeader == nullptr)
-            pindexBestHeader = chainActive.Tip();
+            pindexBestHeader = ::ChainActive().Tip();
         bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
         if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
             // Only actively request headers from a single peer, unless we're close to today.
@@ -3580,7 +3580,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
                 if (pindexStart->pprev)
                     pindexStart = pindexStart->pprev;
                 LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
-                connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
+                connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256()));
             }
         }
@@ -3607,11 +3607,11 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
                 bool fFoundStartingHeader = false;
                 // Try to find first header that our peer doesn't have, and
                 // then send all headers past that one. If we come across any
-                // headers that aren't on chainActive, give up.
+                // headers that aren't on ::ChainActive(), give up.
                 for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
                     const CBlockIndex* pindex = LookupBlockIndex(hash);
                     assert(pindex);
-                    if (chainActive[pindex->nHeight] != pindex) {
+                    if (::ChainActive()[pindex->nHeight] != pindex) {
                         // Bail out if we reorged away from this block
                         fRevertToInv = true;
                         break;
                     }
@@ -3707,9 +3707,9 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
                     // Warn if we're announcing a block that is not on the main chain.
                     // This should be very rare and could be optimized out.
                     // Just log for now.
-                    if (chainActive[pindex->nHeight] != pindex) {
+                    if (::ChainActive()[pindex->nHeight] != pindex) {
                         LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
-                            hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
+                            hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString());
                     }
                     // If the peer's chain has this block, don't inv it back.
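The change applied throughout the diff above is mechanical: every read of the removed chainActive global now goes through the ChainActive() accessor, written with an explicit :: so name lookup stays at global scope. Below is a minimal sketch of that call-site pattern, assuming only what this branch declares in validation.h (CChain& ChainActive() and the cs_main mutex) and sync.h (the LOCK macro); the helper name TipHeightExample is made up for illustration and is not part of the diff.

// Illustrative sketch of the call-site pattern -- not from the diff itself.
#include <sync.h>        // LOCK
#include <validation.h>  // cs_main, ChainActive()

// Hypothetical helper showing the before/after shape of each replacement.
static int TipHeightExample()
{
    LOCK(cs_main); // ChainActive() still requires cs_main, just as chainActive did
    // Before: return chainActive.Height();
    // After: call the accessor; the leading :: keeps lookup at global scope,
    // matching the scripted replacement applied in this diff.
    return ::ChainActive().Height();
}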