Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r-- | src/net_processing.cpp | 882
1 file changed, 685 insertions(+), 197 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 10311a10be..b5e1222db5 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,5 +1,5 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
+// Copyright (c) 2009-2022 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -14,11 +14,14 @@
 #include <consensus/validation.h>
 #include <deploymentstatus.h>
 #include <hash.h>
+#include <headerssync.h>
 #include <index/blockfilterindex.h>
+#include <kernel/mempool_entry.h>
 #include <merkleblock.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <node/blockstorage.h>
+#include <node/txreconciliation.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
@@ -266,19 +269,14 @@ struct Peer {
     /** The feerate in the most recent BIP133 `feefilter` message sent to the peer.
      *  It is *not* a p2p protocol violation for the peer to send us
      *  transactions with a lower fee rate than this. See BIP133. */
-    CAmount m_fee_filter_sent{0};
+    CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
     /** Timestamp after which we will send the next BIP133 `feefilter` message
      * to the peer. */
-    std::chrono::microseconds m_next_send_feefilter{0};
+    std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};

     struct TxRelay {
         mutable RecursiveMutex m_bloom_filter_mutex;
-        /** Whether the peer wishes to receive transaction announcements.
-         *
-         * This is initially set based on the fRelay flag in the received
-         * `version` message. If initially set to false, it can only be flipped
-         * to true if we have offered the peer NODE_BLOOM services and it sends
-         * us a `filterload` or `filterclear` message. See BIP37. */
+        /** Whether we relay transactions to this peer. */
         bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
         /** A bloom filter for which transactions to announce to the peer. See BIP37. */
         std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
@@ -292,7 +290,7 @@ struct Peer {
          * non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the
          * mempool to sort transactions in dependency order before relay, so
          * this does not have to be sorted. */
-        std::set<uint256> m_tx_inventory_to_send;
+        std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
         /** Whether the peer has requested us to send our complete mempool. Only
          * permitted if the peer has NetPermissionFlags::Mempool. See BIP35. */
         bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
@@ -300,7 +298,7 @@ struct Peer {
         std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
         /** The next time after which we will send an `inv` message containing
          * transaction announcements to this peer. */
-        std::chrono::microseconds m_next_inv_send_time{0};
+        std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0};

         /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */
         std::atomic<CAmount> m_fee_filter_received{0};
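The GUARDED_BY annotations added throughout this hunk feed Clang's -Wthread-safety static analysis, which rejects code that touches an annotated member without holding the named mutex. A minimal stand-alone sketch of the pattern (the attribute macros below mirror, but are not, Bitcoin Core's threadsafety.h):

    // Sketch only: how guarded_by drives clang's -Wthread-safety checks.
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))

    struct __attribute__((capability("mutex"))) DemoMutex {
        void lock() __attribute__((acquire_capability()));
        void unlock() __attribute__((release_capability()));
    };

    struct Demo {
        DemoMutex m_mutex;
        int m_counter GUARDED_BY(m_mutex){0}; // compiler warning if touched without m_mutex

        // Callers must already hold m_mutex; otherwise clang warns at the call site.
        void Bump() EXCLUSIVE_LOCKS_REQUIRED(m_mutex) { ++m_counter; }
    };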
@@ -321,7 +319,7 @@
     };

     /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
-    std::vector<CAddress> m_addrs_to_send;
+    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
     /** Probabilistic filter to track recent addr messages relayed with this
      * peer. Used to avoid relaying redundant addresses to this peer.
      *
@@ -331,7 +329,7 @@ struct Peer {
      *
      * Presence of this filter must correlate with m_addr_relay_enabled.
     **/
-    std::unique_ptr<CRollingBloomFilter> m_addr_known;
+    std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
     /** Whether we are participating in address relay with this connection.
      *
      * We set this bool to true for outbound peers (other than
@@ -348,7 +346,7 @@ struct Peer {
      * initialized.*/
     std::atomic_bool m_addr_relay_enabled{false};
     /** Whether a getaddr request to this peer is outstanding. */
-    bool m_getaddr_sent{false};
+    bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
     /** Guards address sending timers. */
    mutable Mutex m_addr_send_times_mutex;
     /** Time point to send the next ADDR message to this peer. */
@@ -359,22 +357,19 @@ struct Peer {
      * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */
     std::atomic_bool m_wants_addrv2{false};
     /** Whether this peer has already sent us a getaddr message. */
-    bool m_getaddr_recvd{false};
+    bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
     /** Number of addresses that can be processed from this peer. Start at 1 to
      * permit self-announcement. */
-    double m_addr_token_bucket{1.0};
+    double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0};
     /** When m_addr_token_bucket was last updated */
-    std::chrono::microseconds m_addr_token_timestamp{GetTime<std::chrono::microseconds>()};
+    std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()};
     /** Total number of addresses that were dropped due to rate limiting. */
     std::atomic<uint64_t> m_addr_rate_limited{0};
     /** Total number of addresses that were processed (excludes rate-limited ones). */
     std::atomic<uint64_t> m_addr_processed{0};

-    /** Set of txids to reconsider once their parent transactions have been accepted **/
-    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
-
     /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */
-    bool m_inv_triggered_getheaders_before_sync{false};
+    bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};

     /** Protects m_getdata_requests **/
     Mutex m_getdata_requests_mutex;
@@ -382,7 +377,16 @@ struct Peer {
     std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

     /** Time of the last getheaders message to this peer */
-    NodeClock::time_point m_last_getheaders_timestamp{};
+    NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};
+
+    /** Protects m_headers_sync **/
+    Mutex m_headers_sync_mutex;
+    /** Headers-sync state for this peer (eg for initial sync, or syncing large
+     * reorgs) **/
+    std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {};
+
+    /** Whether we've sent our peer a sendheaders message. **/
+    std::atomic<bool> m_sent_sendheaders{false};

     explicit Peer(NodeId id, ServiceFlags our_services)
         : m_id{id}
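m_addr_token_bucket and m_addr_token_timestamp implement a token bucket: tokens refill continuously while the bucket sits idle, and each processed address costs one token, so a peer can burst briefly but cannot flood addrs indefinitely. A self-contained sketch of the same arithmetic (the names and refill rate here are hypothetical, not the constants net_processing uses):

    #include <algorithm>
    #include <chrono>

    struct AddrTokenBucket {
        double tokens{1.0}; // start at 1 to permit self-announcement
        std::chrono::microseconds last_update{0};

        // Returns false (drop the address) when the bucket is empty.
        bool Consume(std::chrono::microseconds now, double rate_per_sec, double burst)
        {
            const double elapsed = std::chrono::duration<double>(now - last_update).count();
            tokens = std::min(burst, tokens + elapsed * rate_per_sec);
            last_update = now;
            if (tokens < 1.0) return false;
            tokens -= 1.0;
            return true;
        }
    };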
@@ -392,9 +396,7 @@ private:
     Mutex m_tx_relay_mutex;

-    /** Transaction relay data. Will be a nullptr if we're not relaying
-     * transactions with this peer (e.g. if it's a block-relay-only peer or
-     * the peer has sent us fRelay=false with bloom filters disabled). */
+    /** Transaction relay data. May be a nullptr. */
     std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
 };

@@ -506,11 +508,11 @@ public:
     /** Implement NetEventsInterface */
     void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
-    void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+    void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex);
     bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex);
-    bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
+    bool SendMessages(CNode* pto) override
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, g_msgproc_mutex);

     /** Implement PeerManager */
     void StartScheduledTasks(CScheduler& scheduler) override;
@@ -525,12 +527,12 @@ public:
     void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
     void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
                         const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
     void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override;

 private:
     /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
-    void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+    void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);

     /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
     void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -582,29 +584,92 @@ private:
      */
     bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer);

-    void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
-    /** Process a single headers message from a peer. */
+    /**
+     * Reconsider orphan transactions after a parent has been accepted to the mempool.
+     *
+     * @param[in] peer  The peer whose orphan transactions we will reconsider. Generally only
+     *                  one orphan will be reconsidered on each call of this function. If an
+     *                  accepted orphan has orphaned children, those will need to be
+     *                  reconsidered, creating more work, possibly for other peers.
+     * @return  True if meaningful work was done (an orphan was accepted/rejected).
+     *          If no meaningful work was done, then the work set for this peer
+     *          will be empty.
+     */
+    bool ProcessOrphanTx(Peer& peer)
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
+
+    /** Process a single headers message from a peer.
+     *
+     * @param[in]   pfrom     CNode of the peer
+     * @param[in]   peer      The peer sending us the headers
+     * @param[in]   headers   The headers received. Note that this may be modified within ProcessHeadersMessage.
+     * @param[in]   via_compact_block   Whether this header came in via compact block handling.
+     */
     void ProcessHeadersMessage(CNode& pfrom, Peer& peer,
-                               const std::vector<CBlockHeader>& headers,
+                               std::vector<CBlockHeader>&& headers,
                                bool via_compact_block)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);

     /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */
+    /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */
+    bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
+    /** Calculate an anti-DoS work threshold for headers chains */
+    arith_uint256 GetAntiDoSWorkThreshold();
     /** Deal with state tracking and headers sync for peers that send the
      * occasional non-connecting header (this can happen due to BIP 130 headers
      * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
-    void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers);
+    void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     /** Return true if the headers connect to each other, false otherwise */
     bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
+    /** Try to continue a low-work headers sync that has already begun.
+     * Assumes the caller has already verified the headers connect, and has
+     * checked that each header satisfies the proof-of-work target included in
+     * the header.
+     *  @param[in]     peer     The peer we're syncing with.
+     *  @param[in]     pfrom    CNode of the peer
+     *  @param[in,out] headers  The headers to be processed.
+     *  @return  True if the passed in headers were successfully processed
+     *           as the continuation of a low-work headers sync in progress;
+     *           false otherwise.
+     *           If false, the passed in headers will be returned back to
+     *           the caller.
+     *           If true, the returned headers may be empty, indicating
+     *           there is no more work for the caller to do; or the headers
+     *           may be populated with entries that have passed anti-DoS
+     *           checks (and therefore may be validated for block index
+     *           acceptance by the caller).
+     */
+    bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom,
+                                            std::vector<CBlockHeader>& headers)
+        EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
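IsContinuationOfLowWorkHeadersSync() drives a two-phase state machine: in PRESYNC the headers are checked for work but discarded after a compact per-header commitment is recorded, and in REDOWNLOAD the peer must serve the same chain again, which is verified against those commitments before anything is stored. A toy version of the commitment idea (assumption: the real HeadersSyncState in headerssync.h uses salted, space-efficient commitments; this toy stores whole hashes for brevity):

    #include <deque>
    #include <functional>
    #include <string>

    class ToyHeadersSync
    {
        std::deque<size_t> m_commitments; // PRESYNC: cheap record, no headers kept
        size_t m_pos{0};

    public:
        // Phase 1: remember a fingerprint of each header, then discard it.
        void Commit(const std::string& header)
        {
            m_commitments.push_back(std::hash<std::string>{}(header));
        }

        // Phase 2: the redownloaded chain must match, in order.
        bool Verify(const std::string& header)
        {
            if (m_pos >= m_commitments.size()) return false;
            return m_commitments[m_pos++] == std::hash<std::string>{}(header);
        }
    };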
+    /** Check work on a headers chain to be processed, and if insufficient,
+     * initiate our anti-DoS headers sync mechanism.
+     *
+     * @param[in]     peer                The peer whose headers we're processing.
+     * @param[in]     pfrom               CNode of the peer
+     * @param[in]     chain_start_header  Where these headers connect in our index.
+     * @param[in,out] headers             The headers to be processed.
+     *
+     * @return  True if chain was low work (headers will be empty after
+     *          calling); false otherwise.
+     */
+    bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom,
+                               const CBlockIndex* chain_start_header,
+                               std::vector<CBlockHeader>& headers)
+        EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
+
+    /** Return true if the given header is an ancestor of
+     *  m_chainman.m_best_header or our current tip */
+    bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
     /** Request further headers from this peer with a given locator.
      * We don't issue a getheaders message if we have a recent one outstanding.
      * This returns true if a getheaders is actually sent, and false otherwise. */
-    bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer);
+    bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
-    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast);
+    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
     /** Update peer state based on received headers message */
-    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers);
+    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers);

     void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
@@ -624,7 +689,10 @@ private:
     void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);

     /** Send `addr` messages on a regular schedule. */
-    void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time);
+    void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+
+    /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */
+    void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

     /** Relay (gossip) an address to a few randomly chosen nodes.
      *
@@ -633,10 +701,10 @@ private:
      * @param[in] fReachable   Whether the address' network is reachable. We relay unreachable
      *                         addresses less.
      */
-    void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+    void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
     /** Send `feefilter` message. */
-    void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time);
+    void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

     const CChainParams& m_chainparams;
     CConnman& m_connman;
@@ -646,12 +714,13 @@ private:
     ChainstateManager& m_chainman;
     CTxMemPool& m_mempool;
     TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
+    std::unique_ptr<TxReconciliationTracker> m_txreconciliation;

     /** The height of the best chain */
     std::atomic<int> m_best_height{-1};

     /** Next time to check for stale tip */
-    std::chrono::seconds m_stale_tip_check_time{0s};
+    std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};

     /** Whether this node is running in -blocksonly mode */
     const bool m_ignore_incoming_txs;
@@ -660,7 +729,7 @@ private:

     /** Whether we've completed initial sync yet, for determining when to turn
      *  on extra block-relay-only peers. */
-    bool m_initial_sync_finished{false};
+    bool m_initial_sync_finished GUARDED_BY(cs_main){false};

     /** Protects m_peer_map. This mutex must not be locked while holding a lock
      * on any of the mutexes inside a Peer object. */
@@ -689,7 +758,7 @@ private:
     int nSyncStarted GUARDED_BY(cs_main) = 0;

     /** Hash of the last block we received via INV */
-    uint256 m_last_block_inv_triggering_headers_sync{};
+    uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};

     /**
      * Sources of received blocks, saved to be able to punish them when processing
@@ -785,8 +854,26 @@ private:
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
     uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);

+    // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates.
+    /** Mutex guarding the other m_headers_presync_* variables. */
+    Mutex m_headers_presync_mutex;
+    /** A type to represent statistics about a peer's low-work headers sync.
+     *
+     * - The first field is the total verified amount of work in that synchronization.
+     * - The second is:
+     *   - nullopt: the sync is in REDOWNLOAD phase (phase 2).
+     *   - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1).
+     */
+    using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
+    /** Statistics for all peers in low-work headers sync. */
+    std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {};
+    /** The peer with the most-work entry in m_headers_presync_stats. */
+    NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1};
+    /** The m_headers_presync_stats improved, and needs signalling. */
+    std::atomic_bool m_headers_presync_should_signal{false};
+
     /** Height of the highest block announced using BIP 152 high-bandwidth mode. */
-    int m_highest_fast_announce{0};
+    int m_highest_fast_announce GUARDED_BY(::cs_main){0};

     /** Have we requested this block from a peer */
     bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
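Because HeadersPresyncStats is a std::pair whose second element is a std::optional, the "best peer" comparisons later in this patch come straight from the standard library's lexicographic ordering: more verified work always wins, and at equal work an engaged optional (phase 1, carrying a height and timestamp) outranks nullopt (phase 2). Illustrated with uint64_t standing in for arith_uint256:

    #include <cassert>
    #include <cstdint>
    #include <optional>
    #include <utility>

    using Stats = std::pair<uint64_t, std::optional<std::pair<int64_t, uint32_t>>>;

    int main()
    {
        Stats redownload{100, std::nullopt};                              // phase 2: no height/time
        Stats presync{100, std::pair<int64_t, uint32_t>{700000, 1660000000}}; // phase 1

        assert(presync > redownload);                 // equal work: engaged optional wins
        assert((Stats{101, std::nullopt} > presync)); // more work beats any phase
    }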
@@ -822,7 +909,7 @@ private:
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main);

     /** Process a new block. Perform any post-processing housekeeping */
-    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing);
+    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);

     /** Relay map (txid or wtxid -> CTransactionRef) */
     typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -847,14 +934,14 @@ private:
     /** Storage for orphan information */
     TxOrphanage m_orphanage;

-    void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);
+    void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

     /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
      *  The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
      *  these are kept in a ring buffer */
-    std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
+    std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
     /** Offset into vExtraTxnForCompact to insert the next tx */
-    size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
+    size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;

     /** Check whether the last unknown block a peer advertised is not yet known. */
     void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -933,7 +1020,10 @@ private:
      * @return   True if address relay is enabled with peer
      *           False if address relay is disallowed
      */
-    bool SetupAddressRelay(const CNode& node, Peer& peer);
+    bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+
+    void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+    void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 };

 const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
@@ -959,13 +1049,13 @@ static bool IsAddrCompatible(const Peer& peer, const CAddress& addr)
     return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
 }

-static void AddAddressKnown(Peer& peer, const CAddress& addr)
+void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr)
 {
     assert(peer.m_addr_known);
     peer.m_addr_known->insert(addr.GetKey());
 }

-static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
+void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand)
 {
     // Known checking here is only to save space from duplicates.
     // Before sending, we'll filter it again for known addresses that were
@@ -1157,7 +1247,7 @@ bool PeerManagerImpl::TipMayBeStale()

 bool PeerManagerImpl::CanDirectFetch()
 {
-    return m_chainman.ActiveChain().Tip()->GetBlockTime() > GetAdjustedTime() - m_chainparams.GetConsensus().nPowTargetSpacing * 20;
+    return m_chainman.ActiveChain().Tip()->Time() > GetAdjustedTime() - m_chainparams.GetConsensus().PowTargetSpacing() * 20;
 }

 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
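vExtraTxnForCompact and vExtraTxnForCompactIt form a fixed-capacity ring buffer: the insertion index wraps with a modulus, so the newest entry silently overwrites the oldest once capacity is reached. The same structure in isolation (a sketch of the pattern, not the AddToCompactExtraTransactions() implementation):

    #include <cstddef>
    #include <utility>
    #include <vector>

    template <typename T>
    class RingBuffer
    {
        std::vector<T> m_buf;
        size_t m_next{0};
        size_t m_capacity;

    public:
        explicit RingBuffer(size_t capacity) : m_capacity{capacity} {}

        void Push(T value)
        {
            if (m_buf.size() < m_capacity) {
                m_buf.push_back(std::move(value));
            } else {
                m_buf[m_next] = std::move(value); // overwrite the oldest entry
            }
            m_next = (m_next + 1) % m_capacity;
        }
    };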
@@ -1214,7 +1304,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(peer.m_id);

-    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+    if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
         // This peer has nothing interesting.
         return;
     }
@@ -1303,7 +1393,7 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer)
     CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService();
     uint64_t your_services{addr.nServices};

-    const bool tx_relay = !m_ignore_incoming_txs && !pnode.IsBlockOnlyConn() && !pnode.IsFeelerConn();
+    const bool tx_relay{!RejectIncomingTxs(pnode)};
     m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime,
             your_services, addr_you, // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime)
             my_services, CService(), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime)
@@ -1415,8 +1505,9 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
     for (const QueuedBlock& entry : state->vBlocksInFlight) {
         mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
     }
-    WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));
+    m_orphanage.EraseForPeer(nodeid);
     m_txrequest.DisconnectedPeer(nodeid);
+    if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
     m_num_preferred_download_peers -= state->fPreferredDownload;
     m_peers_downloading_from -= (state->nBlocksInFlight != 0);
     assert(m_peers_downloading_from >= 0);
@@ -1443,6 +1534,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
         // fSuccessfullyConnected set.
         m_addrman.Connected(node.addr);
     }
+    {
+        LOCK(m_headers_presync_mutex);
+        m_headers_presync_stats.erase(nodeid);
+    }
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
@@ -1507,6 +1602,12 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
     stats.m_addr_processed = peer->m_addr_processed.load();
     stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
     stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
+    {
+        LOCK(peer->m_headers_sync_mutex);
+        if (peer->m_headers_sync) {
+            stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
+        }
+    }

     return true;
 }
@@ -1550,6 +1651,10 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
     switch (state.GetResult()) {
     case BlockValidationResult::BLOCK_RESULT_UNSET:
         break;
+    case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
+        // We didn't try to process the block because the header chain may have
+        // too little work.
+        break;
     // The node is providing invalid data:
     case BlockValidationResult::BLOCK_CONSENSUS:
     case BlockValidationResult::BLOCK_MUTATED:
@@ -1647,6 +1752,8 @@ std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl
     LOCK(cs_main);

     // Mark block as in-flight unless it already is (for this peer).
+    // If the peer does not send us a block, vBlocksInFlight remains non-empty,
+    // causing us to timeout and disconnect.
     // If a block was already in-flight for a different peer, its BLOCKTXN
     // response will be dropped.
     if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
@@ -1687,6 +1794,11 @@ PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
       m_mempool(pool),
       m_ignore_incoming_txs(ignore_incoming_txs)
 {
+    // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
+    // This argument can go away after Erlay support is complete.
+    if (gArgs.GetBoolArg("-txreconciliation", DEFAULT_TXRECONCILIATION_ENABLE)) {
+        m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION);
+    }
 }

 void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
@@ -1927,8 +2039,15 @@ void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid
         auto tx_relay = peer.GetTxRelay();
         if (!tx_relay) continue;

-        const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
         LOCK(tx_relay->m_tx_inventory_mutex);
+        // Only queue transactions for announcement once the version handshake
+        // is completed. The time of arrival for these transactions is
+        // otherwise at risk of leaking to a spy, if the spy is able to
+        // distinguish transactions received during the handshake from the rest
+        // in the announcement.
+        if (tx_relay->m_next_inv_send_time == 0s) continue;
+
+        const uint256& hash{peer.m_wtxid_relay ? wtxid : txid};
         if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) {
             tx_relay->m_tx_inventory_to_send.insert(hash);
         }
@@ -2201,9 +2320,9 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
             std::vector<uint256> parent_ids_to_add;
             {
                 LOCK(m_mempool.cs);
-                auto txiter = m_mempool.GetIter(tx->GetHash());
-                if (txiter) {
-                    const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst();
+                auto tx_iter = m_mempool.GetIter(tx->GetHash());
+                if (tx_iter) {
+                    const CTxMemPoolEntry::Parents& parents = (*tx_iter)->GetMemPoolParentsConst();
                     parent_ids_to_add.reserve(parents.size());
                     for (const CTxMemPoolEntry& parent : parents) {
                         if (parent.GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
@@ -2280,6 +2399,35 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
     m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
 }

+bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
+{
+    // Do these headers have proof-of-work matching what's claimed?
+    if (!HasValidProofOfWork(headers, consensusParams)) {
+        Misbehaving(peer, 100, "header with invalid proof of work");
+        return false;
+    }
+
+    // Are these headers connected to each other?
+    if (!CheckHeadersAreContinuous(headers)) {
+        Misbehaving(peer, 20, "non-continuous headers sequence");
+        return false;
+    }
+    return true;
+}
+
+arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
+{
+    arith_uint256 near_chaintip_work = 0;
+    LOCK(cs_main);
+    if (m_chainman.ActiveChain().Tip() != nullptr) {
+        const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
+        // Use a 144 block buffer, so that we'll accept headers that fork from
+        // near our tip.
+        near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
+    }
+    return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
+}
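The 144-block buffer in GetAntiDoSWorkThreshold() is one day of blocks at the ten-minute target spacing, and the std::min clamp keeps the subtraction from underflowing on very young chains. The same arithmetic in miniature, with uint64_t standing in for arith_uint256:

    #include <algorithm>
    #include <cstdint>

    // Toy version of the threshold computation above.
    uint64_t AntiDoSThreshold(uint64_t tip_work, uint64_t block_proof, uint64_t minimum_chain_work)
    {
        const uint64_t buffer = std::min<uint64_t>(144 * block_proof, tip_work);
        const uint64_t near_chaintip_work = tip_work - buffer; // cannot underflow
        return std::max(near_chaintip_work, minimum_chain_work);
    }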
 /**
  * Special handling for unconnecting headers that might be part of a block
  * announcement.
@@ -2302,7 +2450,7 @@ void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
     nodestate->nUnconnectingHeaders++;
     // Try to fill in the missing headers.
-    if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), peer)) {
+    if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) {
         LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
             headers[0].GetHash().ToString(),
             headers[0].hashPrevBlock.ToString(),
@@ -2333,6 +2481,150 @@ bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>&
     return true;
 }

+bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers)
+{
+    if (peer.m_headers_sync) {
+        auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
+        if (result.request_more) {
+            auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
+            // If we were instructed to ask for a locator, it should not be empty.
+            Assume(!locator.vHave.empty());
+            if (!locator.vHave.empty()) {
+                // It should be impossible for the getheaders request to fail,
+                // because we should have cleared the last getheaders timestamp
+                // when processing the headers that triggered this call. But
+                // it may be possible to bypass this via compactblock
+                // processing, so check the result before logging just to be
+                // safe.
+                bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
+                if (sent_getheaders) {
+                    LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
+                            locator.vHave.front().ToString(), pfrom.GetId());
+                } else {
+                    LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
+                            locator.vHave.front().ToString(), pfrom.GetId());
+                }
+            }
+        }
+
+        if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
+            peer.m_headers_sync.reset(nullptr);
+
+            // Delete this peer's entry in m_headers_presync_stats.
+            // If this is m_headers_presync_bestpeer, it will be replaced later
+            // by the next peer that triggers the else{} branch below.
+            LOCK(m_headers_presync_mutex);
+            m_headers_presync_stats.erase(pfrom.GetId());
+        } else {
+            // Build statistics for this peer's sync.
+            HeadersPresyncStats stats;
+            stats.first = peer.m_headers_sync->GetPresyncWork();
+            if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) {
+                stats.second = {peer.m_headers_sync->GetPresyncHeight(),
+                                peer.m_headers_sync->GetPresyncTime()};
+            }
+
+            // Update statistics in stats.
+            LOCK(m_headers_presync_mutex);
+            m_headers_presync_stats[pfrom.GetId()] = stats;
+            auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
+            bool best_updated = false;
+            if (best_it == m_headers_presync_stats.end()) {
+                // If the cached best peer is outdated, iterate over all remaining ones (including
+                // newly updated one) to find the best one.
+                NodeId peer_best{-1};
+                const HeadersPresyncStats* stat_best{nullptr};
+                for (const auto& [peer, stat] : m_headers_presync_stats) {
+                    if (!stat_best || stat > *stat_best) {
+                        peer_best = peer;
+                        stat_best = &stat;
+                    }
+                }
+                m_headers_presync_bestpeer = peer_best;
+                best_updated = (peer_best == pfrom.GetId());
+            } else if (best_it->first == pfrom.GetId() || stats > best_it->second) {
+                // pfrom was and remains the best peer, or pfrom just became best.
+                m_headers_presync_bestpeer = pfrom.GetId();
+                best_updated = true;
+            }
+            if (best_updated && stats.second.has_value()) {
+                // If the best peer updated, and it is in its first phase, signal.
+                m_headers_presync_should_signal = true;
+            }
+        }
+
+        if (result.success) {
+            // We only overwrite the headers passed in if processing was
+            // successful.
+            headers.swap(result.pow_validated_headers);
+        }
+
+        return result.success;
+    }
+    // Either we didn't have a sync in progress, or something went wrong
+    // processing these headers, or we are returning headers to the caller to
+    // process.
+    return false;
+}
+
+bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
+{
+    // Calculate the total work on this chain.
+    arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);
+
+    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
+    // before we'll store it)
+    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
+
+    // Avoid DoS via low-difficulty-headers by only processing if the headers
+    // are part of a chain with sufficient work.
+    if (total_work < minimum_chain_work) {
+        // Only try to sync with this peer if their headers message was full;
+        // otherwise they don't have more headers after this so no point in
+        // trying to sync their too-little-work chain.
+        if (headers.size() == MAX_HEADERS_RESULTS) {
+            // Note: we could advance to the last header in this set that is
+            // known to us, rather than starting at the first header (which we
+            // may already have); however this is unlikely to matter much since
+            // ProcessHeadersMessage() already handles the case where all
+            // headers in a received message are already known and are
+            // ancestors of m_best_header or chainActive.Tip(), by skipping
+            // this logic in that case. So even if the first header in this set
+            // of headers is known, some header in this set must be new, so
+            // advancing to the first unknown header would be a small effect.
+            LOCK(peer.m_headers_sync_mutex);
+            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
+                chain_start_header, minimum_chain_work));
+
+            // Now that a HeadersSyncState object for tracking this synchronization
+            // has been created, process the headers using it as normal. Failures
+            // are handled inside of IsContinuationOfLowWorkHeadersSync.
+            (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
+        } else {
+            LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
+        }
+
+        // The peer has not yet given us a chain that meets our work threshold,
+        // so we want to prevent further processing of the headers in any case.
+        headers = {};
+        return true;
+    }
+
+    return false;
+}
+
+bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header)
+{
+    if (header == nullptr) {
+        return false;
+    } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) {
+        return true;
+    } else if (m_chainman.ActiveChain().Contains(header)) {
+        return true;
+    }
+    return false;
+}
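IsAncestorOfBestHeaderOrTip() leans on CBlockIndex::GetAncestor(), which uses a skiplist to jump to a given height: header H is an ancestor of B exactly when B->GetAncestor(H->nHeight) == H. The idiom in isolation (toy index with a linear walk standing in for the skiplist):

    struct ToyIndex {
        int height;
        const ToyIndex* prev;

        // Linear stand-in for the O(log n) CBlockIndex::GetAncestor().
        const ToyIndex* GetAncestor(int h) const
        {
            const ToyIndex* walk = this;
            while (walk && walk->height > h) walk = walk->prev;
            return walk;
        }
    };

    bool IsAncestorOf(const ToyIndex* header, const ToyIndex* best)
    {
        return header && best && best->GetAncestor(header->height) == header;
    }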
 bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
 {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
@@ -2350,22 +2642,21 @@
 }

 /*
- * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip.
+ * Given a new headers tip ending in last_header, potentially request blocks towards that tip.
  * We require that the given tip have at least as much work as our tip, and for
  * our current tip to be "close to synced" (see CanDirectFetch()).
  */
-void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast)
+void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header)
 {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());

     LOCK(cs_main);
     CNodeState *nodestate = State(pfrom.GetId());

-    if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
-
+    if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) {
         std::vector<const CBlockIndex*> vToFetch;
-        const CBlockIndex *pindexWalk = pindexLast;
-        // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
+        const CBlockIndex* pindexWalk{&last_header};
+        // Calculate all the blocks we'd need to switch to last_header, up to a limit.
         while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                     !IsBlockRequested(pindexWalk->GetBlockHash()) &&
@@ -2381,8 +2672,8 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
         // direct fetch and rely on parallel download instead.
         if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
             LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
-                    pindexLast->GetBlockHash().ToString(),
-                    pindexLast->nHeight);
+                    last_header.GetBlockHash().ToString(),
+                    last_header.nHeight);
         } else {
             std::vector<CInv> vGetData;
             // Download as much as possible, from earliest to latest.
@@ -2399,14 +2690,15 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
             }
             if (vGetData.size() > 1) {
                 LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
-                        pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
+                        last_header.GetBlockHash().ToString(),
+                        last_header.nHeight);
             }
             if (vGetData.size() > 0) {
                 if (!m_ignore_incoming_txs &&
                         nodestate->m_provides_cmpctblocks &&
                         vGetData.size() == 1 &&
                         mapBlocksInFlight.size() == 1 &&
-                        pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
+                        last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) {
                     // In any case, we want to download using a compact block, not a regular one
                     vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                 }
@@ -2417,12 +2709,12 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
 }

 /**
- * Given receipt of headers from a peer ending in pindexLast, along with
+ * Given receipt of headers from a peer ending in last_header, along with
  * whether that header was new and whether the headers message was full,
  * update the state we keep for the peer.
  */
 void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
-        const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers)
+        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
 {
     LOCK(cs_main);
     CNodeState *nodestate = State(pfrom.GetId());
@@ -2431,14 +2723,13 @@ void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
     }
     nodestate->nUnconnectingHeaders = 0;

-    assert(pindexLast);
-    UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
+    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());

     // From here, pindexBestKnownBlock should be guaranteed to be non-null,
     // because it is set in UpdateBlockAvailability. Some nullptr checks
     // are still present, however, as belt-and-suspenders.

-    if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+    if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
         nodestate->m_last_block_announcement = GetTime();
     }
@@ -2447,14 +2738,14 @@ void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !may_have_more_headers) {
         // If the peer has no more headers to give us, then we know we have
         // their tip.
-        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) {
             // This peer has too little work on their headers chain to help
             // us sync -- disconnect if it is an outbound disconnection
             // candidate.
-            // Note: We compare their tip to nMinimumChainWork (rather than
+            // Note: We compare their tip to the minimum chain work (rather than
             // m_chainman.ActiveChain().Tip()) because we won't start block download
             // until we have a headers chain that has at least
-            // nMinimumChainWork, even if a peer has a chain past our tip,
+            // the minimum chain work, even if a peer has a chain past our tip,
             // as an anti-DoS measure.
             if (pfrom.IsOutboundOrBlockRelayConn()) {
                 LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
@@ -2478,21 +2769,73 @@ void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
 }

 void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
-                                            const std::vector<CBlockHeader>& headers,
+                                            std::vector<CBlockHeader>&& headers,
                                             bool via_compact_block)
 {
-    const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     size_t nCount = headers.size();

     if (nCount == 0) {
         // Nothing interesting. Stop asking this peer for more headers.
+        // If we were in the middle of headers sync, receiving an empty headers
+        // message suggests that the peer suddenly has nothing to give us
+        // (perhaps it reorged to our chain). Clear download state for this peer.
+        LOCK(peer.m_headers_sync_mutex);
+        if (peer.m_headers_sync) {
+            peer.m_headers_sync.reset(nullptr);
+            LOCK(m_headers_presync_mutex);
+            m_headers_presync_stats.erase(pfrom.GetId());
+        }
+        return;
+    }
+
+    // Before we do any processing, make sure these pass basic sanity checks.
+    // We'll rely on headers having valid proof-of-work further down, as an
+    // anti-DoS criterion (note: this check is required before passing any
+    // headers into HeadersSyncState).
+    if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
+        // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
+        // just return. (Note that even if a header is announced via compact
+        // block, the header itself should be valid, so this type of error can
+        // always be punished.)
         return;
     }

     const CBlockIndex *pindexLast = nullptr;

+    // We'll set already_validated_work to true if these headers are
+    // successfully processed as part of a low-work headers sync in progress
+    // (either in PRESYNC or REDOWNLOAD phase).
+    // If true, this will mean that any headers returned to us (ie during
+    // REDOWNLOAD) can be validated without further anti-DoS checks.
+    bool already_validated_work = false;
+
+    // If we're in the middle of headers sync, let it do its magic.
+    bool have_headers_sync = false;
+    {
+        LOCK(peer.m_headers_sync_mutex);
+
+        already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
+
+        // The headers we passed in may have been:
+        // - untouched, perhaps if no headers-sync was in progress, or some
+        //   failure occurred
+        // - erased, such as if the headers were successfully processed and no
+        //   additional headers processing needs to take place (such as if we
+        //   are still in PRESYNC)
+        // - replaced with headers that are now ready for validation, such as
+        //   during the REDOWNLOAD phase of a low-work headers sync.
+        // So just check whether we still have headers that we need to process,
+        // or not.
+        if (headers.empty()) {
+            return;
+        }
+
+        have_headers_sync = !!peer.m_headers_sync;
+    }
+
     // Do these headers connect to something in our block index?
-    bool headers_connect_blockindex{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) != nullptr)};
+    const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))};
+    bool headers_connect_blockindex{chain_start_header != nullptr};

     if (!headers_connect_blockindex) {
         if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
@@ -2506,81 +2849,100 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         return;
     }

+    // If the headers we received are already in memory and an ancestor of
+    // m_best_header or our tip, skip anti-DoS checks. These headers will not
+    // use any more memory (and we are not leaking information that could be
+    // used to fingerprint us).
+    const CBlockIndex *last_received_header{nullptr};
+    {
+        LOCK(cs_main);
+        last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
+        if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
+            already_validated_work = true;
+        }
+    }
+
+    // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
+    // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
+    // on startup).
+    if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
+        already_validated_work = true;
+    }
+
     // At this point, the headers connect to something in our block index.
-    if (!CheckHeadersAreContinuous(headers)) {
-        Misbehaving(peer, 20, "non-continuous headers sequence");
+    // Do anti-DoS checks to determine if we should process or store for later
+    // processing.
+    if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom,
+                                                         chain_start_header, headers)) {
+        // If we successfully started a low-work headers sync, then there
+        // should be no headers to process any further.
+        Assume(headers.empty());
         return;
     }
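Taken together, the checks above collapse to a small predicate: headers go straight to validation if they extend something already validated, come from a NoBan peer, or carry enough cumulative work; only otherwise are they diverted into the low-work sync path. A hypothetical condensation (not a helper that exists in the code), again with uint64_t standing in for arith_uint256:

    bool NeedsLowWorkHeadersSync(bool is_ancestor_of_best_or_tip, bool peer_has_noban,
                                 uint64_t chain_work, uint64_t threshold)
    {
        const bool already_validated_work = is_ancestor_of_best_or_tip || peer_has_noban;
        return !already_validated_work && chain_work < threshold;
    }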
+    // At this point, we have a set of headers with sufficient work on them
+    // which can be processed.
+
     // If we don't have the last header, then this peer will have given us
     // something new (if these headers are valid).
-    bool received_new_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()) == nullptr)};
+    bool received_new_header{last_received_header == nullptr};

+    // Now process all the headers.
     BlockValidationState state;
-    if (!m_chainman.ProcessNewBlockHeaders(headers, state, &pindexLast)) {
+    if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) {
         if (state.IsInvalid()) {
             MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
             return;
         }
     }
+    assert(pindexLast);

-    // Consider fetching more headers.
-    if (nCount == MAX_HEADERS_RESULTS) {
+    // Consider fetching more headers if we are not using our headers-sync mechanism.
+    if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
         // Headers message had its maximum size; the peer may have more headers.
-        if (MaybeSendGetHeaders(pfrom, WITH_LOCK(m_chainman.GetMutex(), return m_chainman.ActiveChain().GetLocator(pindexLast)), peer)) {
+        if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
             LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                      pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
         }
     }

-    UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+    UpdatePeerStateForReceivedHeaders(pfrom, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);

     // Consider immediately downloading blocks.
-    HeadersDirectFetchBlocks(pfrom, peer, pindexLast);
+    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);

     return;
 }

-/**
- * Reconsider orphan transactions after a parent has been accepted to the mempool.
- *
- * @param[in,out] orphan_work_set The set of orphan transactions to reconsider. Generally only one
- *                                orphan will be reconsidered on each call of this function. This set
- *                                may be added to if accepting an orphan causes its children to be
- *                                reconsidered.
- */
-void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
+bool PeerManagerImpl::ProcessOrphanTx(Peer& peer)
 {
-    AssertLockHeld(cs_main);
-    AssertLockHeld(g_cs_orphans);
-
-    while (!orphan_work_set.empty()) {
-        const uint256 orphanHash = *orphan_work_set.begin();
-        orphan_work_set.erase(orphan_work_set.begin());
+    AssertLockHeld(g_msgproc_mutex);
+    LOCK(cs_main);

-        const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanHash);
-        if (porphanTx == nullptr) continue;
+    CTransactionRef porphanTx = nullptr;

+    while (CTransactionRef porphanTx = m_orphanage.GetTxToReconsider(peer.m_id)) {
         const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx);
         const TxValidationState& state = result.m_state;
+        const uint256& orphanHash = porphanTx->GetHash();

         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
             LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n", orphanHash.ToString());
             RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
-            m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
+            m_orphanage.AddChildrenToWorkSet(*porphanTx);
             m_orphanage.EraseTx(orphanHash);
             for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
                 AddToCompactExtraTransactions(removedTx);
             }
-            break;
+            return true;
         } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
             if (state.IsInvalid()) {
                 LogPrint(BCLog::MEMPOOL, "   invalid orphan tx %s from peer=%d. %s\n",
%s\n", orphanHash.ToString(), - from_peer, + peer.m_id, state.ToString()); // Maybe punish peer that gave us an invalid orphan tx - MaybePunishNodeForTx(from_peer, state); + MaybePunishNodeForTx(peer.m_id, state); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee @@ -2615,9 +2977,11 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) } } m_orphanage.EraseTx(orphanHash); - break; + return true; } } + + return false; } bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, @@ -2788,10 +3152,10 @@ void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& m_connman.PushMessage(&node, std::move(msg)); } -void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing) +void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) { bool new_block{false}; - m_chainman.ProcessNewBlock(block, force_processing, &new_block); + m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); if (new_block) { node.m_last_block_time = GetTime<std::chrono::seconds>(); } else { @@ -2804,6 +3168,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) { + AssertLockHeld(g_msgproc_mutex); + LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); PeerRef peer = GetPeerRef(pfrom.GetId()); @@ -2905,8 +3271,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2)); } - m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK)); - pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); peer->m_their_services = nServices; pfrom.SetAddrLocal(addrMe); @@ -2916,11 +3280,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } peer->m_starting_height = starting_height; - // We only initialize the m_tx_relay data structure if: - // - this isn't an outbound block-relay-only connection; and - // - fRelay=true or we're offering NODE_BLOOM to this peer - // (NODE_BLOOM means that the peer may turn on tx relay later) + // Only initialize the Peer::TxRelay m_relay_txs data structure if: + // - this isn't an outbound block-relay-only connection, and + // - this isn't an outbound feeler connection, and + // - fRelay=true (the peer wishes to receive transaction announcements) + // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that + // the peer may turn on transaction relay later. if (!pfrom.IsBlockOnlyConn() && + !pfrom.IsFeelerConn() && (fRelay || (peer->m_our_services & NODE_BLOOM))) { auto* const tx_relay = peer->SetTxRelay(); { @@ -2930,6 +3297,22 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (fRelay) pfrom.m_relays_txs = true; } + if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) { + // Per BIP-330, we announce txreconciliation support if: + // - protocol version per the peer's VERSION message supports WTXID_RELAY; + // - transaction relay is supported per the peer's VERSION message (see m_relays_txs); + // - this is not a block-relay-only connection and not a feeler (see m_relays_txs); + // - this is not an addr fetch connection; + // - we are not in -blocksonly mode. 
 bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer,
@@ -2788,10 +3152,10 @@ void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream&
     m_connman.PushMessage(&node, std::move(msg));
 }

-void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing)
+void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked)
 {
     bool new_block{false};
-    m_chainman.ProcessNewBlock(block, force_processing, &new_block);
+    m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block);
     if (new_block) {
         node.m_last_block_time = GetTime<std::chrono::seconds>();
     } else {
@@ -2804,6 +3168,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                                      const std::chrono::microseconds time_received,
                                      const std::atomic<bool>& interruptMsgProc)
 {
+    AssertLockHeld(g_msgproc_mutex);
+
     LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId());

     PeerRef peer = GetPeerRef(pfrom.GetId());
@@ -2905,8 +3271,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
         }

-        m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
-
         pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices);
         peer->m_their_services = nServices;
         pfrom.SetAddrLocal(addrMe);
@@ -2916,11 +3280,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         }
         peer->m_starting_height = starting_height;

-        // We only initialize the m_tx_relay data structure if:
-        // - this isn't an outbound block-relay-only connection; and
-        // - fRelay=true or we're offering NODE_BLOOM to this peer
-        //   (NODE_BLOOM means that the peer may turn on tx relay later)
+        // Only initialize the Peer::TxRelay m_relay_txs data structure if:
+        // - this isn't an outbound block-relay-only connection, and
+        // - this isn't an outbound feeler connection, and
+        // - fRelay=true (the peer wishes to receive transaction announcements)
+        //   or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that
+        //   the peer may turn on transaction relay later.
         if (!pfrom.IsBlockOnlyConn() &&
+            !pfrom.IsFeelerConn() &&
             (fRelay || (peer->m_our_services & NODE_BLOOM))) {
             auto* const tx_relay = peer->SetTxRelay();
             {
@@ -2930,6 +3297,22 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             if (fRelay) pfrom.m_relays_txs = true;
         }

+        if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) {
+            // Per BIP-330, we announce txreconciliation support if:
+            // - protocol version per the peer's VERSION message supports WTXID_RELAY;
+            // - transaction relay is supported per the peer's VERSION message (see m_relays_txs);
+            // - this is not a block-relay-only connection and not a feeler (see m_relays_txs);
+            // - this is not an addr fetch connection;
+            // - we are not in -blocksonly mode.
+            if (pfrom.m_relays_txs && !pfrom.IsAddrFetchConn() && !m_ignore_incoming_txs) {
+                const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId());
+                m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDTXRCNCL,
+                                                             TXRECONCILIATION_VERSION, recon_salt));
+            }
+        }
+
+        m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
+
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
@@ -2938,39 +3321,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             m_num_preferred_download_peers += state->fPreferredDownload;
         }

-        // Self advertisement & GETADDR logic
-        if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
-            // For outbound peers, we try to relay our address (so that other
-            // nodes can try to find us more quickly, as we have no guarantee
-            // that an outbound peer is even aware of how to reach us) and do a
-            // one-time address fetch (to help populate/update our addrman). If
-            // we're starting up for the first time, our addrman may be pretty
-            // empty and no one will know who we are, so these mechanisms are
-            // important to help us connect to the network.
-            //
+        // Attempt to initialize address relay for outbound peers and use result
+        // to decide whether to send GETADDR, so that we don't send it to
+        // inbound or outbound block-relay-only peers.
+        bool send_getaddr{false};
+        if (!pfrom.IsInboundConn()) {
+            send_getaddr = SetupAddressRelay(pfrom, *peer);
+        }
+        if (send_getaddr) {
+            // Do a one-time address fetch to help populate/update our addrman.
+            // If we're starting up for the first time, our addrman may be pretty
+            // empty, so this mechanism is important to help us connect to the network.
             // We skip this for block-relay-only peers. We want to avoid
             // potentially leaking addr information and we do not want to
             // indicate to the peer that we will participate in addr relay.
-            if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload())
-            {
-                CAddress addr{GetLocalAddress(pfrom.addr), peer->m_our_services, Now<NodeSeconds>()};
-                FastRandomContext insecure_rand;
-                if (addr.IsRoutable())
-                {
-                    LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
-                    PushAddress(*peer, addr, insecure_rand);
-                } else if (IsPeerAddrLocalGood(&pfrom)) {
-                    // Override just the address with whatever the peer sees us as.
-                    // Leave the port in addr as it was returned by GetLocalAddress()
-                    // above, as this is an outbound connection and the peer cannot
-                    // observe our listening port.
-                    addr.SetIP(addrMe);
-                    LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
-                    PushAddress(*peer, addr, insecure_rand);
-                }
-            }
-
-            // Get recent addresses
             m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make(NetMsgType::GETADDR));
             peer->m_getaddr_sent = true;
             // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response
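Moving the VERACK send below the SENDTXRCNCL announcement preserves the negotiation invariant: every per-connection feature (wtxidrelay, sendaddrv2, and now sendtxrcncl) must be offered after VERSION and strictly before VERACK closes the handshake. A hypothetical predicate capturing that window:

    // Features may only be negotiated between VERSION and VERACK.
    bool InNegotiationWindow(bool version_received, bool successfully_connected)
    {
        return version_received && !successfully_connected;
    }

The sendtxrcncl handler added further down enforces exactly this, disconnecting peers that send the message after the handshake has completed.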
if (greatest_common_version <= 70012) { - CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION); + DataStream finalAlert{ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50")}; m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version).Make("alert", finalAlert)); } @@ -3049,13 +3413,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, pfrom.ConnectionTypeAsString()); } - if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) { - // Tell our peer we prefer to receive headers rather than inv's - // We send this to non-NODE NETWORK peers as well, because even - // non-NODE NETWORK peers can announce blocks (such as pruning - // nodes) - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); - } if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 2 cmpctblocks. // However, we do not request new block announcements using @@ -3064,6 +3421,30 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // they may wish to request compact blocks from us m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION)); } + + if (m_txreconciliation) { + if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) { + // We could have optimistically pre-registered/registered the peer. In that case, + // we should forget about the reconciliation state here if this wasn't followed + // by WTXIDRELAY (since WTXIDRELAY can't be announced later). + m_txreconciliation->ForgetPeer(pfrom.GetId()); + } + } + + if (auto tx_relay = peer->GetTxRelay()) { + // `TxRelay::m_tx_inventory_to_send` must be empty before the + // version handshake is completed as + // `TxRelay::m_next_inv_send_time` is first initialised in + // `SendMessages` after the verack is received. Any transactions + // received during the version handshake would otherwise + // immediately be advertised without random delay, potentially + // leaking the time of arrival to a spy. + Assume(WITH_LOCK( + tx_relay->m_tx_inventory_mutex, + return tx_relay->m_tx_inventory_to_send.empty() && + tx_relay->m_next_inv_send_time == 0s)); + } + pfrom.fSuccessfullyConnected = true; return; } @@ -3127,6 +3508,61 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + // Received from a peer demonstrating readiness to announce transactions via reconciliations. + // This feature negotiation must happen between VERSION and VERACK to avoid relay problems + // from switching announcement protocols after the connection is up. 
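The negotiation window described here (after VERSION, before VERACK) is the invariant the handler below enforces. A minimal sketch of the rule, with simplified field names:

// Feature messages such as SENDTXRCNCL are only acceptable while the
// handshake is open: VERSION received, VERACK not yet processed.
struct HandshakeState {
    bool version_received{false};
    bool successfully_connected{false}; // set once VERACK is processed
};

bool MayNegotiateFeature(const HandshakeState& hs)
{
    return hs.version_received && !hs.successfully_connected;
}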
+ if (msg_type == NetMsgType::SENDTXRCNCL) { + if (!m_txreconciliation) { + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId()); + return; + } + + if (pfrom.fSuccessfullyConnected) { + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + + // Peer must not offer us reconciliations if we specified no tx relay support in VERSION. + if (RejectIncomingTxs(pfrom)) { + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + + // Peer must not offer us reconciliations if they specified no tx relay support in VERSION. + // This flag might also be false in other cases, but the RejectIncomingTxs check above + // eliminates them, so that this flag fully represents what we are looking for. + if (!pfrom.m_relays_txs) { + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + + uint32_t peer_txreconcl_version; + uint64_t remote_salt; + vRecv >> peer_txreconcl_version >> remote_salt; + + const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(), + peer_txreconcl_version, remote_salt); + switch (result) { + case ReconciliationRegisterResult::NOT_FOUND: + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId()); + break; + case ReconciliationRegisterResult::SUCCESS: + break; + case ReconciliationRegisterResult::ALREADY_REGISTERED: + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + case ReconciliationRegisterResult::PROTOCOL_VIOLATION: + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId()); + pfrom.fDisconnect = true; + return; + } + return; + } + if (!pfrom.fSuccessfullyConnected) { LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); return; @@ -3302,7 +3738,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // use if we turned on sync with all peers). CNodeState& state{*Assert(State(pfrom.GetId()))}; if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { - if (MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer)) { + if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) { LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", m_chainman.m_best_header->nHeight, best_block->ToString(), pfrom.GetId()); @@ -3481,12 +3917,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // Note that if we were to be on a chain that forks from the checkpointed // chain, then serving those headers to a peer that has seen the // checkpointed chain would cause that peer to disconnect us. 
Requiring - // that our chainwork exceed nMinimumChainWork is a protection against + // that our chainwork exceed the minimum chain work is a protection against // being fed a bogus chain when we started up for the first time and // getting partitioned off the honest network for serving that chain to // others. if (m_chainman.ActiveTip() == nullptr || - (m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) { + (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); // Just respond with an empty headers message, to tell the peer to // go away but not treat us as unresponsive. @@ -3574,7 +4010,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, AddKnownTx(*peer, txid); } - LOCK2(cs_main, g_cs_orphans); + LOCK(cs_main); m_txrequest.ReceivedResponse(pfrom.GetId(), txid); if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid); @@ -3615,7 +4051,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); - m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set); + m_orphanage.AddChildrenToWorkSet(tx); pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); @@ -3627,9 +4063,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } - - // Recursively process any orphan transactions that depended on this one - ProcessOrphanTx(peer->m_orphan_work_set); } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { @@ -3766,12 +4199,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(cs_main); - if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { + const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); + if (!prev_block) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) { - MaybeSendGetHeaders(pfrom, m_chainman.ActiveChain().GetLocator(m_chainman.m_best_header), *peer); + MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer); } return; + } else if (prev_block->nChainWork + CalculateHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) { + // If we get a low-work header in a compact block, we can ignore it. 
+ LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); + return; } if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.GetHash())) { @@ -3781,7 +4219,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const CBlockIndex *pindex = nullptr; BlockValidationState state; - if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, &pindex)) { + if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) { if (state.IsInvalid()) { MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); return; @@ -3805,7 +4243,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool fBlockReconstructed = false; { - LOCK2(cs_main, g_cs_orphans); + LOCK(cs_main); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); @@ -3945,10 +4383,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // (eg disk space). Because we only try to reconstruct blocks when // we're close to caught up (via the CanDirectFetch() requirement // above, combined with the behavior of not requesting blocks until - // we have a chain with at least nMinimumChainWork), and we ignore + // we have a chain with at least the minimum chain work), and we ignore // compact blocks with less work than our tip, it is safe to treat // reconstructed compact blocks as having been requested. - ProcessBlock(pfrom, pblock, /*force_processing=*/true); + ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { // Clear download state for this block, which is in @@ -4031,7 +4469,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // disk-space attacks), but this should be safe due to the // protections in the compact block handler -- see related comment // in compact block optimistic reconstruction handling. - ProcessBlock(pfrom, pblock, /*force_processing=*/true); + ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); } return; } @@ -4062,7 +4500,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ReadCompactSize(vRecv); // ignore tx count; assume it is 0. } - return ProcessHeadersMessage(pfrom, *peer, headers, /*via_compact_block=*/false); + ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false); + + // Check if the headers presync progress needs to be reported to validation. + // This needs to be done without holding the m_headers_presync_mutex lock. 
+ if (m_headers_presync_should_signal.exchange(false)) {
+ HeadersPresyncStats stats;
+ {
+ LOCK(m_headers_presync_mutex);
+ auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer);
+ if (it != m_headers_presync_stats.end()) stats = it->second;
+ }
+ if (stats.second) {
+ m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second);
+ }
+ }
+
+ return;
 }

 if (msg_type == NetMsgType::BLOCK)
@@ -4080,6 +4534,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
 bool forceProcessing = false;
 const uint256 hash(pblock->GetHash());
+ bool min_pow_checked = false;
 {
 LOCK(cs_main);
 // Always process the block if we requested it, since we may
@@ -4090,8 +4545,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
 // which peers send us compact blocks, so the race between here and
 // cs_main in ProcessNewBlock is fine.
 mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
+
+ // Check work on this block against our anti-dos thresholds.
+ const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock);
+ if (prev_block && prev_block->nChainWork + CalculateHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) {
+ min_pow_checked = true;
+ }
 }
- ProcessBlock(pfrom, pblock, forceProcessing);
+ ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked);
 return;
 }
@@ -4396,6 +4857,8 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer)
 bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
 {
+ AssertLockHeld(g_msgproc_mutex);
+
 bool fMoreWork = false;
 PeerRef peer = GetPeerRef(pfrom->GetId());
@@ -4408,16 +4871,13 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
 }
 }
- {
- LOCK2(cs_main, g_cs_orphans);
- if (!peer->m_orphan_work_set.empty()) {
- ProcessOrphanTx(peer->m_orphan_work_set);
- }
- }
+ const bool processed_orphan = ProcessOrphanTx(*peer);
 if (pfrom->fDisconnect)
 return false;
+ if (processed_orphan) return true;
+
 // this maintains the order of responses
 // and prevents m_getdata_requests from growing unbounded
 {
@@ -4425,11 +4885,6 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
 if (!peer->m_getdata_requests.empty()) return true;
 }
- {
- LOCK(g_cs_orphans);
- if (!peer->m_orphan_work_set.empty()) return true;
- }
-
 // Don't bother if send buffer is too full to respond anyway
 if (pfrom->fPauseSend) return false;
@@ -4467,6 +4922,12 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
 LOCK(peer->m_getdata_requests_mutex);
 if (!peer->m_getdata_requests.empty()) fMoreWork = true;
 }
+ // Does this peer have an orphan ready to reconsider?
+ // (Note: we may have provided a parent for an orphan provided
+ // by another peer that was already processed; in that case,
+ // the extra work may not be noticed, possibly resulting in an
+ // unnecessary 100ms delay)
+ if (m_orphanage.HaveTxToReconsider(peer->m_id)) fMoreWork = true;
 } catch (const std::exception& e) {
 LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
 } catch (...) {
@@ -4519,7 +4980,7 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seco
 // getheaders in-flight already, in which case the peer should
 // still respond to us with a sufficiently high work chain tip.
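The eviction flow this comment belongs to behaves like a small state machine: start a timer when an outbound peer's best known chain has too little work, send one getheaders as a last chance, then disconnect. A simplified model under assumptions (a single timeout constant; the real code also honors NoBan permission and protected-peer carve-outs):

#include <chrono>

struct ChainSyncSketch {
    std::chrono::seconds timeout{0}; // 0 = no check scheduled
    bool sent_getheaders{false};
};

enum class EvictAction { NONE, SEND_GETHEADERS, DISCONNECT };

EvictAction ConsiderEvictionSketch(ChainSyncSketch& s, bool peer_has_enough_work,
                                   std::chrono::seconds now,
                                   std::chrono::seconds chain_sync_timeout{20 * 60})
{
    if (peer_has_enough_work) {
        s = ChainSyncSketch{}; // peer proved a sufficiently-worked chain; reset
        return EvictAction::NONE;
    }
    if (s.timeout == std::chrono::seconds{0}) {
        s.timeout = now + chain_sync_timeout; // start the clock
        return EvictAction::NONE;
    }
    if (now <= s.timeout) return EvictAction::NONE;
    if (!s.sent_getheaders) {
        s.sent_getheaders = true;
        s.timeout = now + chain_sync_timeout; // grace period for a response
        return EvictAction::SEND_GETHEADERS;
    }
    return EvictAction::DISCONNECT;
}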
MaybeSendGetHeaders(pto, - m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), + GetLocator(state.m_chain_sync.m_work_header->pprev), peer); LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); state.m_chain_sync.m_sent_getheaders = true; @@ -4747,7 +5208,7 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros // Remove addr records that the peer already knows about, and add new // addrs to the m_addr_known filter on the same pass. - auto addr_already_known = [&peer](const CAddress& addr) { + auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { bool ret = peer.m_addr_known->contains(addr.GetKey()); if (!ret) peer.m_addr_known->insert(addr.GetKey()); return ret; @@ -4776,6 +5237,27 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros } } +void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) +{ + // Delay sending SENDHEADERS (BIP 130) until we're done with an + // initial-headers-sync with this peer. Receiving headers announcements for + // new blocks while trying to sync their headers chain is problematic, + // because of the state tracking done. + if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) { + LOCK(cs_main); + CNodeState &state = *State(node.GetId()); + if (state.pindexBestKnownBlock != nullptr && + state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) { + // Tell our peer we prefer to receive headers rather than inv's + // We send this to non-NODE NETWORK peers as well, because even + // non-NODE NETWORK peers can announce blocks (such as pruning + // nodes) + m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(NetMsgType::SENDHEADERS)); + peer.m_sent_sendheaders = true; + } + } +} + void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) { if (m_ignore_incoming_txs) return; @@ -4844,6 +5326,7 @@ bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const { // block-relay-only peers may never send txs to us if (peer.IsBlockOnlyConn()) return true; + if (peer.IsFeelerConn()) return true; // In -blocksonly mode, peers need the 'relay' permission to send txs to us if (m_ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; return false; @@ -4857,8 +5340,9 @@ bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) if (node.IsBlockOnlyConn()) return false; if (!peer.m_addr_relay_enabled.exchange(true)) { - // First addr message we have received from the peer, initialize - // m_addr_known + // During version message processing (non-block-relay-only outbound peers) + // or on first addr-related message we have received (inbound peers), initialize + // m_addr_known. 
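The deferral implemented by MaybeSendSendHeaders above reduces to one predicate: switch a peer to headers announcements (BIP 130) only once its best known block clears our minimum-work bar, i.e. once initial headers sync is plausibly done. A restatement with simplified fields (the 70012 constant is an assumption standing in for SENDHEADERS_VERSION):

struct SendHeadersSketchState {
    bool sent_sendheaders{false};
    int common_version{0};
    bool best_known_valid{false}; // pindexBestKnownBlock != nullptr
    double best_known_work{0};    // stand-in for pindexBestKnownBlock->nChainWork
};

bool ShouldSendSendHeaders(const SendHeadersSketchState& s, double minimum_chain_work,
                           int sendheaders_version = 70012)
{
    return !s.sent_sendheaders &&
           s.common_version >= sendheaders_version &&
           s.best_known_valid &&
           s.best_known_work > minimum_chain_work;
}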
peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001); } @@ -4867,6 +5351,8 @@ bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) bool PeerManagerImpl::SendMessages(CNode* pto) { + AssertLockHeld(g_msgproc_mutex); + PeerRef peer = GetPeerRef(pto->GetId()); if (!peer) return false; const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); @@ -4897,6 +5383,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto) MaybeSendAddr(*pto, *peer, current_time); + MaybeSendSendHeaders(*pto, *peer); + { LOCK(cs_main); @@ -4930,7 +5418,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. - if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { + if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) { const CBlockIndex* pindexStart = m_chainman.m_best_header; /* If possible, start at the block preceding the currently best known header. This ensures that we always get a @@ -4941,7 +5429,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) got back an empty response. */ if (pindexStart->pprev) pindexStart = pindexStart->pprev; - if (MaybeSendGetHeaders(*pto, m_chainman.ActiveChain().GetLocator(pindexStart), *peer)) { + if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) { LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); state.fSyncStarted = true; @@ -4950,7 +5438,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling // to maintain precision std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * - (GetAdjustedTime() - m_chainman.m_best_header->GetBlockTime()) / consensusParams.nPowTargetSpacing + Ticks<std::chrono::seconds>(GetAdjustedTime() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing ); nSyncStarted++; } @@ -5274,7 +5762,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Check for headers sync timeouts if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) { // Detect whether this is a stalling initial-headers-sync peer - if (m_chainman.m_best_header->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) { + if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) { if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, // and we have others we could be using instead. |
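The headers-download timeout computed above scales with how stale our best header is: a base timeout plus a per-header allowance for every header we still expect. A worked sketch of the arithmetic (the 15-minute base and 1 ms per-header rate are assumed stand-ins for HEADERS_DOWNLOAD_TIMEOUT_BASE and HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER):

#include <chrono>
#include <cstdint>

std::chrono::microseconds HeadersSyncTimeoutSketch(
    std::chrono::seconds now, std::chrono::seconds best_header_time,
    std::chrono::seconds pow_target_spacing{600},    // 10-minute block target
    std::chrono::minutes base_timeout{15},           // assumed base
    std::chrono::milliseconds timeout_per_header{1}) // assumed per-header rate
{
    // Estimate how many headers remain from the timestamp lag, then budget
    // time for each on top of the base timeout.
    const int64_t expected_headers = (now - best_header_time) / pow_target_spacing;
    return std::chrono::duration_cast<std::chrono::microseconds>(base_timeout) +
           std::chrono::duration_cast<std::chrono::microseconds>(timeout_per_header) * expected_headers;
}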