Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r-- | src/net_processing.cpp | 2227
1 file changed, 1450 insertions, 777 deletions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp index aee1e4c30c..74700580ad 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -14,6 +14,7 @@ #include <consensus/validation.h> #include <deploymentstatus.h> #include <hash.h> +#include <headerssync.h> #include <index/blockfilterindex.h> #include <merkleblock.h> #include <netbase.h> @@ -21,6 +22,7 @@ #include <node/blockstorage.h> #include <policy/fees.h> #include <policy/policy.h> +#include <policy/settings.h> #include <primitives/block.h> #include <primitives/transaction.h> #include <random.h> @@ -28,6 +30,7 @@ #include <scheduler.h> #include <streams.h> #include <sync.h> +#include <timedata.h> #include <tinyformat.h> #include <txmempool.h> #include <txorphanage.h> @@ -41,6 +44,7 @@ #include <algorithm> #include <atomic> #include <chrono> +#include <future> #include <memory> #include <optional> #include <typeinfo> @@ -59,6 +63,8 @@ static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min; * Timeout = base + per_header * (expected number of headers) */ static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; +/** How long to wait for a peer to respond to a getheaders request */ +static constexpr auto HEADERS_RESPONSE_TIME{2min}; /** Protect at least this many outbound peers from disconnection due to slow/ * behind headers chain. */ @@ -134,6 +140,8 @@ static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h}; /** Average delay between peer address broadcasts */ static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s}; +/** Delay between rotating the peers we relay a particular address to */ +static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h}; /** Average delay between trickled inventory transmissions for inbound peers. * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s}; @@ -172,6 +180,8 @@ static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1}; * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR * is exempt from this limit). */ static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; +/** The compactblocks version we support. See BIP 152. */ +static constexpr uint64_t CMPCTBLOCKS_VERSION{2}; // Internal stuff namespace { @@ -199,6 +209,23 @@ struct Peer { /** Same id as the CNode object for this peer */ const NodeId m_id{0}; + /** Services we offered to this peer. + * + * This is supplied by CConnman during peer initialization. It's const + * because there is no protocol defined for renegotiating services + * initially offered to a peer. The set of local services we offer should + * not change after initialization. + * + * An interesting example of this is NODE_NETWORK and initial block + * download: a node which starts up from scratch doesn't have any blocks + * to serve, but still advertises NODE_NETWORK because it will eventually + * fulfill this role after IBD completes. P2P code is written in such a + * way that it can gracefully handle peers who don't make good on their + * service advertisements. */ + const ServiceFlags m_our_services; + /** Services this peer offered to us. 
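+ *
+ * An illustrative read of these flags (mirroring the CanServeBlocks()
+ * and CanServeWitnesses() helpers added further down in this diff):
+ * @code
+ * const ServiceFlags their_services{peer.m_their_services.load()};
+ * const bool serves_witnesses{(their_services & NODE_WITNESS) != 0};
+ * @endcode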
*/ + std::atomic<ServiceFlags> m_their_services{NODE_NONE}; + /** Protects misbehavior data members */ Mutex m_misbehavior_mutex; /** Accumulated misbehavior score for this peer */ @@ -232,6 +259,65 @@ struct Peer { /** Whether a ping has been requested by the user */ std::atomic<bool> m_ping_queued{false}; + /** Whether this peer relays txs via wtxid */ + std::atomic<bool> m_wtxid_relay{false}; + /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. + * It is *not* a p2p protocol violation for the peer to send us + * transactions with a lower fee rate than this. See BIP133. */ + CAmount m_fee_filter_sent{0}; + /** Timestamp after which we will send the next BIP133 `feefilter` message + * to the peer. */ + std::chrono::microseconds m_next_send_feefilter{0}; + + struct TxRelay { + mutable RecursiveMutex m_bloom_filter_mutex; + /** Whether the peer wishes to receive transaction announcements. + * + * This is initially set based on the fRelay flag in the received + * `version` message. If initially set to false, it can only be flipped + * to true if we have offered the peer NODE_BLOOM services and it sends + * us a `filterload` or `filterclear` message. See BIP37. */ + bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false}; + /** A bloom filter for which transactions to announce to the peer. See BIP37. */ + std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr}; + + mutable RecursiveMutex m_tx_inventory_mutex; + /** A filter of all the txids and wtxids that the peer has announced to + * us or we have announced to the peer. We use this to avoid announcing + * the same txid/wtxid to a peer that already has the transaction. */ + CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001}; + /** Set of transaction ids we still have to announce (txid for + * non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the + * mempool to sort transactions in dependency order before relay, so + * this does not have to be sorted. */ + std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex); + /** Whether the peer has requested us to send our complete mempool. Only + * permitted if the peer has NetPermissionFlags::Mempool. See BIP35. */ + bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; + /** The last time a BIP35 `mempool` request was serviced. */ + std::atomic<std::chrono::seconds> m_last_mempool_req{0s}; + /** The next time after which we will send an `inv` message containing + * transaction announcements to this peer. */ + std::chrono::microseconds m_next_inv_send_time{0}; + + /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ + std::atomic<CAmount> m_fee_filter_received{0}; + }; + + /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */ + TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) + { + LOCK(m_tx_relay_mutex); + Assume(!m_tx_relay); + m_tx_relay = std::make_unique<Peer::TxRelay>(); + return m_tx_relay.get(); + }; + + TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) + { + return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); + }; + /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. 
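*
* Producers go through PushAddress() (its hunk appears further down in
* this diff): once the vector is full, a random existing entry is
* overwritten rather than growing past the cap, roughly:
* @code
* if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) {
*     peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr;
* } else {
*     peer.m_addrs_to_send.push_back(addr);
* }
* @endcode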
*/ std::vector<CAddress> m_addrs_to_send; /** Probabilistic filter to track recent addr messages relayed with this @@ -285,71 +371,193 @@ struct Peer { /** Set of txids to reconsider once their parent transactions have been accepted **/ std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans); + /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ + bool m_inv_triggered_getheaders_before_sync{false}; + /** Protects m_getdata_requests **/ Mutex m_getdata_requests_mutex; /** Work queue of items requested by this peer **/ std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); - explicit Peer(NodeId id) - : m_id(id) + /** Time of the last getheaders message to this peer */ + NodeClock::time_point m_last_getheaders_timestamp{}; + + /** Protects m_headers_sync **/ + Mutex m_headers_sync_mutex; + /** Headers-sync state for this peer (eg for initial sync, or syncing large + * reorgs) **/ + std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {}; + + /** Whether we've sent our peer a sendheaders message. **/ + std::atomic<bool> m_sent_sendheaders{false}; + + explicit Peer(NodeId id, ServiceFlags our_services) + : m_id{id} + , m_our_services{our_services} {} + +private: + Mutex m_tx_relay_mutex; + + /** Transaction relay data. Will be a nullptr if we're not relaying + * transactions with this peer (e.g. if it's a block-relay-only peer or + * the peer has sent us fRelay=false with bloom filters disabled). */ + std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex); }; using PeerRef = std::shared_ptr<Peer>; +/** + * Maintain validation-specific state about nodes, protected by cs_main, instead + * by CNode's own locks. This simplifies asynchronous operation, where + * processing of incoming data is done after the ProcessMessage call returns, + * and we're no longer holding the node's locks. + */ +struct CNodeState { + //! The best known block we know this peer has announced. + const CBlockIndex* pindexBestKnownBlock{nullptr}; + //! The hash of the last unknown block this peer has announced. + uint256 hashLastUnknownBlock{}; + //! The last full block we both have. + const CBlockIndex* pindexLastCommonBlock{nullptr}; + //! The best header we have sent our peer. + const CBlockIndex* pindexBestHeaderSent{nullptr}; + //! Length of current-streak of unconnecting headers announcements + int nUnconnectingHeaders{0}; + //! Whether we've started headers synchronization with this peer. + bool fSyncStarted{false}; + //! When to potentially disconnect peer for stalling headers download + std::chrono::microseconds m_headers_sync_timeout{0us}; + //! Since when we're stalling block download progress (in microseconds), or 0. + std::chrono::microseconds m_stalling_since{0us}; + std::list<QueuedBlock> vBlocksInFlight; + //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. + std::chrono::microseconds m_downloading_since{0us}; + int nBlocksInFlight{0}; + //! Whether we consider this a preferred download peer. + bool fPreferredDownload{false}; + //! Whether this peer wants invs or headers (when possible) for block announcements. + bool fPreferHeaders{false}; + /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */ + bool m_requested_hb_cmpctblocks{false}; + /** Whether this peer will send us cmpctblocks if we request them. 
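+ *
+ * Used to gate compact-block requests, as in
+ * MaybeSetPeerAsAnnouncingHeaderAndIDs() below:
+ * @code
+ * if (!nodestate || !nodestate->m_provides_cmpctblocks) {
+ *     return; // peer never signalled sendcmpct support
+ * }
+ * @endcode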
*/ + bool m_provides_cmpctblocks{false}; + + /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. + * + * Both are only in effect for outbound, non-manual, non-protected connections. + * Any peer protected (m_protect = true) is not chosen for eviction. A peer is + * marked as protected if all of these are true: + * - its connection type is IsBlockOnlyConn() == false + * - it gave us a valid connecting header + * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet + * - its chain tip has at least as much work as ours + * + * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, + * set a timeout CHAIN_SYNC_TIMEOUT in the future: + * - If at timeout their best known block now has more work than our tip + * when the timeout was set, then either reset the timeout or clear it + * (after comparing against our current tip's work) + * - If at timeout their best known block still has less work than our + * tip did when the timeout was set, then send a getheaders message, + * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. + * If their best known block is still behind when that new timeout is + * reached, disconnect. + * + * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, + * drop the outbound one that least recently announced us a new block. + */ + struct ChainSyncTimeoutState { + //! A timeout used for checking whether our peer has sufficiently synced + std::chrono::seconds m_timeout{0s}; + //! A header with the work we require on our peer's chain + const CBlockIndex* m_work_header{nullptr}; + //! After timeout is reached, set to true after sending getheaders + bool m_sent_getheaders{false}; + //! Whether this peer is protected from disconnection due to a bad/slow chain + bool m_protect{false}; + }; + + ChainSyncTimeoutState m_chain_sync; + + //! Time of last new block announcement + int64_t m_last_block_announcement{0}; + + //! Whether this peer is an inbound connection + const bool m_is_inbound; + + //! A rolling bloom filter of all announced tx CInvs to this peer. + CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001}; + + CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {} +}; + class PeerManagerImpl final : public PeerManager { public: - PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, + PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs); /** Overridden from CValidationInterface. 
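*
* The EXCLUSIVE_LOCKS_REQUIRED(!mutex) annotations added below are
* negative capabilities: Clang's thread-safety analysis verifies that the
* named mutex is NOT already held on entry. A (hypothetical) caller like
* this would be rejected when the analysis is enabled:
* @code
* LOCK(m_peer_mutex);
* UpdatedBlockTip(pindexNew, pindexFork, false); // error: requires '!m_peer_mutex'
* @endcode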
*/ - void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override; - void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override; - void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override; - void BlockChecked(const CBlock& block, const BlockValidationState& state) override; - void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override; + void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override + EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); + void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override + EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); + void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void BlockChecked(const CBlock& block, const BlockValidationState& state) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override + EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); /** Implement NetEventsInterface */ - void InitializeNode(CNode* pnode) override; - void FinalizeNode(const CNode& node) override; - bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override; - bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing); + void InitializeNode(CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex); + bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex); + bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex); /** Implement PeerManager */ void StartScheduledTasks(CScheduler& scheduler) override; void CheckForStaleTipAndEvictPeers() override; - std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override; - bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override; + std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } - void SendPings() override; - void RelayTransaction(const uint256& txid, const uint256& wtxid) override; + void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); void SetBestHeight(int height) override { m_best_height = height; }; - void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override; + void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); }; void 
ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, - const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override; + const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex); + void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; private: - void _RelayTransaction(const uint256& txid, const uint256& wtxid) - EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ - void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ - void ReattemptInitialBroadcast(CScheduler& scheduler); + void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** Get a shared pointer to the Peer object. * May return an empty shared_ptr if the Peer object can't be found. */ - PeerRef GetPeerRef(NodeId id) const; + PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** Get a shared pointer to the Peer object and remove it from m_peer_map. * May return an empty shared_ptr if the Peer object can't be found. */ - PeerRef RemovePeer(NodeId id); + PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + + /** + * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node + * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter. + */ + void Misbehaving(Peer& peer, int howmuch, const std::string& message); /** * Potentially mark a node discouraged based on the contents of a BlockValidationState object @@ -362,14 +570,16 @@ private: * @return Returns true if the peer was punished (probably disconnected) */ bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, - bool via_compact_block, const std::string& message = ""); + bool via_compact_block, const std::string& message = "") + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** * Potentially disconnect and discourage a node based on the contents of a TxValidationState object * * @return Returns true if the peer was punished (probably disconnected) */ - bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = ""); + bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "") + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** Maybe disconnect a peer and discourage future connections from its address. * @@ -379,13 +589,83 @@ private: */ bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); - void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans); - /** Process a single headers message from a peer. 
*/ - void ProcessHeadersMessage(CNode& pfrom, const Peer& peer, - const std::vector<CBlockHeader>& headers, - bool via_compact_block); + void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); + /** Process a single headers message from a peer. + * + * @param[in] pfrom CNode of the peer + * @param[in] peer The peer sending us the headers + * @param[in] headers The headers received. Note that this may be modified within ProcessHeadersMessage. + * @param[in] via_compact_block Whether this header came in via compact block handling. + */ + void ProcessHeadersMessage(CNode& pfrom, Peer& peer, + std::vector<CBlockHeader>&& headers, + bool via_compact_block) + EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex); + /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ + /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */ + bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer); + /** Calculate an anti-DoS work threshold for headers chains */ + arith_uint256 GetAntiDoSWorkThreshold(); + /** Deal with state tracking and headers sync for peers that send the + * occasional non-connecting header (this can happen due to BIP 130 headers + * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ + void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers); + /** Return true if the headers connect to each other, false otherwise */ + bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const; + /** Try to continue a low-work headers sync that has already begun. + * Assumes the caller has already verified the headers connect, and has + * checked that each header satisfies the proof-of-work target included in + * the header. + * @param[in] peer The peer we're syncing with. + * @param[in] pfrom CNode of the peer + * @param[in,out] headers The headers to be processed. + * @return True if the passed in headers were successfully processed + * as the continuation of a low-work headers sync in progress; + * false otherwise. + * If false, the passed in headers will be returned back to + * the caller. + * If true, the returned headers may be empty, indicating + * there is no more work for the caller to do; or the headers + * may be populated with entries that have passed anti-DoS + * checks (and therefore may be validated for block index + * acceptance by the caller). + */ + bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, + std::vector<CBlockHeader>& headers) + EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex); + /** Check work on a headers chain to be processed, and if insufficient, + * initiate our anti-DoS headers sync mechanism. + * + * @param[in] peer The peer whose headers we're processing. + * @param[in] pfrom CNode of the peer + * @param[in] chain_start_header Where these headers connect in our index. + * @param[in,out] headers The headers to be processed. + * + * @return True if chain was low work and a headers sync was + * initiated (and headers will be empty after calling); false + * otherwise. 
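+ *
+ * A caller sketch (hypothetical) matching this contract:
+ * @code
+ * if (TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
+ *     return; // headers consumed by the anti-DoS headers-sync machinery
+ * }
+ * // Otherwise headers is untouched; continue with normal acceptance.
+ * @endcode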
+ */ + bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, + const CBlockIndex* chain_start_header, + std::vector<CBlockHeader>& headers) + EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex); + + /** Return true if the given header is an ancestor of + * m_chainman.m_best_header or our current tip */ + bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Request further headers from this peer with a given locator. + * We don't issue a getheaders message if we have a recent one outstanding. + * This returns true if a getheaders is actually sent, and false otherwise. + */ + bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer); + /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ + void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast); + /** Update peer state based on received headers message */ + void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers); - void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req); + void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); /** Register with TxRequestTracker that an INV has been received from a * peer. The announcement parameters are decided in PeerManager and then @@ -394,7 +674,7 @@ private: EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** Send a version message to a peer */ - void PushNodeVersion(CNode& pnode); + void PushNodeVersion(CNode& pnode, const Peer& peer); /** Send a ping message every PING_INTERVAL or if requested via RPC. May * mark the peer to be disconnected if a ping has timed out. @@ -405,6 +685,9 @@ private: /** Send `addr` messages on a regular schedule. */ void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time); + /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */ + void MaybeSendSendHeaders(CNode& node, Peer& peer); + /** Relay (gossip) an address to a few randomly chosen nodes. * * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. @@ -412,10 +695,10 @@ private: * @param[in] fReachable Whether the address' network is reachable. We relay unreachable * addresses less. */ - void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable); + void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); /** Send `feefilter` message. */ - void MaybeSendFeefilter(CNode& node, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time); const CChainParams& m_chainparams; CConnman& m_connman; @@ -430,14 +713,16 @@ private: std::atomic<int> m_best_height{-1}; /** Next time to check for stale tip */ - std::chrono::seconds m_stale_tip_check_time{0s}; + std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; - /** Whether this node is running in blocks only mode */ + /** Whether this node is running in -blocksonly mode */ const bool m_ignore_incoming_txs; + bool RejectIncomingTxs(const CNode& peer) const; + /** Whether we've completed initial sync yet, for determining when to turn * on extra block-relay-only peers. 
*/ - bool m_initial_sync_finished{false}; + bool m_initial_sync_finished GUARDED_BY(cs_main){false}; /** Protects m_peer_map. This mutex must not be locked while holding a lock * on any of the mutexes inside a Peer object. */ @@ -450,11 +735,24 @@ private: */ std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); + /** Map maintaining per-node state. */ + std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main); + + /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ + const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Get a pointer to a mutable CNodeState. */ + CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + uint32_t GetFetchFlags(const Peer& peer) const; + std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us}; /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; + /** Hash of the last block we received via INV */ + uint256 m_last_block_inv_triggering_headers_sync{}; + /** * Sources of received blocks, saved to be able punish them when processing * happens afterwards. @@ -464,12 +762,16 @@ private: std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); /** Number of peers with wtxid relay. */ - int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + std::atomic<int> m_wtxid_relay_peers{0}; /** Number of outbound peers with m_chain_sync.m_protect. */ int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; - bool AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Number of preferable block download peers. */ + int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; + + bool AlreadyHaveTx(const GenTxid& gtxid) + EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_recent_confirmed_transactions_mutex); /** * Filter for transactions that were recently rejected by the mempool. @@ -535,6 +837,34 @@ private: std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, std::chrono::seconds average_interval); + + // All of the following cache a recent block, and are protected by m_most_recent_block_mutex + Mutex m_most_recent_block_mutex; + std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); + std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); + uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); + + // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates. + /** Mutex guarding the other m_headers_presync_* variables. */ + Mutex m_headers_presync_mutex; + /** A type to represent statistics about a peer's low-work headers sync. + * + * - The first field is the total verified amount of work in that synchronization. + * - The second is: + * - nullopt: the sync is in REDOWNLOAD phase (phase 2). + * - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1). + */ + using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>; + /** Statistics for all peers in low-work headers sync. */ + std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {}; + /** The peer with the most-work entry in m_headers_presync_stats. */ + NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1}; + /** The m_headers_presync_stats improved, and needs signalling. 
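+ *
+ * Consumers can test-and-clear the flag atomically, e.g. (sketch):
+ * @code
+ * if (m_headers_presync_should_signal.exchange(false)) {
+ *     // Re-read m_headers_presync_stats under m_headers_presync_mutex
+ *     // and report the best peer's presync progress.
+ * }
+ * @endcode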
*/ + std::atomic_bool m_headers_presync_should_signal{false}; + + /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ + int m_highest_fast_announce{0}; + /** Have we requested this block from a peer */ bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -555,7 +885,7 @@ private: /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has * at most count entries. */ - void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); @@ -565,10 +895,11 @@ private: /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main); - void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main); + void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) + EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main); /** Process a new block. Perform any post-processing housekeeping */ - void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing); + void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked); /** Relay map (txid or wtxid -> CTransactionRef) */ typedef std::map<uint256, CTransactionRef> MapRelay; @@ -616,13 +947,15 @@ private: */ bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv); + void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) + EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); /** * Validation logic for compact filters request handling. * * May disconnect from the peer in the case of a bad request. * + * @param[in] node The node that we received the request from * @param[in] peer The peer that we received the request from * @param[in] filter_type The filter type the request is for. Must be basic filters. * @param[in] start_height The start height for the request @@ -632,7 +965,7 @@ private: * @param[out] filter_index The filter index, if the request can be serviced. * @return True if the request can be serviced. */ - bool PrepareBlockFilterRequest(CNode& peer, + bool PrepareBlockFilterRequest(CNode& node, Peer& peer, BlockFilterType filter_type, uint32_t start_height, const uint256& stop_hash, uint32_t max_height_diff, const CBlockIndex*& stop_index, @@ -643,30 +976,33 @@ private: * * May disconnect from the peer in the case of a bad request. 
* + * @param[in] node The node that we received the request from * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received */ - void ProcessGetCFilters(CNode& peer, CDataStream& vRecv); + void ProcessGetCFilters(CNode& node, Peer& peer, CDataStream& vRecv); /** * Handle a cfheaders request. * * May disconnect from the peer in the case of a bad request. * + * @param[in] node The node that we received the request from * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received */ - void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv); + void ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv); /** * Handle a getcfcheckpt request. * * May disconnect from the peer in the case of a bad request. * + * @param[in] node The node that we received the request from * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received */ - void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv); + void ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv); /** Checks if address relay is permitted with peer. If needed, initializes * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. @@ -676,125 +1012,20 @@ private: */ bool SetupAddressRelay(const CNode& node, Peer& peer); }; -} // namespace - -namespace { - /** Number of preferable block download peers. */ - int nPreferredDownload GUARDED_BY(cs_main) = 0; -} // namespace -namespace { -/** - * Maintain validation-specific state about nodes, protected by cs_main, instead - * by CNode's own locks. This simplifies asynchronous operation, where - * processing of incoming data is done after the ProcessMessage call returns, - * and we're no longer holding the node's locks. - */ -struct CNodeState { - //! The best known block we know this peer has announced. - const CBlockIndex* pindexBestKnownBlock{nullptr}; - //! The hash of the last unknown block this peer has announced. - uint256 hashLastUnknownBlock{}; - //! The last full block we both have. - const CBlockIndex* pindexLastCommonBlock{nullptr}; - //! The best header we have sent our peer. - const CBlockIndex* pindexBestHeaderSent{nullptr}; - //! Length of current-streak of unconnecting headers announcements - int nUnconnectingHeaders{0}; - //! Whether we've started headers synchronization with this peer. - bool fSyncStarted{false}; - //! When to potentially disconnect peer for stalling headers download - std::chrono::microseconds m_headers_sync_timeout{0us}; - //! Since when we're stalling block download progress (in microseconds), or 0. - std::chrono::microseconds m_stalling_since{0us}; - std::list<QueuedBlock> vBlocksInFlight; - //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. - std::chrono::microseconds m_downloading_since{0us}; - int nBlocksInFlight{0}; - //! Whether we consider this a preferred download peer. - bool fPreferredDownload{false}; - //! Whether this peer wants invs or headers (when possible) for block announcements. - bool fPreferHeaders{false}; - //! Whether this peer wants invs or cmpctblocks (when possible) for block announcements. - bool fPreferHeaderAndIDs{false}; - /** - * Whether this peer will send us cmpctblocks if we request them. - * This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion, - * but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send. 
- */ - bool fProvidesHeaderAndIDs{false}; - //! Whether this peer can give us witnesses - bool fHaveWitness{false}; - //! Whether this peer wants witnesses in cmpctblocks/blocktxns - bool fWantsCmpctWitness{false}; - /** - * If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns, - * otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns. - */ - bool fSupportsDesiredCmpctVersion{false}; - - /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. - * - * Both are only in effect for outbound, non-manual, non-protected connections. - * Any peer protected (m_protect = true) is not chosen for eviction. A peer is - * marked as protected if all of these are true: - * - its connection type is IsBlockOnlyConn() == false - * - it gave us a valid connecting header - * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet - * - its chain tip has at least as much work as ours - * - * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, - * set a timeout CHAIN_SYNC_TIMEOUT in the future: - * - If at timeout their best known block now has more work than our tip - * when the timeout was set, then either reset the timeout or clear it - * (after comparing against our current tip's work) - * - If at timeout their best known block still has less work than our - * tip did when the timeout was set, then send a getheaders message, - * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. - * If their best known block is still behind when that new timeout is - * reached, disconnect. - * - * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, - * drop the outbound one that least recently announced us a new block. - */ - struct ChainSyncTimeoutState { - //! A timeout used for checking whether our peer has sufficiently synced - std::chrono::seconds m_timeout{0s}; - //! A header with the work we require on our peer's chain - const CBlockIndex* m_work_header{nullptr}; - //! After timeout is reached, set to true after sending getheaders - bool m_sent_getheaders{false}; - //! Whether this peer is protected from disconnection due to a bad/slow chain - bool m_protect{false}; - }; - - ChainSyncTimeoutState m_chain_sync; - - //! Time of last new block announcement - int64_t m_last_block_announcement{0}; - - //! Whether this peer is an inbound connection - const bool m_is_inbound; - - //! A rolling bloom filter of all announced tx CInvs to this peer. - CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001}; - - //! Whether this peer relays txs via wtxid - bool m_wtxid_relay{false}; - - CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {} -}; - -/** Map maintaining per-node state. */ -static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main); - -static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode); - if (it == mapNodeState.end()) +const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode); + if (it == m_node_states.end()) return nullptr; return &it->second; } +CNodeState* PeerManagerImpl::State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +{ + return const_cast<CNodeState*>(std::as_const(*this).State(pnode)); +} + /** * Whether the peer supports the address. 
For example, a peer that does not * implement BIP155 cannot receive Tor v3 addresses because it requires @@ -826,14 +1057,33 @@ static void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& ins } } -static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void AddKnownTx(Peer& peer, const uint256& hash) +{ + auto tx_relay = peer.GetTxRelay(); + if (!tx_relay) return; + + LOCK(tx_relay->m_tx_inventory_mutex); + tx_relay->m_tx_inventory_known_filter.insert(hash); +} + +/** Whether this peer can serve us blocks. */ +static bool CanServeBlocks(const Peer& peer) { - nPreferredDownload -= state->fPreferredDownload; + return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED); +} - // Whether this node should be marked as a preferred download node. - state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(NetPermissionFlags::NoBan)) && !node.IsAddrFetchConn() && !node.fClient; +/** Whether this peer can only serve limited recent blocks (e.g. because + * it prunes old blocks) */ +static bool IsLimitedPeer(const Peer& peer) +{ + return (!(peer.m_their_services & NODE_NETWORK) && + (peer.m_their_services & NODE_NETWORK_LIMITED)); +} - nPreferredDownload += state->fPreferredDownload; +/** Whether this peer can serve us witness data */ +static bool CanServeWitnesses(const Peer& peer) +{ + return peer.m_their_services & NODE_WITNESS; } std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, @@ -918,60 +1168,58 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) { AssertLockHeld(cs_main); - // Never request high-bandwidth mode from peers if we're blocks-only. Our + // When in -blocksonly mode, never request high-bandwidth mode from peers. Our // mempool will not contain the transactions necessary to reconstruct the // compact block. if (m_ignore_incoming_txs) return; CNodeState* nodestate = State(nodeid); - if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) { - // Never ask from peers who can't provide witnesses. + if (!nodestate || !nodestate->m_provides_cmpctblocks) { + // Don't request compact blocks if the peer has not signalled support return; } - if (nodestate->fProvidesHeaderAndIDs) { - int num_outbound_hb_peers = 0; - for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { - if (*it == nodeid) { - lNodesAnnouncingHeaderAndIDs.erase(it); - lNodesAnnouncingHeaderAndIDs.push_back(nodeid); - return; - } - CNodeState *state = State(*it); - if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers; - } - if (nodestate->m_is_inbound) { - // If we're adding an inbound HB peer, make sure we're not removing - // our last outbound HB peer in the process. - if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { - CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front()); - if (remove_node != nullptr && !remove_node->m_is_inbound) { - // Put the HB outbound peer in the second slot, so that it - // doesn't get removed. 
- std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); - } - } + + int num_outbound_hb_peers = 0; + for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { + if (*it == nodeid) { + lNodesAnnouncingHeaderAndIDs.erase(it); + lNodesAnnouncingHeaderAndIDs.push_back(nodeid); + return; } - m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { - AssertLockHeld(::cs_main); - uint64_t nCMPCTBLOCKVersion = 2; - if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { - // As per BIP152, we only get 3 of our peers to announce - // blocks using compact encodings. - m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this, nCMPCTBLOCKVersion](CNode* pnodeStop){ - m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); - // save BIP152 bandwidth state: we select peer to be low-bandwidth - pnodeStop->m_bip152_highbandwidth_to = false; - return true; - }); - lNodesAnnouncingHeaderAndIDs.pop_front(); + CNodeState *state = State(*it); + if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers; + } + if (nodestate->m_is_inbound) { + // If we're adding an inbound HB peer, make sure we're not removing + // our last outbound HB peer in the process. + if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { + CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front()); + if (remove_node != nullptr && !remove_node->m_is_inbound) { + // Put the HB outbound peer in the second slot, so that it + // doesn't get removed. + std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); } - m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); - // save BIP152 bandwidth state: we select peer to be high-bandwidth - pfrom->m_bip152_highbandwidth_to = true; - lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); - return true; - }); + } } + m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + AssertLockHeld(::cs_main); + if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { + // As per BIP152, we only get 3 of our peers to announce + // blocks using compact encodings. 
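+ // Demote the oldest high-bandwidth peer first: send it
+ // sendcmpct(high_bandwidth=false) and pop it from the front of the
+ // list, making room for the new high-bandwidth peer appended below.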
+ m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){ + m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION)); + // save BIP152 bandwidth state: we select peer to be low-bandwidth + pnodeStop->m_bip152_highbandwidth_to = false; + return true; + }); + lNodesAnnouncingHeaderAndIDs.pop_front(); + } + m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION)); + // save BIP152 bandwidth state: we select peer to be high-bandwidth + pfrom->m_bip152_highbandwidth_to = true; + lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); + return true; + }); } bool PeerManagerImpl::TipMayBeStale() @@ -986,7 +1234,7 @@ bool PeerManagerImpl::TipMayBeStale() bool PeerManagerImpl::CanDirectFetch() { - return m_chainman.ActiveChain().Tip()->GetBlockTime() > GetAdjustedTime() - m_chainparams.GetConsensus().nPowTargetSpacing * 20; + return m_chainman.ActiveChain().Tip()->Time() > GetAdjustedTime() - m_chainparams.GetConsensus().PowTargetSpacing() * 20; } static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -1031,17 +1279,17 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash } } -void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) +void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) { if (count == 0) return; vBlocks.reserve(vBlocks.size() + count); - CNodeState *state = State(nodeid); + CNodeState *state = State(peer.m_id); assert(state != nullptr); // Make sure pindexBestKnownBlock is up to date, we'll need it. - ProcessBlockAvailability(nodeid); + ProcessBlockAvailability(peer.m_id); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has nothing interesting. @@ -1060,7 +1308,6 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; - const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); std::vector<const CBlockIndex*> vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last @@ -1090,7 +1337,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count // We consider the chain that this peer is on invalid. return; } - if (!State(nodeid)->fHaveWitness && DeploymentActiveAt(*pindex, consensusParams, Consensus::DEPLOYMENT_SEGWIT)) { + if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) { // We wouldn't download this block or its descendants from this peer. return; } @@ -1101,7 +1348,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. 
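// If nothing could be fetched at all, the peer currently occupying
// the download window (waitingfor, unless it is this peer itself) is
// reported as the potential staller just below.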
- if (vBlocks.size() == 0 && waitingfor != nodeid) { + if (vBlocks.size() == 0 && waitingfor != peer.m_id) { // We aren't able to fetch anything, but we would be if the download window was one larger. nodeStaller = waitingfor; } @@ -1121,12 +1368,9 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count } // namespace -void PeerManagerImpl::PushNodeVersion(CNode& pnode) +void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) { - // Note that pnode->GetLocalServices() is a reflection of the local - // services we were offering when the CNode object was created for this - // peer. - uint64_t my_services{pnode.GetLocalServices()}; + uint64_t my_services{peer.m_our_services}; const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())}; uint64_t nonce = pnode.GetLocalNonce(); const int nNodeStartingHeight{m_best_height}; @@ -1136,7 +1380,7 @@ void PeerManagerImpl::PushNodeVersion(CNode& pnode) CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService(); uint64_t your_services{addr.nServices}; - const bool tx_relay = !m_ignore_incoming_txs && pnode.m_tx_relay != nullptr && !pnode.IsFeelerConn(); + const bool tx_relay = !m_ignore_incoming_txs && !pnode.IsBlockOnlyConn() && !pnode.IsFeelerConn(); m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime, your_services, addr_you, // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) my_services, CService(), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) @@ -1176,30 +1420,28 @@ void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, m_txrequest.ReceivedInv(nodeid, gtxid, preferred, current_time + delay); } -// This function is used for testing the stale tip eviction logic, see -// denialofservice_tests.cpp -void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) +void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) { LOCK(cs_main); CNodeState *state = State(node); if (state) state->m_last_block_announcement = time_in_seconds; } -void PeerManagerImpl::InitializeNode(CNode *pnode) +void PeerManagerImpl::InitializeNode(CNode& node, ServiceFlags our_services) { - NodeId nodeid = pnode->GetId(); + NodeId nodeid = node.GetId(); { LOCK(cs_main); - mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(pnode->IsInboundConn())); + m_node_states.emplace_hint(m_node_states.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(node.IsInboundConn())); assert(m_txrequest.Count(nodeid) == 0); } + PeerRef peer = std::make_shared<Peer>(nodeid, our_services); { - PeerRef peer = std::make_shared<Peer>(nodeid); LOCK(m_peer_mutex); - m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer)); + m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); } - if (!pnode->IsInboundConn()) { - PushNodeVersion(*pnode); + if (!node.IsInboundConn()) { + PushNodeVersion(node, *peer); } } @@ -1211,8 +1453,7 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) CTransactionRef tx = m_mempool.get(txid); if (tx != nullptr) { - LOCK(cs_main); - _RelayTransaction(txid, tx->GetWitnessHash()); + RelayTransaction(txid, tx->GetWitnessHash()); } else { m_mempool.RemoveUnbroadcastTx(txid, true); } @@ -1239,6 +1480,8 @@ void PeerManagerImpl::FinalizeNode(const 
CNode& node) PeerRef peer = RemovePeer(nodeid); assert(peer != nullptr); misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); + m_wtxid_relay_peers -= peer->m_wtxid_relay; + assert(m_wtxid_relay_peers >= 0); } CNodeState *state = State(nodeid); assert(state != nullptr); @@ -1251,20 +1494,18 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) } WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid)); m_txrequest.DisconnectedPeer(nodeid); - nPreferredDownload -= state->fPreferredDownload; + m_num_preferred_download_peers -= state->fPreferredDownload; m_peers_downloading_from -= (state->nBlocksInFlight != 0); assert(m_peers_downloading_from >= 0); m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(m_outbound_peers_with_protect_from_disconnect >= 0); - m_wtxid_relay_peers -= state->m_wtxid_relay; - assert(m_wtxid_relay_peers >= 0); - mapNodeState.erase(nodeid); + m_node_states.erase(nodeid); - if (mapNodeState.empty()) { + if (m_node_states.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); - assert(nPreferredDownload == 0); + assert(m_num_preferred_download_peers == 0); assert(m_peers_downloading_from == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); assert(m_wtxid_relay_peers == 0); @@ -1279,6 +1520,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) // fSuccessfullyConnected set. m_addrman.Connected(node.addr); } + { + LOCK(m_headers_presync_mutex); + m_headers_presync_stats.erase(nodeid); + } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } @@ -1305,7 +1550,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c { { LOCK(cs_main); - CNodeState* state = State(nodeid); + const CNodeState* state = State(nodeid); if (state == nullptr) return false; stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; @@ -1318,6 +1563,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c PeerRef peer = GetPeerRef(nodeid); if (peer == nullptr) return false; + stats.their_services = peer->m_their_services; stats.m_starting_height = peer->m_starting_height; // It is common for nodes with good ping times to suddenly become lagged, // due to a new block arriving or other large transfer. 
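The hunk below reads the new per-peer relay fields through GetTxRelay(), using the WITH_LOCK helper from sync.h to access a GUARDED_BY field for the duration of a single expression. A self-contained sketch of that pattern (class and member names hypothetical):

    #include <sync.h>

    class Example
    {
        Mutex m_mutex;
        bool m_flag GUARDED_BY(m_mutex){false};

    public:
        // Acquire m_mutex only for the evaluation of one expression; the
        // negative annotation promises the caller does not hold it yet.
        bool ReadFlag() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex)
        {
            return WITH_LOCK(m_mutex, return m_flag);
        }
    };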
@@ -1330,10 +1576,24 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); } + if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); + stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); + } else { + stats.m_relay_txs = false; + stats.m_fee_filter_received = 0; + } + stats.m_ping_wait = ping_wait; stats.m_addr_processed = peer->m_addr_processed.load(); stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); + { + LOCK(peer->m_headers_sync_mutex); + if (peer->m_headers_sync) { + stats.presync_height = peer->m_headers_sync->GetPresyncHeight(); + } + } return true; } @@ -1349,41 +1609,43 @@ void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } -void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) +void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message) { assert(howmuch > 0); - PeerRef peer = GetPeerRef(pnode); - if (peer == nullptr) return; - - LOCK(peer->m_misbehavior_mutex); - const int score_before{peer->m_misbehavior_score}; - peer->m_misbehavior_score += howmuch; - const int score_now{peer->m_misbehavior_score}; + LOCK(peer.m_misbehavior_mutex); + const int score_before{peer.m_misbehavior_score}; + peer.m_misbehavior_score += howmuch; + const int score_now{peer.m_misbehavior_score}; const std::string message_prefixed = message.empty() ? "" : (": " + message); std::string warning; if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) { warning = " DISCOURAGE THRESHOLD EXCEEDED"; - peer->m_should_discourage = true; + peer.m_should_discourage = true; } LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n", - pnode, score_before, score_now, warning, message_prefixed); + peer.m_id, score_before, score_now, warning, message_prefixed); } bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, bool via_compact_block, const std::string& message) { + PeerRef peer{GetPeerRef(nodeid)}; switch (state.GetResult()) { case BlockValidationResult::BLOCK_RESULT_UNSET: break; + case BlockValidationResult::BLOCK_HEADER_LOW_WORK: + // We didn't try to process the block because the header chain may have + // too little work. + break; // The node is providing invalid data: case BlockValidationResult::BLOCK_CONSENSUS: case BlockValidationResult::BLOCK_MUTATED: if (!via_compact_block) { - Misbehaving(nodeid, 100, message); + if (peer) Misbehaving(*peer, 100, message); return true; } break; @@ -1398,7 +1660,7 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati // Discourage outbound (but not inbound) peers if on an invalid chain. // Exempt HB compact block peers. Manual connections are always protected from discouragement. 
if (!via_compact_block && !node_state->m_is_inbound) { - Misbehaving(nodeid, 100, message); + if (peer) Misbehaving(*peer, 100, message); return true; } break; @@ -1406,12 +1668,12 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati case BlockValidationResult::BLOCK_INVALID_HEADER: case BlockValidationResult::BLOCK_CHECKPOINT: case BlockValidationResult::BLOCK_INVALID_PREV: - Misbehaving(nodeid, 100, message); + if (peer) Misbehaving(*peer, 100, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case BlockValidationResult::BLOCK_MISSING_PREV: // TODO: Handle this much more gracefully (10 DoS points is super arbitrary) - Misbehaving(nodeid, 10, message); + if (peer) Misbehaving(*peer, 10, message); return true; case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE: case BlockValidationResult::BLOCK_TIME_FUTURE: @@ -1425,12 +1687,13 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message) { + PeerRef peer{GetPeerRef(nodeid)}; switch (state.GetResult()) { case TxValidationResult::TX_RESULT_UNSET: break; // The node is providing invalid data: case TxValidationResult::TX_CONSENSUS: - Misbehaving(nodeid, 100, message); + if (peer) Misbehaving(*peer, 100, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: @@ -1455,9 +1718,9 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) { AssertLockHeld(cs_main); if (m_chainman.ActiveChain().Contains(pindex)) return true; - return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) && - (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && - (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); + return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) && + (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && + (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); } std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) @@ -1465,12 +1728,14 @@ std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl if (fImporting) return "Importing..."; if (fReindex) return "Reindexing..."; - LOCK(cs_main); // Ensure this peer exists and hasn't been disconnected - CNodeState* state = State(peer_id); - if (state == nullptr) return "Peer does not exist"; + PeerRef peer = GetPeerRef(peer_id); + if (peer == nullptr) return "Peer does not exist"; + // Ignore pre-segwit peers - if (!state->fHaveWitness) return "Pre-SegWit peer"; + if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer"; + + LOCK(cs_main); // Mark block as in-flight unless it already is (for this peer). 
// If a block was already in-flight for a different peer, its BLOCKTXN @@ -1495,17 +1760,17 @@ std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl return std::nullopt; } -std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, +std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs) { - return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, chainman, pool, ignore_incoming_txs); + return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, ignore_incoming_txs); } -PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman, +PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs) - : m_chainparams(chainparams), + : m_chainparams(chainman.GetParams()), m_connman(connman), m_addrman(addrman), m_banman(banman), @@ -1571,56 +1836,50 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo m_recent_confirmed_transactions.reset(); } -// All of the following cache a recent block, and are protected by cs_most_recent_block -static RecursiveMutex cs_most_recent_block; -static std::shared_ptr<const CBlock> most_recent_block GUARDED_BY(cs_most_recent_block); -static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block GUARDED_BY(cs_most_recent_block); -static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block); -static bool fWitnessesPresentInMostRecentCompactBlock GUARDED_BY(cs_most_recent_block); - /** * Maintain state about the best-seen block and fast-announce a compact block * to compatible peers. 
*/ void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) { - std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true); + auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); LOCK(cs_main); - static int nHighestFastAnnounce = 0; - if (pindex->nHeight <= nHighestFastAnnounce) + if (pindex->nHeight <= m_highest_fast_announce) return; - nHighestFastAnnounce = pindex->nHeight; + m_highest_fast_announce = pindex->nHeight; + + if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return; - bool fWitnessEnabled = DeploymentActiveAt(*pindex, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT); uint256 hashBlock(pblock->GetHash()); + const std::shared_future<CSerializedNetMsg> lazy_ser{ + std::async(std::launch::deferred, [&] { return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })}; { - LOCK(cs_most_recent_block); - most_recent_block_hash = hashBlock; - most_recent_block = pblock; - most_recent_compact_block = pcmpctblock; - fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled; + LOCK(m_most_recent_block_mutex); + m_most_recent_block_hash = hashBlock; + m_most_recent_block = pblock; + m_most_recent_compact_block = pcmpctblock; } - m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); - // TODO: Avoid the repeated-serialization here if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) return; ProcessBlockAvailability(pnode->GetId()); CNodeState &state = *State(pnode->GetId()); // If the peer has, or we announced to them the previous block already, // but we don't think they have this one, go ahead and announce it - if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) && - !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { + if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", hashBlock.ToString(), pnode->GetId()); - m_connman.PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock)); + + const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()}; + m_connman.PushMessage(pnode, ser_cmpctblock.Copy()); state.pindexBestHeaderSent = pindex; } }); @@ -1742,22 +2001,18 @@ void PeerManagerImpl::SendPings() void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid) { - WITH_LOCK(cs_main, _RelayTransaction(txid, wtxid);); -} - -void PeerManagerImpl::_RelayTransaction(const uint256& txid, const uint256& wtxid) -{ - m_connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { - AssertLockHeld(::cs_main); + LOCK(m_peer_mutex); + for(auto& it : m_peer_map) { + Peer& peer = *it.second; + auto tx_relay = peer.GetTxRelay(); + if (!tx_relay) continue; - CNodeState* state = State(pnode->GetId()); - if (state == nullptr) return; - if (state->m_wtxid_relay) { - pnode->PushTxInventory(wtxid); - } else { - pnode->PushTxInventory(txid); + const uint256& hash{peer.m_wtxid_relay ? 
wtxid : txid}; + LOCK(tx_relay->m_tx_inventory_mutex); + if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) { + tx_relay->m_tx_inventory_to_send.insert(hash); } - }); + }; } void PeerManagerImpl::RelayAddress(NodeId originator, @@ -1776,9 +2031,12 @@ void PeerManagerImpl::RelayAddress(NodeId originator, // Use deterministic randomness to send to the same nodes for 24 hours // at a time so the m_addr_knowns of the chosen nodes prevent repeats const uint64_t hash_addr{CServiceHash(0, 0)(addr)}; + const auto current_time{GetTime<std::chrono::seconds>()}; + // Adding address hash makes exact rotation time different per address, while preserving periodicity. + const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)}; const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hash_addr) - .Write((GetTime() + hash_addr) / (24 * 60 * 60))}; + .Write(time_addr)}; FastRandomContext insecure_rand; // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. @@ -1811,12 +2069,10 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& { std::shared_ptr<const CBlock> a_recent_block; std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; - bool fWitnessesPresentInARecentCompactBlock; { - LOCK(cs_most_recent_block); - a_recent_block = most_recent_block; - a_recent_compact_block = most_recent_compact_block; - fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock; + LOCK(m_most_recent_block_mutex); + a_recent_block = m_most_recent_block; + a_recent_compact_block = m_most_recent_compact_block; } bool need_activate_chain = false; @@ -1854,7 +2110,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); // disconnect node in case we have reached the outbound limit for serving historical blocks if (m_connman.OutboundTargetReached(true) && - (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && + (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target ) { LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId()); @@ -1863,7 +2119,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && ( - (((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) + (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) )) { LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, 
disconnect peer=%d\n", pfrom.GetId()); //disconnect node and prevent it from stalling (would otherwise wait for the missing block) @@ -1903,11 +2159,11 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } else if (inv.IsMsgFilteredBlk()) { bool sendMerkleBlock = false; CMerkleBlock merkleBlock; - if (pfrom.m_tx_relay != nullptr) { - LOCK(pfrom.m_tx_relay->cs_filter); - if (pfrom.m_tx_relay->pfilter) { + if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { + LOCK(tx_relay->m_bloom_filter_mutex); + if (tx_relay->m_bloom_filter) { sendMerkleBlock = true; - merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter); + merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter); } } if (sendMerkleBlock) { @@ -1929,17 +2185,15 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& // they won't have a useful mempool to match against a compact block, // and we don't feel like constructing the object for them, so // instead we respond with the full, non-compact block. - bool fPeerWantsWitness = State(pfrom.GetId())->fWantsCmpctWitness; - int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS; if (CanDirectFetch() && pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) { - if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { - m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); + if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, *a_recent_compact_block)); } else { - CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness); - m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); + CBlockHeaderAndShortTxIDs cmpctblock{*pblock}; + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock)); } } else { - m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); } } } @@ -1990,13 +2244,15 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic { AssertLockNotHeld(cs_main); + auto tx_relay = peer.GetTxRelay(); + std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); std::vector<CInv> vNotFound; const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); const auto now{GetTime<std::chrono::seconds>()}; // Get last mempool request time - const auto mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min(); + const auto mempool_req = tx_relay != nullptr ? tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min(); // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process @@ -2009,8 +2265,9 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic const CInv &inv = *it++; - if (pfrom.m_tx_relay == nullptr) { - // Ignore GETDATA requests for transactions from blocks-only peers. + if (tx_relay == nullptr) { + // Ignore GETDATA requests for transactions from block-relay-only + // peers and peers that asked us not to announce transactions. 
         continue;
     }
@@ -2037,7 +2294,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
         }
         for (const uint256& parent_txid : parent_ids_to_add) {
             // Relaying a transaction with a recent but unconfirmed parent.
-            if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
+            if (WITH_LOCK(tx_relay->m_tx_inventory_mutex, return !tx_relay->m_tx_inventory_known_filter.contains(parent_txid))) {
                 LOCK(cs_main);
                 State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
             }
@@ -2079,216 +2336,539 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
     }
 }

-static uint32_t GetFetchFlags(const CNode& pfrom) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
+uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const
+{
     uint32_t nFetchFlags = 0;
-    if (State(pfrom.GetId())->fHaveWitness) {
+    if (CanServeWitnesses(peer)) {
         nFetchFlags |= MSG_WITNESS_FLAG;
     }
     return nFetchFlags;
 }

-void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req)
+void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req)
 {
     BlockTransactions resp(req);
     for (size_t i = 0; i < req.indexes.size(); i++) {
         if (req.indexes[i] >= block.vtx.size()) {
-            Misbehaving(pfrom.GetId(), 100, "getblocktxn with out-of-bounds tx indices");
+            Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
             return;
         }
         resp.txn[i] = block.vtx[req.indexes[i]];
     }
-    LOCK(cs_main);
+
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
-    int nSendFlags = State(pfrom.GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
-    m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
+    m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCKTXN, resp));
 }

-void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
-                                            const std::vector<CBlockHeader>& headers,
-                                            bool via_compact_block)
+bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer)
+{
+    // Do these headers have proof-of-work matching what's claimed?
+    if (!HasValidProofOfWork(headers, consensusParams)) {
+        Misbehaving(peer, 100, "header with invalid proof of work");
+        return false;
+    }
+
+    // Are these headers connected to each other?
+    if (!CheckHeadersAreContinuous(headers)) {
+        Misbehaving(peer, 20, "non-continuous headers sequence");
+        return false;
+    }
+    return true;
+}
+
+arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
+{
+    arith_uint256 near_chaintip_work = 0;
+    LOCK(cs_main);
+    if (m_chainman.ActiveChain().Tip() != nullptr) {
+        const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
+        // Use a 144 block buffer, so that we'll accept headers that fork from
+        // near our tip.
+        near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork);
+    }
+    return std::max(near_chaintip_work, arith_uint256(nMinimumChainWork));
+}
+
+/**
+ * Special handling for unconnecting headers that might be part of a block
+ * announcement.
+ *
+ * We'll send a getheaders message in response to try to connect the chain.
+ *
+ * The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ * don't connect before being given DoS points.
+ *
+ * Once a headers message is received that is valid and does connect,
+ * nUnconnectingHeaders gets reset back to 0.
+ */ +void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, + const std::vector<CBlockHeader>& headers) { const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); - size_t nCount = headers.size(); - if (nCount == 0) { - // Nothing interesting. Stop asking this peers for more headers. - return; + LOCK(cs_main); + CNodeState *nodestate = State(pfrom.GetId()); + + nodestate->nUnconnectingHeaders++; + // Try to fill in the missing headers. + if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) { + LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", + headers[0].GetHash().ToString(), + headers[0].hashPrevBlock.ToString(), + m_chainman.m_best_header->nHeight, + pfrom.GetId(), nodestate->nUnconnectingHeaders); + } + // Set hashLastUnknownBlock for this peer, so that if we + // eventually get the headers - even from a different peer - + // we can use this peer to download. + UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()); + + // The peer may just be broken, so periodically assign DoS points if this + // condition persists. + if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { + Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders)); } +} - bool received_new_header = false; - const CBlockIndex *pindexLast = nullptr; - { - LOCK(cs_main); - CNodeState *nodestate = State(pfrom.GetId()); +bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const +{ + uint256 hashLastBlock; + for (const CBlockHeader& header : headers) { + if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { + return false; + } + hashLastBlock = header.GetHash(); + } + return true; +} - // If this looks like it could be a block announcement (nCount < - // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that - // don't connect: - // - Send a getheaders message in response to try to connect the chain. - // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that - // don't connect before giving DoS points - // - Once a headers message is received that is valid and does connect, - // nUnconnectingHeaders gets reset back to 0. - if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { - nodestate->nUnconnectingHeaders++; - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256())); - LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", - headers[0].GetHash().ToString(), - headers[0].hashPrevBlock.ToString(), - pindexBestHeader->nHeight, - pfrom.GetId(), nodestate->nUnconnectingHeaders); - // Set hashLastUnknownBlock for this peer, so that if we - // eventually get the headers - even from a different peer - - // we can use this peer to download. 
- UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()); - - if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { - Misbehaving(pfrom.GetId(), 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders)); +bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers) +{ + if (peer.m_headers_sync) { + auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS); + if (result.request_more) { + auto locator = peer.m_headers_sync->NextHeadersRequestLocator(); + // If we were instructed to ask for a locator, it should not be empty. + Assume(!locator.vHave.empty()); + if (!locator.vHave.empty()) { + // It should be impossible for the getheaders request to fail, + // because we should have cleared the last getheaders timestamp + // when processing the headers that triggered this call. But + // it may be possible to bypass this via compactblock + // processing, so check the result before logging just to be + // safe. + bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer); + if (sent_getheaders) { + LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n", + locator.vHave.front().ToString(), pfrom.GetId()); + } else { + LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n", + locator.vHave.front().ToString(), pfrom.GetId()); + } } - return; } - uint256 hashLastBlock; - for (const CBlockHeader& header : headers) { - if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { - Misbehaving(pfrom.GetId(), 20, "non-continuous headers sequence"); - return; + if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) { + peer.m_headers_sync.reset(nullptr); + + // Delete this peer's entry in m_headers_presync_stats. + // If this is m_headers_presync_bestpeer, it will be replaced later + // by the next peer that triggers the else{} branch below. + LOCK(m_headers_presync_mutex); + m_headers_presync_stats.erase(pfrom.GetId()); + } else { + // Build statistics for this peer's sync. + HeadersPresyncStats stats; + stats.first = peer.m_headers_sync->GetPresyncWork(); + if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) { + stats.second = {peer.m_headers_sync->GetPresyncHeight(), + peer.m_headers_sync->GetPresyncTime()}; + } + + // Update statistics in stats. + LOCK(m_headers_presync_mutex); + m_headers_presync_stats[pfrom.GetId()] = stats; + auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer); + bool best_updated = false; + if (best_it == m_headers_presync_stats.end()) { + // If the cached best peer is outdated, iterate over all remaining ones (including + // newly updated one) to find the best one. + NodeId peer_best{-1}; + const HeadersPresyncStats* stat_best{nullptr}; + for (const auto& [peer, stat] : m_headers_presync_stats) { + if (!stat_best || stat > *stat_best) { + peer_best = peer; + stat_best = &stat; + } + } + m_headers_presync_bestpeer = peer_best; + best_updated = (peer_best == pfrom.GetId()); + } else if (best_it->first == pfrom.GetId() || stats > best_it->second) { + // pfrom was and remains the best peer, or pfrom just became best. + m_headers_presync_bestpeer = pfrom.GetId(); + best_updated = true; + } + if (best_updated && stats.second.has_value()) { + // If the best peer updated, and it is in its first phase, signal. 
+                    m_headers_presync_should_signal = true;
                 }
-            hashLastBlock = header.GetHash();
             }
-        // If we don't have the last header, then they'll have given us
-        // something new (if these headers are valid).
-        if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
-            received_new_header = true;
+        if (result.success) {
+            // We only overwrite the headers passed in if processing was
+            // successful.
+            headers.swap(result.pow_validated_headers);
         }
+
+        return result.success;
     }
+    // Either we didn't have a sync in progress, or something went wrong
+    // processing these headers, or we are returning headers to the caller to
+    // process.
+    return false;
+}

-    BlockValidationState state;
-    if (!m_chainman.ProcessNewBlockHeaders(headers, state, m_chainparams, &pindexLast)) {
-        if (state.IsInvalid()) {
-            MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received");
-            return;
+bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers)
+{
+    // Calculate the total work on this chain.
+    arith_uint256 total_work = chain_start_header->nChainWork + CalculateHeadersWork(headers);
+
+    // Our dynamic anti-DoS threshold (minimum work required on a headers chain
+    // before we'll store it)
+    arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
+
+    // Avoid DoS via low-difficulty-headers by only processing if the headers
+    // are part of a chain with sufficient work.
+    if (total_work < minimum_chain_work) {
+        // Only try to sync with this peer if their headers message was full;
+        // otherwise they don't have more headers after this so no point in
+        // trying to sync their too-little-work chain.
+        if (headers.size() == MAX_HEADERS_RESULTS) {
+            // Note: we could advance to the last header in this set that is
+            // known to us, rather than starting at the first header (which we
+            // may already have); however this is unlikely to matter much since
+            // ProcessHeadersMessage() already handles the case where all
+            // headers in a received message are already known and are
+            // ancestors of m_best_header or chainActive.Tip(), by skipping
+            // this logic in that case. So even if the first header in this set
+            // of headers is known, some header in this set must be new, so
+            // advancing to the first unknown header would be a small effect.
+            LOCK(peer.m_headers_sync_mutex);
+            peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
+                chain_start_header, minimum_chain_work));
+
+            // Now that a HeadersSyncState object for tracking this synchronization
+            // has been created, process the headers using it as normal.
+            return IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
+        } else {
+            LogPrint(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId());
+            // Since this is a low-work headers chain, no further processing is required.
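The work check in TryLowWorkHeadersSync() above reduces to one comparison against the threshold computed by GetAntiDoSWorkThreshold() earlier in this hunk: the tip's chain work minus a 144-block buffer, floored at nMinimumChainWork. A self-contained sketch of that decision, with plain 64-bit integers standing in for arith_uint256 and all names invented for illustration (the low-work branch then concludes immediately below, clearing the headers and returning):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative work units; the real code uses arith_uint256 throughout.
    uint64_t AntiDoSWorkThreshold(uint64_t tip_work, uint64_t work_per_block, uint64_t minimum_chain_work)
    {
        // Accept forks starting up to 144 blocks (about one day) below the tip...
        const uint64_t buffer = std::min(144 * work_per_block, tip_work);
        // ...but never accept a chain below the hardcoded minimum work.
        return std::max(tip_work - buffer, minimum_chain_work);
    }

    enum class HeadersAction { Process, StartPresync, Ignore };

    HeadersAction ClassifyHeaders(uint64_t start_work, const std::vector<uint64_t>& header_work,
                                  uint64_t threshold, bool message_was_full)
    {
        uint64_t total_work = start_work;
        for (uint64_t w : header_work) total_work += w;
        if (total_work >= threshold) return HeadersAction::Process; // enough work: validate now
        if (message_was_full) return HeadersAction::StartPresync;   // more headers may follow
        return HeadersAction::Ignore;                               // complete chain, too little work
    }

The full-message distinction matters: a full message (MAX_HEADERS_RESULTS headers) implies the peer likely has more to give, so a presync is worth starting; a short message means that chain is complete and provably too weak.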
+ headers = {}; + return true; } } + return false; +} - { - LOCK(cs_main); - CNodeState *nodestate = State(pfrom.GetId()); - if (nodestate->nUnconnectingHeaders > 0) { - LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders); - } - nodestate->nUnconnectingHeaders = 0; +bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) +{ + if (header == nullptr) { + return false; + } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) { + return true; + } else if (m_chainman.ActiveChain().Contains(header)) { + return true; + } + return false; +} - assert(pindexLast); - UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash()); +bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) +{ + const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); - // From here, pindexBestKnownBlock should be guaranteed to be non-null, - // because it is set in UpdateBlockAvailability. Some nullptr checks - // are still present, however, as belt-and-suspenders. + const auto current_time = NodeClock::now(); - if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { - nodestate->m_last_block_announcement = GetTime(); - } + // Only allow a new getheaders message to go out if we don't have a recent + // one already in-flight + if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) { + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256())); + peer.m_last_getheaders_timestamp = current_time; + return true; + } + return false; +} - if (nCount == MAX_HEADERS_RESULTS) { - // Headers message had its maximum size; the peer may have more headers. - // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue - // from there instead. - LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", - pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256())); - } - - // If this set of headers is valid and ends in a block with at least as - // much work as our tip, download as much as possible. - if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) { - std::vector<const CBlockIndex*> vToFetch; - const CBlockIndex *pindexWalk = pindexLast; - // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. - while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && - !IsBlockRequested(pindexWalk->GetBlockHash()) && - (!DeploymentActiveAt(*pindexWalk, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT) || State(pfrom.GetId())->fHaveWitness)) { - // We don't have this block, and it's not yet in flight. - vToFetch.push_back(pindexWalk); - } - pindexWalk = pindexWalk->pprev; +/* + * Given a new headers tip ending in pindexLast, potentially request blocks towards that tip. + * We require that the given tip have at least as much work as our tip, and for + * our current tip to be "close to synced" (see CanDirectFetch()). 
+ */ +void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex* pindexLast) +{ + const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); + + LOCK(cs_main); + CNodeState *nodestate = State(pfrom.GetId()); + + if (CanDirectFetch() && pindexLast->IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) { + + std::vector<const CBlockIndex*> vToFetch; + const CBlockIndex *pindexWalk = pindexLast; + // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. + while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && + !IsBlockRequested(pindexWalk->GetBlockHash()) && + (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) { + // We don't have this block, and it's not yet in flight. + vToFetch.push_back(pindexWalk); } - // If pindexWalk still isn't on our main chain, we're looking at a - // very large reorg at a time we think we're close to caught up to - // the main chain -- this shouldn't really happen. Bail out on the - // direct fetch and rely on parallel download instead. - if (!m_chainman.ActiveChain().Contains(pindexWalk)) { - LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", - pindexLast->GetBlockHash().ToString(), - pindexLast->nHeight); - } else { - std::vector<CInv> vGetData; - // Download as much as possible, from earliest to latest. - for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { - if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { - // Can't download any more from this peer - break; - } - uint32_t nFetchFlags = GetFetchFlags(pfrom); - vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - BlockRequested(pfrom.GetId(), *pindex); - LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", - pindex->GetBlockHash().ToString(), pfrom.GetId()); - } - if (vGetData.size() > 1) { - LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", - pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); + pindexWalk = pindexWalk->pprev; + } + // If pindexWalk still isn't on our main chain, we're looking at a + // very large reorg at a time we think we're close to caught up to + // the main chain -- this shouldn't really happen. Bail out on the + // direct fetch and rely on parallel download instead. + if (!m_chainman.ActiveChain().Contains(pindexWalk)) { + LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", + pindexLast->GetBlockHash().ToString(), + pindexLast->nHeight); + } else { + std::vector<CInv> vGetData; + // Download as much as possible, from earliest to latest. 
+ for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { + if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + // Can't download any more from this peer + break; } - if (vGetData.size() > 0) { - if (!m_ignore_incoming_txs && - nodestate->fSupportsDesiredCmpctVersion && + uint32_t nFetchFlags = GetFetchFlags(peer); + vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); + BlockRequested(pfrom.GetId(), *pindex); + LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", + pindex->GetBlockHash().ToString(), pfrom.GetId()); + } + if (vGetData.size() > 1) { + LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", + pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); + } + if (vGetData.size() > 0) { + if (!m_ignore_incoming_txs && + nodestate->m_provides_cmpctblocks && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { - // In any case, we want to download using a compact block, not a regular one - vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); - } - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); + // In any case, we want to download using a compact block, not a regular one + vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } - // If we're in IBD, we want outbound peers that will serve us a useful - // chain. Disconnect peers that are on chains with insufficient work. - if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) { - // When nCount < MAX_HEADERS_RESULTS, we know we have no more - // headers to fetch from this peer. - if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { - // This peer has too little work on their headers chain to help - // us sync -- disconnect if it is an outbound disconnection - // candidate. - // Note: We compare their tip to nMinimumChainWork (rather than - // m_chainman.ActiveChain().Tip()) because we won't start block download - // until we have a headers chain that has at least - // nMinimumChainWork, even if a peer has a chain past our tip, - // as an anti-DoS measure. - if (pfrom.IsOutboundOrBlockRelayConn()) { - LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId()); - pfrom.fDisconnect = true; - } + } +} + +/** + * Given receipt of headers from a peer ending in pindexLast, along with + * whether that header was new and whether the headers message was full, + * update the state we keep for the peer. + */ +void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, + const CBlockIndex *pindexLast, bool received_new_header, bool may_have_more_headers) +{ + LOCK(cs_main); + CNodeState *nodestate = State(pfrom.GetId()); + if (nodestate->nUnconnectingHeaders > 0) { + LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders); + } + nodestate->nUnconnectingHeaders = 0; + + assert(pindexLast); + UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash()); + + // From here, pindexBestKnownBlock should be guaranteed to be non-null, + // because it is set in UpdateBlockAvailability. Some nullptr checks + // are still present, however, as belt-and-suspenders. 
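An aside on HeadersDirectFetchBlocks(), whose body ends just above: it walks back from the announced tip collecting blocks we still need, bails out if the walk never reaches our active chain (a large reorg), and then requests the collected blocks earliest-first. A simplified, self-contained sketch of that walk, where Node, BlocksToFetch and the set-based chain lookup are invented stand-ins for CBlockIndex and CChain (UpdatePeerStateForReceivedHeaders() then continues below):

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Simplified header-tree node standing in for CBlockIndex.
    struct Node {
        uint64_t hash;
        const Node* prev;
        bool have_data;
    };

    // Walk back from `target` until reaching a block on our active chain,
    // collecting up to `max_in_flight` blocks we still need, then return them
    // earliest-first (the order we want to request them in).
    std::vector<const Node*> BlocksToFetch(const Node* target,
                                           const std::unordered_set<uint64_t>& active_chain,
                                           size_t max_in_flight)
    {
        std::vector<const Node*> to_fetch;
        const Node* walk = target;
        while (walk && !active_chain.count(walk->hash) && to_fetch.size() <= max_in_flight) {
            if (!walk->have_data) to_fetch.push_back(walk);
            walk = walk->prev;
        }
        // Mirrors the large-reorg bail-out above: if the walk never reached
        // the active chain, leave it to parallel download instead.
        if (walk == nullptr || !active_chain.count(walk->hash)) return {};
        return std::vector<const Node*>(to_fetch.rbegin(), to_fetch.rend());
    }

Collecting newest-to-oldest and then reversing is what makes the earliest-first request order cheap: the walk only has parent pointers, so it naturally visits blocks tip-first.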
+
+    if (received_new_header && pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
+        nodestate->m_last_block_announcement = GetTime();
+    }
+
+    // If we're in IBD, we want outbound peers that will serve us a useful
+    // chain. Disconnect peers that are on chains with insufficient work.
+    if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !may_have_more_headers) {
+        // If the peer has no more headers to give us, then we know we have
+        // their tip.
+        if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
+            // This peer has too little work on their headers chain to help
+            // us sync -- disconnect if it is an outbound disconnection
+            // candidate.
+            // Note: We compare their tip to nMinimumChainWork (rather than
+            // m_chainman.ActiveChain().Tip()) because we won't start block download
+            // until we have a headers chain that has at least
+            // nMinimumChainWork, even if a peer has a chain past our tip,
+            // as an anti-DoS measure.
+            if (pfrom.IsOutboundOrBlockRelayConn()) {
+                LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
+                pfrom.fDisconnect = true;
            }
        }
    }

-    // If this is an outbound full-relay peer, check to see if we should protect
-    // it from the bad/lagging chain logic.
-    // Note that outbound block-relay peers are excluded from this protection, and
-    // thus always subject to eviction under the bad/lagging chain logic.
-    // See ChainSyncTimeoutState.
-    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
-        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
-            LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
-            nodestate->m_chain_sync.m_protect = true;
-            ++m_outbound_peers_with_protect_from_disconnect;
-        }
+    // If this is an outbound full-relay peer, check to see if we should protect
+    // it from the bad/lagging chain logic.
+    // Note that outbound block-relay peers are excluded from this protection, and
+    // thus always subject to eviction under the bad/lagging chain logic.
+    // See ChainSyncTimeoutState.
+    if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
+        if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
+            LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
+            nodestate->m_chain_sync.m_protect = true;
+            ++m_outbound_peers_with_protect_from_disconnect;
+        }
+    }
+}
+
+void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
+                                            std::vector<CBlockHeader>&& headers,
+                                            bool via_compact_block)
+{
+    size_t nCount = headers.size();
+
+    if (nCount == 0) {
+        // Nothing interesting. Stop asking this peer for more headers.
+        // If we were in the middle of headers sync, receiving an empty headers
+        // message suggests that the peer suddenly has nothing to give us
+        // (perhaps it reorged to our chain). Clear download state for this peer.
+ LOCK(peer.m_headers_sync_mutex); + if (peer.m_headers_sync) { + peer.m_headers_sync.reset(nullptr); + LOCK(m_headers_presync_mutex); + m_headers_presync_stats.erase(pfrom.GetId()); } + return; } + // Before we do any processing, make sure these pass basic sanity checks. + // We'll rely on headers having valid proof-of-work further down, as an + // anti-DoS criteria (note: this check is required before passing any + // headers into HeadersSyncState). + if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) { + // Misbehaving() calls are handled within CheckHeadersPoW(), so we can + // just return. (Note that even if a header is announced via compact + // block, the header itself should be valid, so this type of error can + // always be punished.) + return; + } + + const CBlockIndex *pindexLast = nullptr; + + // We'll set already_validated_work to true if these headers are + // successfully processed as part of a low-work headers sync in progress + // (either in PRESYNC or REDOWNLOAD phase). + // If true, this will mean that any headers returned to us (ie during + // REDOWNLOAD) can be validated without further anti-DoS checks. + bool already_validated_work = false; + + // If we're in the middle of headers sync, let it do its magic. + bool have_headers_sync = false; + { + LOCK(peer.m_headers_sync_mutex); + + already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); + + // The headers we passed in may have been: + // - untouched, perhaps if no headers-sync was in progress, or some + // failure occurred + // - erased, such as if the headers were successfully processed and no + // additional headers processing needs to take place (such as if we + // are still in PRESYNC) + // - replaced with headers that are now ready for validation, such as + // during the REDOWNLOAD phase of a low-work headers sync. + // So just check whether we still have headers that we need to process, + // or not. + if (headers.empty()) { + return; + } + + have_headers_sync = !!peer.m_headers_sync; + } + + // Do these headers connect to something in our block index? + const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))}; + bool headers_connect_blockindex{chain_start_header != nullptr}; + + if (!headers_connect_blockindex) { + if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) { + // If this looks like it could be a BIP 130 block announcement, use + // special logic for handling headers that don't connect, as this + // could be benign. + HandleFewUnconnectingHeaders(pfrom, peer, headers); + } else { + Misbehaving(peer, 10, "invalid header received"); + } + return; + } + + // If the headers we received are already in memory and an ancestor of + // m_best_header or our tip, skip anti-DoS checks. These headers will not + // use any more memory (and we are not leaking information that could be + // used to fingerprint us). + const CBlockIndex *last_received_header{nullptr}; + { + LOCK(cs_main); + last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()); + if (IsAncestorOfBestHeaderOrTip(last_received_header)) { + already_validated_work = true; + } + } + + // If our peer has NetPermissionFlags::NoBan privileges, then bypass our + // anti-DoS logic (this saves bandwidth when we connect to a trusted peer + // on startup). + if (pfrom.HasPermission(NetPermissionFlags::NoBan)) { + already_validated_work = true; + } + + // At this point, the headers connect to something in our block index. 
+ // Do anti-DoS checks to determine if we should process or store for later + // processing. + if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom, + chain_start_header, headers)) { + // If we successfully started a low-work headers sync, then there + // should be no headers to process any further. + Assume(headers.empty()); + return; + } + + // At this point, we have a set of headers with sufficient work on them + // which can be processed. + + // If we don't have the last header, then this peer will have given us + // something new (if these headers are valid). + bool received_new_header{last_received_header != nullptr}; + + // Now process all the headers. + BlockValidationState state; + if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) { + if (state.IsInvalid()) { + MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); + return; + } + } + Assume(pindexLast); + + // Consider fetching more headers if we are not using our headers-sync mechanism. + if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) { + // Headers message had its maximum size; the peer may have more headers. + if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) { + LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", + pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); + } + } + + UpdatePeerStateForReceivedHeaders(pfrom, pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS); + + // Consider immediately downloading blocks. + HeadersDirectFetchBlocks(pfrom, peer, pindexLast); + return; } @@ -2317,7 +2897,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); - _RelayTransaction(orphanHash, porphanTx->GetWitnessHash()); + RelayTransaction(orphanHash, porphanTx->GetWitnessHash()); m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set); m_orphanage.EraseTx(orphanHash); for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { @@ -2371,7 +2951,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) } } -bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer, +bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, BlockFilterType filter_type, uint32_t start_height, const uint256& stop_hash, uint32_t max_height_diff, const CBlockIndex*& stop_index, @@ -2379,11 +2959,11 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer, { const bool supported_filter_type = (filter_type == BlockFilterType::BASIC && - (peer.GetLocalServices() & NODE_COMPACT_FILTERS)); + (peer.m_our_services & NODE_COMPACT_FILTERS)); if (!supported_filter_type) { LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n", - peer.GetId(), static_cast<uint8_t>(filter_type)); - peer.fDisconnect = true; + node.GetId(), static_cast<uint8_t>(filter_type)); + node.fDisconnect = true; return false; } @@ -2394,8 +2974,8 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer, // Check that the stop block exists and the peer would be allowed to fetch it. 
if (!stop_index || !BlockRequestAllowed(stop_index)) { LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n", - peer.GetId(), stop_hash.ToString()); - peer.fDisconnect = true; + node.GetId(), stop_hash.ToString()); + node.fDisconnect = true; return false; } } @@ -2404,14 +2984,14 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer, if (start_height > stop_height) { LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */ "start height %d and stop height %d\n", - peer.GetId(), start_height, stop_height); - peer.fDisconnect = true; + node.GetId(), start_height, stop_height); + node.fDisconnect = true; return false; } if (stop_height - start_height >= max_height_diff) { LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n", - peer.GetId(), stop_height - start_height + 1, max_height_diff); - peer.fDisconnect = true; + node.GetId(), stop_height - start_height + 1, max_height_diff); + node.fDisconnect = true; return false; } @@ -2424,7 +3004,7 @@ bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& peer, return true; } -void PeerManagerImpl::ProcessGetCFilters(CNode& peer, CDataStream& vRecv) +void PeerManagerImpl::ProcessGetCFilters(CNode& node,Peer& peer, CDataStream& vRecv) { uint8_t filter_type_ser; uint32_t start_height; @@ -2436,7 +3016,7 @@ void PeerManagerImpl::ProcessGetCFilters(CNode& peer, CDataStream& vRecv) const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(peer, filter_type, start_height, stop_hash, + if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { return; } @@ -2449,13 +3029,13 @@ void PeerManagerImpl::ProcessGetCFilters(CNode& peer, CDataStream& vRecv) } for (const auto& filter : filters) { - CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) + CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion()) .Make(NetMsgType::CFILTER, filter); - m_connman.PushMessage(&peer, std::move(msg)); + m_connman.PushMessage(&node, std::move(msg)); } } -void PeerManagerImpl::ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv) +void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, CDataStream& vRecv) { uint8_t filter_type_ser; uint32_t start_height; @@ -2467,7 +3047,7 @@ void PeerManagerImpl::ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv) const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(peer, filter_type, start_height, stop_hash, + if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { return; } @@ -2490,16 +3070,16 @@ void PeerManagerImpl::ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv) return; } - CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) + CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion()) .Make(NetMsgType::CFHEADERS, filter_type_ser, stop_index->GetBlockHash(), prev_header, filter_hashes); - m_connman.PushMessage(&peer, std::move(msg)); + m_connman.PushMessage(&node, std::move(msg)); } -void PeerManagerImpl::ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv) +void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, CDataStream& vRecv) { uint8_t filter_type_ser; uint256 stop_hash; @@ -2510,7 +3090,7 @@ void PeerManagerImpl::ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv) const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(peer, 
filter_type, /*start_height=*/0, stop_hash, + if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash, /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), stop_index, filter_index)) { return; @@ -2531,18 +3111,18 @@ void PeerManagerImpl::ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv) } } - CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) + CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion()) .Make(NetMsgType::CFCHECKPT, filter_type_ser, stop_index->GetBlockHash(), headers); - m_connman.PushMessage(&peer, std::move(msg)); + m_connman.PushMessage(&node, std::move(msg)); } -void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing) +void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) { bool new_block{false}; - m_chainman.ProcessNewBlock(m_chainparams, block, force_processing, &new_block); + m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); if (new_block) { node.m_last_block_time = GetTime<std::chrono::seconds>(); } else { @@ -2633,7 +3213,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // Inbound peers send us their version message when they connect. // We send our version message in response. if (pfrom.IsInboundConn()) { - PushNodeVersion(pfrom); + PushNodeVersion(pfrom, *peer); } // Change version @@ -2658,7 +3238,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK)); - pfrom.nServices = nServices; + pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); + peer->m_their_services = nServices; pfrom.SetAddrLocal(addrMe); { LOCK(pfrom.m_subver_mutex); @@ -2666,27 +3247,26 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } peer->m_starting_height = starting_height; - // set nodes not relaying blocks and tx and not serving (parts) of the historical blockchain as "clients" - pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED)); - - // set nodes not capable of serving the complete blockchain history as "limited nodes" - pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED)); - - if (pfrom.m_tx_relay != nullptr) { - LOCK(pfrom.m_tx_relay->cs_filter); - pfrom.m_tx_relay->fRelayTxes = fRelay; // set to true after we get the first filter* message - } - - if((nServices & NODE_WITNESS)) - { - LOCK(cs_main); - State(pfrom.GetId())->fHaveWitness = true; + // We only initialize the m_tx_relay data structure if: + // - this isn't an outbound block-relay-only connection; and + // - fRelay=true or we're offering NODE_BLOOM to this peer + // (NODE_BLOOM means that the peer may turn on tx relay later) + if (!pfrom.IsBlockOnlyConn() && + (fRelay || (peer->m_our_services & NODE_BLOOM))) { + auto* const tx_relay = peer->SetTxRelay(); + { + LOCK(tx_relay->m_bloom_filter_mutex); + tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message + } + if (fRelay) pfrom.m_relays_txs = true; } // Potentially mark this peer as a preferred download peer. 
{ - LOCK(cs_main); - UpdatePreferredDownload(pfrom, State(pfrom.GetId())); + LOCK(cs_main); + CNodeState* state = State(pfrom.GetId()); + state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer); + m_num_preferred_download_peers += state->fPreferredDownload; } // Self advertisement & GETADDR logic @@ -2704,7 +3284,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // indicate to the peer that we will participate in addr relay. if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { - CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices()); + CAddress addr{GetLocalAddress(pfrom.addr), peer->m_our_services, Now<NodeSeconds>()}; FastRandomContext insecure_rand; if (addr.IsRoutable()) { @@ -2800,24 +3380,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, pfrom.ConnectionTypeAsString()); } - if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) { - // Tell our peer we prefer to receive headers rather than inv's - // We send this to non-NODE NETWORK peers as well, because even - // non-NODE NETWORK peers can announce blocks (such as pruning - // nodes) - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); - } if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { - // Tell our peer we are willing to provide version 1 or 2 cmpctblocks + // Tell our peer we are willing to provide version 2 cmpctblocks. // However, we do not request new block announcements using // cmpctblock messages. // We send this to non-NODE NETWORK peers as well, because // they may wish to request compact blocks from us - bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = 2; - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); - nCMPCTBLOCKVersion = 1; - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION)); } pfrom.fSuccessfullyConnected = true; return; @@ -2830,26 +3399,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::SENDCMPCT) { - bool fAnnounceUsingCMPCTBLOCK = false; - uint64_t nCMPCTBLOCKVersion = 0; - vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; - if (nCMPCTBLOCKVersion == 1 || nCMPCTBLOCKVersion == 2) { - LOCK(cs_main); - // fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness) - if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) { - State(pfrom.GetId())->fProvidesHeaderAndIDs = true; - State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2; - } - if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) { // ignore later version announces - State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; - // save whether peer selects us as BIP152 high-bandwidth peer - // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) - pfrom.m_bip152_highbandwidth_from = fAnnounceUsingCMPCTBLOCK; - } - if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) { - State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2); - } - } + bool sendcmpct_hb{false}; + uint64_t sendcmpct_version{0}; + vRecv >> sendcmpct_hb >> sendcmpct_version; + + // 
Only support compact block relay with witnesses + if (sendcmpct_version != CMPCTBLOCKS_VERSION) return; + + LOCK(cs_main); + CNodeState* nodestate = State(pfrom.GetId()); + nodestate->m_provides_cmpctblocks = true; + nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb; + // save whether peer selects us as BIP152 high-bandwidth peer + // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) + pfrom.m_bip152_highbandwidth_from = sendcmpct_hb; return; } @@ -2863,9 +3426,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) { - LOCK(cs_main); - if (!State(pfrom.GetId())->m_wtxid_relay) { - State(pfrom.GetId())->m_wtxid_relay = true; + if (!peer->m_wtxid_relay) { + peer->m_wtxid_relay = true; m_wtxid_relay_peers++; } else { LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); @@ -2914,21 +3476,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (vAddr.size() > MAX_ADDR_TO_SEND) { - Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size())); + Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size())); return; } // Store the new addresses std::vector<CAddress> vAddrOk; - int64_t nNow = GetAdjustedTime(); - int64_t nSince = nNow - 10 * 60; + const auto current_a_time{Now<NodeSeconds>()}; // Update/increment addr rate limiting bucket. const auto current_time{GetTime<std::chrono::microseconds>()}; if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) { // Don't increment bucket if it's already full const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us); - const double increment = CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND; + const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND; peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET); } peer->m_addr_token_timestamp = current_time; @@ -2957,8 +3518,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) continue; - if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) - addr.nTime = nNow - 5 * 24 * 60 * 60; + if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) { + addr.nTime = current_a_time - 5 * 24h; + } AddAddressKnown(*peer, addr); if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { // Do not process banned/discouraged addresses beyond remembering we received them @@ -2966,7 +3528,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } ++num_proc; bool fReachable = IsReachable(addr); - if (addr.nTime > nSince && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { + if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes RelayAddress(pfrom.GetId(), addr, fReachable); } @@ -2979,7 +3541,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n", vAddr.size(), num_proc, num_rate_limit, pfrom.GetId()); - m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60); + m_addrman.Add(vAddrOk, pfrom.addr, 2h); if (vAddr.size() < 1000) 
peer->m_getaddr_sent = false; // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements @@ -2995,18 +3557,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { - Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size())); + Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size())); return; } - // Reject tx INVs when the -blocksonly setting is enabled, or this is a - // block-relay-only peer - bool reject_tx_invs{m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr)}; - - // Allow peers with relay permission to send data other than blocks in blocks only mode - if (pfrom.HasPermission(NetPermissionFlags::Relay)) { - reject_tx_invs = false; - } + const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; LOCK(cs_main); @@ -3019,7 +3574,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // Ignore INVs that don't match wtxidrelay setting. // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. // This is fine as no INV messages are involved in that process. - if (State(pfrom.GetId())->m_wtxid_relay) { + if (peer->m_wtxid_relay) { if (inv.IsMsgTx()) continue; } else { if (inv.IsMsgWtx()) continue; @@ -3032,8 +3587,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, UpdateBlockAvailability(pfrom.GetId(), inv.hash); if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) { // Headers-first is the primary method of announcement on - // the network. If a node fell back to sending blocks by inv, - // it's probably for a re-org. The final block hash + // the network. If a node fell back to sending blocks by + // inv, it may be for a re-org, or because we haven't + // completed initial headers sync. The final block hash // provided should be the highest, so send a getheaders and // then fetch the blocks we need to catch up. best_block = &inv.hash; @@ -3048,7 +3604,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const bool fAlreadyHave = AlreadyHaveTx(gtxid); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); - pfrom.AddKnownTx(inv.hash); + AddKnownTx(*peer, inv.hash); if (!fAlreadyHave && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { AddTxAnnouncement(pfrom, gtxid, current_time); } @@ -3058,8 +3614,31 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (best_block != nullptr) { - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), *best_block)); - LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId()); + // If we haven't started initial headers-sync with this peer, then + // consider sending a getheaders now. On initial startup, there's a + // reliability vs bandwidth tradeoff, where we are only trying to do + // initial headers sync with one peer at a time, with a long + // timeout (at which point, if the sync hasn't completed, we will + // disconnect the peer and then choose another). In the meantime, + // as new blocks are found, we are willing to add one new peer per + // block to sync with as well, to sync quicker in the case where + // our initial peer is unresponsive (but less bandwidth than we'd + // use if we turned on sync with all peers). 
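
For context on the addr rate-limiting hunk above: the patch tracks a per-peer token bucket that refills at MAX_ADDR_RATE_PER_SECOND (0.1 tokens/s) up to MAX_ADDR_PROCESSING_TOKEN_BUCKET, and each address processed costs one token. A minimal standalone sketch of that scheme; the struct and its names are illustrative, not the patch's code, and the cap of 1000 is an assumption:

    #include <algorithm>
    #include <chrono>

    struct AddrRateLimiter {
        double m_tokens{1000.0};                       // assumed starting value/cap
        std::chrono::microseconds m_last_update{0};

        // Refill from elapsed time, then try to spend one token per address.
        bool Consume(std::chrono::microseconds now,
                     double rate_per_sec = 0.1, double cap = 1000.0)
        {
            const auto elapsed = std::max(now - m_last_update, std::chrono::microseconds{0});
            m_tokens = std::min(m_tokens + std::chrono::duration<double>(elapsed).count() * rate_per_sec, cap);
            m_last_update = now;
            if (m_tokens < 1.0) return false;          // rate-limited: skip this address
            m_tokens -= 1.0;
            return true;
        }
    };

Addresses that fail the token check are tallied as num_rate_limit and otherwise ignored, which is how the hunk above keeps a flooding peer from monopolizing addrman.
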
+ CNodeState& state{*Assert(State(pfrom.GetId()))}; + if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { + if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) { + LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", + m_chainman.m_best_header->nHeight, best_block->ToString(), + pfrom.GetId()); + } + if (!state.fSyncStarted) { + peer->m_inv_triggered_getheaders_before_sync = true; + // Update the last block hash that triggered a new headers + // sync, so that we don't turn on headers sync with more + // than 1 new peer every new block. + m_last_block_inv_triggering_headers_sync = *best_block; + } + } } return; @@ -3070,7 +3649,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { - Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size())); + Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size())); return; } @@ -3110,8 +3689,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { std::shared_ptr<const CBlock> a_recent_block; { - LOCK(cs_most_recent_block); - a_recent_block = most_recent_block; + LOCK(m_most_recent_block_mutex); + a_recent_block = m_most_recent_block; } BlockValidationState state; if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { @@ -3162,13 +3741,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, std::shared_ptr<const CBlock> recent_block; { - LOCK(cs_most_recent_block); - if (most_recent_block_hash == req.blockhash) - recent_block = most_recent_block; - // Unlock cs_most_recent_block to avoid cs_main lock inversion + LOCK(m_most_recent_block_mutex); + if (m_most_recent_block_hash == req.blockhash) + recent_block = m_most_recent_block; + // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion } if (recent_block) { - SendBlockTransactions(pfrom, *recent_block, req); + SendBlockTransactions(pfrom, *peer, *recent_block, req); return; } @@ -3186,7 +3765,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus()); assert(ret); - SendBlockTransactions(pfrom, block, req); + SendBlockTransactions(pfrom, *peer, block, req); return; } } @@ -3199,9 +3778,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // expensive disk reads, because it will require the peer to // actually receive all the data read from disk over the network. LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); - CInv inv; - WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? 
MSG_WITNESS_BLOCK : MSG_BLOCK); - inv.hash = req.blockhash; + CInv inv{MSG_WITNESS_BLOCK, req.blockhash}; WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv)); // The message processing loop will go around again (without pausing) and we'll respond then return; @@ -3218,9 +3795,26 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + if (fImporting || fReindex) { + LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); + return; + } + LOCK(cs_main); - if (m_chainman.ActiveChainstate().IsInitialBlockDownload() && !pfrom.HasPermission(NetPermissionFlags::Download)) { - LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom.GetId()); + + // Note that if we were to be on a chain that forks from the checkpointed + // chain, then serving those headers to a peer that has seen the + // checkpointed chain would cause that peer to disconnect us. Requiring + // that our chainwork exceed nMinimumChainWork is a protection against + // being fed a bogus chain when we started up for the first time and + // getting partitioned off the honest network for serving that chain to + // others. + if (m_chainman.ActiveTip() == nullptr || + (m_chainman.ActiveTip()->nChainWork < nMinimumChainWork && !pfrom.HasPermission(NetPermissionFlags::Download))) { + LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); + // Just respond with an empty headers message, to tell the peer to + // go away but not treat us as unresponsive. + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, std::vector<CBlock>())); return; } @@ -3275,11 +3869,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::TX) { - // Stop processing the transaction early if - // 1) We are in blocks only mode and peer has no relay permission - // 2) This peer is a block-relay-only peer - if ((m_ignore_incoming_txs && !pfrom.HasPermission(NetPermissionFlags::Relay)) || (pfrom.m_tx_relay == nullptr)) - { + if (RejectIncomingTxs(pfrom)) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -3297,21 +3887,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const uint256& txid = ptx->GetHash(); const uint256& wtxid = ptx->GetWitnessHash(); - LOCK2(cs_main, g_cs_orphans); - - CNodeState* nodestate = State(pfrom.GetId()); - - const uint256& hash = nodestate->m_wtxid_relay ? wtxid : txid; - pfrom.AddKnownTx(hash); - if (nodestate->m_wtxid_relay && txid != wtxid) { - // Insert txid into filterInventoryKnown, even for + const uint256& hash = peer->m_wtxid_relay ? wtxid : txid; + AddKnownTx(*peer, hash); + if (peer->m_wtxid_relay && txid != wtxid) { + // Insert txid into m_tx_inventory_known_filter, even for // wtxidrelay peers. This prevents re-adding of // unconfirmed parents to the recently_announced // filter, when a child tx is requested. See // ProcessGetData(). 
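
Several paths above (inv-triggered sync, cmpctblock recovery, initial sync) send getheaders with a locator built by GetLocator(). A locator samples block hashes densely near the tip and exponentially sparser toward genesis, so the responder can find the fork point with O(log n) entries. A sketch of the standard construction, assuming Core's CBlockIndex skip-list accessor:

    #include <algorithm>
    #include <chain.h>
    #include <vector>

    std::vector<uint256> LocatorHashes(const CBlockIndex* index)
    {
        std::vector<uint256> hashes;
        int step = 1;
        while (index) {
            hashes.push_back(index->GetBlockHash());
            if (index->nHeight == 0) break;            // genesis reached
            // Walk back `step` blocks via the skip list; after the first
            // 10 entries, double the step each time.
            index = index->GetAncestor(std::max(index->nHeight - step, 0));
            if (hashes.size() > 10) step *= 2;
        }
        return hashes;
    }
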
- pfrom.AddKnownTx(txid); + AddKnownTx(*peer, txid); } + LOCK2(cs_main, g_cs_orphans); + m_txrequest.ReceivedResponse(pfrom.GetId(), txid); if (tx.HasWitness()) m_txrequest.ReceivedResponse(pfrom.GetId(), wtxid); @@ -3336,7 +3924,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); } else { LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId()); - _RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); } } return; @@ -3350,7 +3938,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // requests for it. m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); - _RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); + RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set); pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); @@ -3397,7 +3985,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // Eventually we should replace this with an improved // protocol for getting all unconfirmed parents. const auto gtxid{GenTxid::Txid(parent_txid)}; - pfrom.AddKnownTx(parent_txid); + AddKnownTx(*peer, parent_txid); if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time); } @@ -3411,10 +3999,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetIntArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); - unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTx); - if (nEvicted > 0) { - LogPrint(BCLog::MEMPOOL, "orphanage overflow, removed %u tx\n", nEvicted); - } + m_orphanage.LimitOrphans(nMaxOrphanTx); } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString()); // We will continue to reject this tx since it has rejected @@ -3505,10 +4090,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(cs_main); - if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { + const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); + if (!prev_block) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers - if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256())); + if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) { + MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer); + } + return; + } else if (prev_block->nChainWork + CalculateHeadersWork({cmpctblock.header}) < GetAntiDoSWorkThreshold()) { + // If we get a low-work header in a compact block, we can ignore it. 
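
The gate above adds the claimed work of the compact block's header to prev_block->nChainWork and compares the sum against GetAntiDoSWorkThreshold(). CalculateHeadersWork() itself is outside this hunk; per-header work follows the usual GetBlockProof() derivation from the compact nBits target:

    #include <arith_uint256.h>
    #include <primitives/block.h>

    arith_uint256 HeaderWork(const CBlockHeader& header)
    {
        arith_uint256 target;
        bool negative, overflow;
        target.SetCompact(header.nBits, &negative, &overflow);
        if (negative || overflow || target == 0) return 0;
        // Work is ~2^256 / (target + 1), computed as below so it fits in
        // 256 bits: (2^256 - target - 1) / (target + 1) + 1 == 2^256 / (target + 1).
        return (~target / (target + 1)) + 1;
    }

Summed over the headers in a message, this is the claimed chainwork being checked, so a peer cannot cheaply make us process low-work headers.
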
+ LogPrint(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); return; } @@ -3519,9 +4110,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const CBlockIndex *pindex = nullptr; BlockValidationState state; - if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) { + if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, /*min_pow_checked=*/true, state, &pindex)) { if (state.IsInvalid()) { - MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock"); + MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); return; } } @@ -3568,7 +4159,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // We requested this block for some reason, but our mempool will probably be useless // so we just grab the block via normal getdata std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), cmpctblock.header.GetHash()); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return; @@ -3579,12 +4170,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - if (DeploymentActiveAt(*pindex, m_chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT) && !nodestate->fSupportsDesiredCmpctVersion) { - // Don't bother trying to process compact blocks from v1 peers - // after segwit activates. - return; - } - // We want to be a bit conservative just to be extra careful about DoS // possibilities in compact block processing... if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { @@ -3605,12 +4190,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect - Misbehaving(pfrom.GetId(), 100, "invalid compact block"); + Misbehaving(*peer, 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so just request it std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), cmpctblock.header.GetHash()); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } @@ -3653,7 +4238,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // We requested this block, but its far into the future, so our // mempool will probably be useless - request the block normally std::vector<CInv> vInv(1); - vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash()); + vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), cmpctblock.header.GetHash()); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } else { @@ -3692,7 +4277,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // we have a chain with at least nMinimumChainWork), and we ignore // compact blocks with less work than our tip, it is safe to treat // reconstructed compact blocks as having been requested. 
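
Since only version-2 compact blocks are negotiated now, short transaction IDs are always computed over wtxids. As a refresher on the BIP152 short-ID scheme that the reconstruction above relies on (a sketch of the scheme, not this file's code): the SipHash keys come from a single SHA256 of the block header with the nonce appended, and each ID keeps only the low 48 bits:

    #include <crypto/siphash.h>   // assumes Core's SipHashUint256() helper
    #include <uint256.h>

    uint64_t ShortTxId(const uint256& header_and_nonce_hash, const uint256& wtxid)
    {
        const uint64_t k0 = header_and_nonce_hash.GetUint64(0);
        const uint64_t k1 = header_and_nonce_hash.GetUint64(1);
        // Drop the two most significant bytes, keeping a 6-byte short ID.
        return SipHashUint256(k0, k1, wtxid) & 0xffffffffffffL;
    }

Short-ID collisions across the mempool are exactly why the READ_STATUS_FAILED paths above fall back to requesting the full block via getdata.
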
- ProcessBlock(pfrom, pblock, /*force_processing=*/true); + ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { // Clear download state for this block, which is in @@ -3732,12 +4317,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect - Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions"); + Misbehaving(*peer, 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector<CInv> invs; - invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash)); + invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(*peer), resp.blockhash)); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received @@ -3775,7 +4360,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // disk-space attacks), but this should be safe due to the // protections in the compact block handler -- see related comment // in compact block optimistic reconstruction handling. - ProcessBlock(pfrom, pblock, /*force_processing=*/true); + ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); } return; } @@ -3788,12 +4373,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + // Assume that this is in response to any outstanding getheaders + // request we may have sent, and clear out the time of our last request + peer->m_last_getheaders_timestamp = {}; + std::vector<CBlockHeader> headers; // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { - Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount)); + Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount)); return; } headers.resize(nCount); @@ -3802,7 +4391,23 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, ReadCompactSize(vRecv); // ignore tx count; assume it is 0. } - return ProcessHeadersMessage(pfrom, *peer, headers, /*via_compact_block=*/false); + ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false); + + // Check if the headers presync progress needs to be reported to validation. + // This needs to be done without holding the m_headers_presync_mutex lock. 
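
The headers handler above resets peer->m_last_getheaders_timestamp so that a follow-up getheaders can go out immediately. The sending side, MaybeSendGetHeaders(), is not part of this hunk; judging from its call sites, its contract is to allow at most one in-flight getheaders per HEADERS_RESPONSE_TIME window and to report whether one was sent. A plausible sketch:

    bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer)
    {
        const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
        const auto current_time = NodeClock::now();
        // Suppress the request if a previous getheaders is still within its
        // response window; the peer may simply not have answered yet.
        if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) {
            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
            peer.m_last_getheaders_timestamp = current_time;
            return true;
        }
        return false;
    }
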
+ if (m_headers_presync_should_signal.exchange(false)) { + HeadersPresyncStats stats; + { + LOCK(m_headers_presync_mutex); + auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer); + if (it != m_headers_presync_stats.end()) stats = it->second; + } + if (stats.second) { + m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second); + } + } + + return; } if (msg_type == NetMsgType::BLOCK) @@ -3820,6 +4425,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool forceProcessing = false; const uint256 hash(pblock->GetHash()); + bool min_pow_checked = false; { LOCK(cs_main); // Always process the block if we requested it, since we may @@ -3830,8 +4436,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // which peers send us compact blocks, so the race between here and // cs_main in ProcessNewBlock is fine. mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); + + // Check work on this block against our anti-dos thresholds. + const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock); + if (prev_block && prev_block->nChainWork + CalculateHeadersWork({pblock->GetBlockHeader()}) >= GetAntiDoSWorkThreshold()) { + min_pow_checked = true; + } } - ProcessBlock(pfrom, pblock, forceProcessing); + ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked); return; } @@ -3861,7 +4473,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, peer->m_addrs_to_send.clear(); std::vector<CAddress> vAddr; if (pfrom.HasPermission(NetPermissionFlags::Addr)) { - vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /* network */ std::nullopt); + vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt); } else { vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); } @@ -3873,7 +4485,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::MEMPOOL) { - if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) + if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) { if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) { @@ -3893,9 +4505,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - if (pfrom.m_tx_relay != nullptr) { - LOCK(pfrom.m_tx_relay->cs_tx_inventory); - pfrom.m_tx_relay->fSendMempool = true; + if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + LOCK(tx_relay->m_tx_inventory_mutex); + tx_relay->m_send_mempool = true; } return; } @@ -3976,7 +4588,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::FILTERLOAD) { - if (!(pfrom.GetLocalServices() & NODE_BLOOM)) { + if (!(peer->m_our_services & NODE_BLOOM)) { LogPrint(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -3987,19 +4599,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!filter.IsWithinSizeConstraints()) { // There is no excuse for sending a too-large filter - Misbehaving(pfrom.GetId(), 100, "too-large bloom filter"); - } - else if (pfrom.m_tx_relay != nullptr) - { - LOCK(pfrom.m_tx_relay->cs_filter); - pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter)); - 
pfrom.m_tx_relay->fRelayTxes = true; + Misbehaving(*peer, 100, "too-large bloom filter"); + } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + { + LOCK(tx_relay->m_bloom_filter_mutex); + tx_relay->m_bloom_filter.reset(new CBloomFilter(filter)); + tx_relay->m_relay_txs = true; + } + pfrom.m_bloom_filter_loaded = true; + pfrom.m_relays_txs = true; } return; } if (msg_type == NetMsgType::FILTERADD) { - if (!(pfrom.GetLocalServices() & NODE_BLOOM)) { + if (!(peer->m_our_services & NODE_BLOOM)) { LogPrint(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -4012,32 +4626,36 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool bad = false; if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { bad = true; - } else if (pfrom.m_tx_relay != nullptr) { - LOCK(pfrom.m_tx_relay->cs_filter); - if (pfrom.m_tx_relay->pfilter) { - pfrom.m_tx_relay->pfilter->insert(vData); + } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + LOCK(tx_relay->m_bloom_filter_mutex); + if (tx_relay->m_bloom_filter) { + tx_relay->m_bloom_filter->insert(vData); } else { bad = true; } } if (bad) { - Misbehaving(pfrom.GetId(), 100, "bad filteradd message"); + Misbehaving(*peer, 100, "bad filteradd message"); } return; } if (msg_type == NetMsgType::FILTERCLEAR) { - if (!(pfrom.GetLocalServices() & NODE_BLOOM)) { + if (!(peer->m_our_services & NODE_BLOOM)) { LogPrint(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; } - if (pfrom.m_tx_relay == nullptr) { - return; + auto tx_relay = peer->GetTxRelay(); + if (!tx_relay) return; + + { + LOCK(tx_relay->m_bloom_filter_mutex); + tx_relay->m_bloom_filter = nullptr; + tx_relay->m_relay_txs = true; } - LOCK(pfrom.m_tx_relay->cs_filter); - pfrom.m_tx_relay->pfilter = nullptr; - pfrom.m_tx_relay->fRelayTxes = true; + pfrom.m_bloom_filter_loaded = false; + pfrom.m_relays_txs = true; return; } @@ -4045,8 +4663,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, CAmount newFeeFilter = 0; vRecv >> newFeeFilter; if (MoneyRange(newFeeFilter)) { - if (pfrom.m_tx_relay != nullptr) { - pfrom.m_tx_relay->minFeeFilter = newFeeFilter; + if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + tx_relay->m_fee_filter_received = newFeeFilter; } LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); } @@ -4054,17 +4672,17 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (msg_type == NetMsgType::GETCFILTERS) { - ProcessGetCFilters(pfrom, vRecv); + ProcessGetCFilters(pfrom, *peer, vRecv); return; } if (msg_type == NetMsgType::GETCFHEADERS) { - ProcessGetCFHeaders(pfrom, vRecv); + ProcessGetCFHeaders(pfrom, *peer, vRecv); return; } if (msg_type == NetMsgType::GETCFCHECKPT) { - ProcessGetCFCheckPt(pfrom, vRecv); + ProcessGetCFCheckPt(pfrom, *peer, vRecv); return; } @@ -4210,7 +4828,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt return fMoreWork; } -void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) +void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) { AssertLockHeld(cs_main); @@ -4248,10 +4866,15 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_ 
pto.fDisconnect = true; } else { assert(state.m_chain_sync.m_work_header); + // Here, we assume that the getheaders message goes out, + // because it'll either go out or be skipped because of a + // getheaders in-flight already, in which case the peer should + // still respond to us with a sufficiently high work chain tip. + MaybeSendGetHeaders(pto, + GetLocator(state.m_chain_sync.m_work_header->pprev), + peer); LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); - m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256())); state.m_chain_sync.m_sent_getheaders = true; - constexpr auto HEADERS_RESPONSE_TIME{2min}; // Bump the timeout to allow a response, which could clear the timeout // (if the response shows the peer has synced), reset the timeout (if // the peer syncs to the required work but not to our tip), or result @@ -4420,10 +5043,10 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic } if (pingSend) { - uint64_t nonce = 0; - while (nonce == 0) { - GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); - } + uint64_t nonce; + do { + nonce = GetRand<uint64_t>(); + } while (nonce == 0); peer.m_ping_queued = false; peer.m_ping_start = now; if (node_to.GetCommonVersion() > BIP0031_VERSION) { @@ -4455,9 +5078,10 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros if (peer.m_next_local_addr_send != 0us) { peer.m_addr_known->reset(); } - if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(&node)) { + if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) { + CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; FastRandomContext insecure_rand; - PushAddress(peer, *local_addr, insecure_rand); + PushAddress(peer, local_addr, insecure_rand); } peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } @@ -4504,17 +5128,38 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros } } -void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds current_time) +void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) { - AssertLockHeld(cs_main); + // Delay sending SENDHEADERS (BIP 130) until we're done with an + // initial-headers-sync with this peer. Receiving headers announcements for + // new blocks while trying to sync their headers chain is problematic, + // because of the state tracking done. 
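
Condensing the outbound-eviction flow above: an unprotected outbound peer whose best known block never reaches our tip's work gets a benchmark header and a timeout; when the timeout fires we send one verifying getheaders, allow HEADERS_RESPONSE_TIME for a proof of sufficient work, and disconnect otherwise. Roughly, with field names as in CNodeState::m_chain_sync and details such as the NoBan carve-out omitted:

    // Condensed fragment; assumes `using namespace std::chrono_literals`.
    if (peer_best_known_work >= our_tip->nChainWork) {
        state.m_chain_sync = {};                                 // caught up: clear state
    } else if (state.m_chain_sync.m_timeout == 0s) {
        state.m_chain_sync.m_timeout = now + CHAIN_SYNC_TIMEOUT; // start the clock
        state.m_chain_sync.m_work_header = our_tip;              // benchmark to beat
    } else if (now > state.m_chain_sync.m_timeout) {
        if (state.m_chain_sync.m_sent_getheaders) {
            pto.fDisconnect = true;                              // grace period expired
        } else {
            MaybeSendGetHeaders(pto, GetLocator(state.m_chain_sync.m_work_header->pprev), peer);
            state.m_chain_sync.m_sent_getheaders = true;
            state.m_chain_sync.m_timeout = now + HEADERS_RESPONSE_TIME; // grace period
        }
    }
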
+ if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) { + LOCK(cs_main); + CNodeState &state = *State(node.GetId()); + if (state.pindexBestKnownBlock != nullptr && + state.pindexBestKnownBlock->nChainWork > nMinimumChainWork) { + // Tell our peer we prefer to receive headers rather than inv's + // We send this to non-NODE NETWORK peers as well, because even + // non-NODE NETWORK peers can announce blocks (such as pruning + // nodes) + m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion()).Make(NetMsgType::SENDHEADERS)); + peer.m_sent_sendheaders = true; + } + } +} +void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) +{ if (m_ignore_incoming_txs) return; - if (!pto.m_tx_relay) return; if (pto.GetCommonVersion() < FEEFILTER_VERSION) return; // peers with the forcerelay permission should not filter txs to us if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return; + // Don't send feefilter messages to outbound block-relay-only peers since they should never announce + // transactions to us, regardless of feefilter state. + if (pto.IsBlockOnlyConn()) return; - CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK(); + CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}}; if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) { @@ -4523,34 +5168,34 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds c currentFilter = MAX_MONEY; } else { static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)}; - if (pto.m_tx_relay->lastSentFeeFilter == MAX_FILTER) { + if (peer.m_fee_filter_sent == MAX_FILTER) { // Send the current filter if we sent MAX_FILTER previously // and made it out of IBD. - pto.m_tx_relay->m_next_send_feefilter = 0us; + peer.m_next_send_feefilter = 0us; } } - if (current_time > pto.m_tx_relay->m_next_send_feefilter) { + if (current_time > peer.m_next_send_feefilter) { CAmount filterToSend = g_filter_rounder.round(currentFilter); - // We always have a fee filter of at least minRelayTxFee - filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); - if (filterToSend != pto.m_tx_relay->lastSentFeeFilter) { + // We always have a fee filter of at least the min relay fee + filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK()); + if (filterToSend != peer.m_fee_filter_sent) { m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend)); - pto.m_tx_relay->lastSentFeeFilter = filterToSend; + peer.m_fee_filter_sent = filterToSend; } - pto.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL); + peer.m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. 
- else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < pto.m_tx_relay->m_next_send_feefilter && - (currentFilter < 3 * pto.m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto.m_tx_relay->lastSentFeeFilter / 3)) { - pto.m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); + else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && + (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) { + peer.m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); } } namespace { class CompareInvMempoolOrder { - CTxMemPool *mp; + CTxMemPool* mp; bool m_wtxid_relay; public: explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) @@ -4566,6 +5211,15 @@ public: return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); } }; +} // namespace + +bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const +{ + // block-relay-only peers may never send txs to us + if (peer.IsBlockOnlyConn()) return true; + // In -blocksonly mode, peers need the 'relay' permission to send txs to us + if (m_ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; + return false; } bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) @@ -4616,39 +5270,65 @@ bool PeerManagerImpl::SendMessages(CNode* pto) MaybeSendAddr(*pto, *peer, current_time); + MaybeSendSendHeaders(*pto, *peer); + { LOCK(cs_main); CNodeState &state = *State(pto->GetId()); // Start block sync - if (pindexBestHeader == nullptr) - pindexBestHeader = m_chainman.ActiveChain().Tip(); - bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do. - if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { + if (m_chainman.m_best_header == nullptr) { + m_chainman.m_best_header = m_chainman.ActiveChain().Tip(); + } + + // Determine whether we might try initial headers sync or parallel + // block download from this peer -- this mostly affects behavior while + // in IBD (once out of IBD, we sync from all peers). + bool sync_blocks_and_headers_from_peer = false; + if (state.fPreferredDownload) { + sync_blocks_and_headers_from_peer = true; + } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) { + // Typically this is an inbound peer. If we don't have any outbound + // peers, or if we aren't downloading any blocks from such peers, + // then allow block downloads from this peer, too. + // We prefer downloading blocks from outbound peers to avoid + // putting undue load on (say) some home user who is just making + // outbound connections to the network, but if our only source of + // the latest blocks is from an inbound peer, we have to be sure to + // eventually download it (and not just wait indefinitely for an + // outbound peer to have it). + if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) { + sync_blocks_and_headers_from_peer = true; + } + } + + if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. 
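
Returning to the feefilter logic above: the broadcast value is not the raw mempool minimum but the output of FeeFilterRounder, which quantizes (and randomizes) the feerate so peers cannot fingerprint our exact mempool floor. Purely illustratively, snapping to a geometric grid looks like this; the spacing value is hypothetical, and the real rounder also draws randomness:

    #include <cmath>
    #include <cstdint>

    int64_t QuantizeFeeFilter(int64_t fee_per_kvb)
    {
        if (fee_per_kvb <= 0) return 0;
        const double spacing = 1.1;  // hypothetical bucket ratio
        const double bucket = std::floor(std::log(double(fee_per_kvb)) / std::log(spacing));
        return int64_t(std::pow(spacing, bucket));  // snap down to the bucket boundary
    }

Note also the IBD special case above: the filter is pinned to MAX_MONEY until we leave IBD, so peers do not waste bandwidth announcing transactions we would only drop.
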
- if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { - state.fSyncStarted = true; - state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + - ( - // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling - // to maintain precision - std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * - (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / consensusParams.nPowTargetSpacing - ); - nSyncStarted++; - const CBlockIndex *pindexStart = pindexBestHeader; + if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) { + const CBlockIndex* pindexStart = m_chainman.m_best_header; /* If possible, start at the block preceding the currently best known header. This ensures that we always get a non-empty list of headers back as long as the peer is up-to-date. With a non-empty response, we can initialise the peer's known best block. This wouldn't be possible - if we requested starting at pindexBestHeader and + if we requested starting at m_chainman.m_best_header and got back an empty response. */ if (pindexStart->pprev) pindexStart = pindexStart->pprev; - LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256())); + if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) { + LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); + + state.fSyncStarted = true; + state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + + ( + // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling + // to maintain precision + std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * + Ticks<std::chrono::seconds>(GetAdjustedTime() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing + ); + nSyncStarted++; + } } } @@ -4656,7 +5336,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Try sending block announcements via headers // { - // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our + // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our // list of block hashes we're relaying, and our peer wants // headers announcements, then find the first header // not yet known to our peer but would connect, and send. 
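
To make the headers-sync timeout above concrete: HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER is scaled by the number of headers we expect to download, estimated from how far m_chainman.m_best_header lags the adjusted time. For example, with the 10-minute target spacing:

    using namespace std::chrono_literals;
    const auto behind = 365 * 24h;                       // best header one year old
    const auto expected_headers = behind / 10min;        // = 52'560 headers
    const auto timeout = 15min + 1ms * expected_headers; // base + per-header ≈ 15 min 52.6 s

So even a peer serving the entire backlog gets only a modest extension over the 15-minute base before the stalling logic further down disconnects it.
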
@@ -4666,7 +5346,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) LOCK(peer->m_block_inv_mutex); std::vector<CBlock> vHeaders; bool fRevertToInv = ((!state.fPreferHeaders && - (!state.fPreferHeaderAndIDs || peer->m_blocks_for_headers_relay.size() > 1)) || + (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) || peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE); const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date @@ -4719,33 +5399,27 @@ bool PeerManagerImpl::SendMessages(CNode* pto) } } if (!fRevertToInv && !vHeaders.empty()) { - if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { + if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) { // We only send up to 1 block as header-and-ids, as otherwise // probably means we're doing an initial-ish-sync or they're slow LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->GetId()); - int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS; - - bool fGotBlockFromCache = false; + std::optional<CSerializedNetMsg> cached_cmpctblock_msg; { - LOCK(cs_most_recent_block); - if (most_recent_block_hash == pBestIndex->GetBlockHash()) { - if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock) - m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block)); - else { - CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness); - m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); - } - fGotBlockFromCache = true; + LOCK(m_most_recent_block_mutex); + if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { + cached_cmpctblock_msg = msgMaker.Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block); } } - if (!fGotBlockFromCache) { + if (cached_cmpctblock_msg.has_value()) { + m_connman.PushMessage(pto, std::move(cached_cmpctblock_msg.value())); + } else { CBlock block; bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams); assert(ret); - CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness); - m_connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); + CBlockHeaderAndShortTxIDs cmpctblock{block}; + m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock)); } state.pindexBestHeaderSent = pBestIndex; } else if (state.fPreferHeaders) { @@ -4810,45 +5484,45 @@ bool PeerManagerImpl::SendMessages(CNode* pto) peer->m_blocks_for_inv_relay.clear(); } - if (pto->m_tx_relay != nullptr) { - LOCK(pto->m_tx_relay->cs_tx_inventory); + if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { + LOCK(tx_relay->m_tx_inventory_mutex); // Check whether periodic sends should happen bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan); - if (pto->m_tx_relay->nNextInvSend < current_time) { + if (tx_relay->m_next_inv_send_time < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { - pto->m_tx_relay->nNextInvSend = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); + tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); } else { - pto->m_tx_relay->nNextInvSend = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL); + tx_relay->m_next_inv_send_time = GetExponentialRand(current_time, 
OUTBOUND_INVENTORY_BROADCAST_INTERVAL); } } // Time to send but the peer has requested we not relay transactions. if (fSendTrickle) { - LOCK(pto->m_tx_relay->cs_filter); - if (!pto->m_tx_relay->fRelayTxes) pto->m_tx_relay->setInventoryTxToSend.clear(); + LOCK(tx_relay->m_bloom_filter_mutex); + if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear(); } // Respond to BIP35 mempool requests - if (fSendTrickle && pto->m_tx_relay->fSendMempool) { + if (fSendTrickle && tx_relay->m_send_mempool) { auto vtxinfo = m_mempool.infoAll(); - pto->m_tx_relay->fSendMempool = false; - const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()}; + tx_relay->m_send_mempool = false; + const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; - LOCK(pto->m_tx_relay->cs_filter); + LOCK(tx_relay->m_bloom_filter_mutex); for (const auto& txinfo : vtxinfo) { - const uint256& hash = state.m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash(); - CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); - pto->m_tx_relay->setInventoryTxToSend.erase(hash); + const uint256& hash = peer->m_wtxid_relay ? txinfo.tx->GetWitnessHash() : txinfo.tx->GetHash(); + CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash); + tx_relay->m_tx_inventory_to_send.erase(hash); // Don't send transactions that peers will not put into their mempool if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } - if (pto->m_tx_relay->pfilter) { - if (!pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; + if (tx_relay->m_bloom_filter) { + if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; } - pto->m_tx_relay->filterInventoryKnown.insert(hash); + tx_relay->m_tx_inventory_known_filter.insert(hash); // Responses to MEMPOOL requests bypass the m_recently_announced_invs filter. vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { @@ -4856,37 +5530,37 @@ bool PeerManagerImpl::SendMessages(CNode* pto) vInv.clear(); } } - pto->m_tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time); + tx_relay->m_last_mempool_req = std::chrono::duration_cast<std::chrono::seconds>(current_time); } // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending std::vector<std::set<uint256>::iterator> vInvTx; - vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size()); - for (std::set<uint256>::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) { + vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size()); + for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) { vInvTx.push_back(it); } - const CFeeRate filterrate{pto->m_tx_relay->minFeeFilter.load()}; + const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. // A heap is used so that not all items need sorting if only a few are being sent. - CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, state.m_wtxid_relay); + CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much shorter delays. 
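
The inventory trickle above, like the addr and feefilter timers earlier, schedules the next send with GetExponentialRand(), i.e. an exponentially distributed delay around the average interval; memoryless timing makes it harder for a spy to infer a transaction's origin from announcement timing. A minimal sampler sketch (illustrative, not the helper's actual code):

    #include <chrono>
    #include <cmath>
    #include <random>

    std::chrono::microseconds NextExponentialSend(std::chrono::microseconds now,
                                                  std::chrono::seconds average_interval)
    {
        static std::mt19937_64 rng{std::random_device{}()};
        std::uniform_real_distribution<double> uniform(std::nextafter(0.0, 1.0), 1.0);
        // Inverse-CDF sample of an exponential: delay = -ln(U) * average.
        const double delay_s = -std::log(uniform(rng)) * double(average_interval.count());
        return now + std::chrono::duration_cast<std::chrono::microseconds>(
                         std::chrono::duration<double>(delay_s));
    }
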
unsigned int nRelayedTransactions = 0; - LOCK(pto->m_tx_relay->cs_filter); + LOCK(tx_relay->m_bloom_filter_mutex); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); std::set<uint256>::iterator it = vInvTx.back(); vInvTx.pop_back(); uint256 hash = *it; - CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); + CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash); // Remove it from the to-be-sent set - pto->m_tx_relay->setInventoryTxToSend.erase(it); + tx_relay->m_tx_inventory_to_send.erase(it); // Check if not in the filter already - if (pto->m_tx_relay->filterInventoryKnown.contains(hash)) { + if (tx_relay->m_tx_inventory_known_filter.contains(hash)) { continue; } // Not in the mempool anymore? don't bother sending it. @@ -4900,7 +5574,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } - if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; + if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; // Send State(pto->GetId())->m_recently_announced_invs.insert(hash); vInv.push_back(inv); @@ -4927,14 +5601,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto) m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } - pto->m_tx_relay->filterInventoryKnown.insert(hash); + tx_relay->m_tx_inventory_known_filter.insert(hash); if (hash != txid) { - // Insert txid into filterInventoryKnown, even for + // Insert txid into m_tx_inventory_known_filter, even for // wtxidrelay peers. This prevents re-adding of // unconfirmed parents to the recently_announced // filter, when a child tx is requested. See // ProcessGetData(). - pto->m_tx_relay->filterInventoryKnown.insert(txid); + tx_relay->m_tx_inventory_known_filter.insert(txid); } } } @@ -4968,8 +5642,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Check for headers sync timeouts if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) { // Detect whether this is a stalling initial-headers-sync peer - if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) { - if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) { + if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) { + if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, // and we have others we could be using instead. 
// Note: If all our peers are inbound, then we won't @@ -5000,18 +5674,18 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Check that outbound peers have reasonable chains // GetTime() is used by this anti-DoS logic so we can test this using mocktime - ConsiderEviction(*pto, GetTime<std::chrono::seconds>()); + ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>()); // // Message: getdata (blocks) // std::vector<CInv> vGetData; - if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { + if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector<const CBlockIndex*> vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); + FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*peer); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); BlockRequested(pto->GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), @@ -5038,7 +5712,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (!AlreadyHaveTx(gtxid)) { LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", gtxid.GetHash().ToString(), pto->GetId()); - vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); + vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); if (vGetData.size() >= MAX_GETDATA_SZ) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); @@ -5054,8 +5728,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (!vGetData.empty()) m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); - - MaybeSendFeefilter(*pto, current_time); } // release cs_main + MaybeSendFeefilter(*pto, *peer, current_time); return true; }
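
Finally, a condensed view of the per-trickle transaction selection above: the to-send set is heapified under CompareInvMempoolOrder (dependency order first, then feerate) so that only the entries actually sent need full ordering work, and at most INVENTORY_BROADCAST_MAX entries are popped per interval. A generic sketch with an abstract comparator:

    #include <algorithm>
    #include <set>
    #include <vector>

    template <typename Hash, typename Better>
    std::vector<Hash> SelectInvBatch(std::set<Hash>& to_send, Better better, size_t max_batch)
    {
        // Heapify iterators into the set; erasing an element invalidates
        // only its own iterator, so the remaining heap stays valid.
        std::vector<typename std::set<Hash>::iterator> heap;
        heap.reserve(to_send.size());
        for (auto it = to_send.begin(); it != to_send.end(); ++it) heap.push_back(it);
        const auto cmp = [&](const auto& a, const auto& b) { return better(*a, *b); };
        std::make_heap(heap.begin(), heap.end(), cmp);

        std::vector<Hash> batch;
        while (!heap.empty() && batch.size() < max_batch) {
            std::pop_heap(heap.begin(), heap.end(), cmp);
            batch.push_back(*heap.back());
            to_send.erase(heap.back());
            heap.pop_back();
        }
        return batch;
    }

The real loop above additionally skips entries already in m_tx_inventory_known_filter, entries no longer in the mempool, and entries below the peer's feefilter or failing its bloom filter.
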