author     John Newbery <john@johnnewbery.com>    2020-07-09 10:51:20 +0100
committer  John Newbery <john@johnnewbery.com>    2021-03-29 12:15:23 +0100
commit     ad719297f2ecdd2394eff668b3be7070bc9cb3e2 (patch)
tree       1956802a26e43cb2195639633e36c5e17b52d46e /src
parent     4ad4abcf07efefafd439b28679dff8d6bbf62943 (diff)
[net processing] Extract `addr` send functionality into MaybeSendAddr()
Reviewer hint: review with `git diff --color-moved=dimmed-zebra --ignore-all-space`
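
One way to apply that hint locally (assuming a clone that contains this commit) is to pass the diff options to git show with the commit hash listed above:

    git show ad719297f2ecdd2394eff668b3be7070bc9cb3e2 --color-moved=dimmed-zebra --ignore-all-space

With --color-moved=dimmed-zebra, lines that are only moved (here, out of SendMessages() and into the new MaybeSendAddr()) are dimmed, so review can focus on lines that actually changed; --ignore-all-space hides the re-indentation of the moved block.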
Diffstat (limited to 'src')
-rw-r--r--  src/net_processing.cpp  136
1 file changed, 71 insertions(+), 65 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 359b7d9843..b79da072de 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -322,6 +322,9 @@ private:
* to time out. */
void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now);
+ /** Send `addr` messages on a regular schedule. */
+ void MaybeSendAddr(CNode* pto, std::chrono::microseconds current_time);
+
const CChainParams& m_chainparams;
CConnman& m_connman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
@@ -4138,6 +4141,72 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic
}
}
+void PeerManagerImpl::MaybeSendAddr(CNode* pto, std::chrono::microseconds current_time)
+{
+ LOCK(pto->m_addr_send_times_mutex);
+ const CNetMsgMaker msgMaker(pto->GetCommonVersion());
+
+ if (fListen && pto->RelayAddrsWithConn() &&
+ !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
+ pto->m_next_local_addr_send < current_time) {
+ // If we've sent before, clear the bloom filter for the peer, so that our
+ // self-announcement will actually go out.
+ // This might be unnecessary if the bloom filter has already rolled
+ // over since our last self-announcement, but there is only a small
+ // bandwidth cost that we can incur by doing this (which happens
+ // once a day on average).
+ if (pto->m_next_local_addr_send != 0us) {
+ pto->m_addr_known->reset();
+ }
+ if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(pto)) {
+ FastRandomContext insecure_rand;
+ pto->PushAddress(*local_addr, insecure_rand);
+ }
+ pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ }
+
+ //
+ // Message: addr
+ //
+ if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
+ pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
+ std::vector<CAddress> vAddr;
+ vAddr.reserve(pto->vAddrToSend.size());
+ assert(pto->m_addr_known);
+
+ const char* msg_type;
+ int make_flags;
+ if (pto->m_wants_addrv2) {
+ msg_type = NetMsgType::ADDRV2;
+ make_flags = ADDRV2_FORMAT;
+ } else {
+ msg_type = NetMsgType::ADDR;
+ make_flags = 0;
+ }
+
+ for (const CAddress& addr : pto->vAddrToSend)
+ {
+ if (!pto->m_addr_known->contains(addr.GetKey()))
+ {
+ pto->m_addr_known->insert(addr.GetKey());
+ vAddr.push_back(addr);
+ // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
+ if (vAddr.size() >= MAX_ADDR_TO_SEND)
+ {
+ m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
+ vAddr.clear();
+ }
+ }
+ }
+ pto->vAddrToSend.clear();
+ if (!vAddr.empty())
+ m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
+ // we only send the big addr message once
+ if (pto->vAddrToSend.capacity() > 40)
+ pto->vAddrToSend.shrink_to_fit();
+ }
+}
+
namespace {
class CompareInvMempoolOrder
{
@@ -4183,76 +4252,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// MaybeSendPing may have marked peer for disconnection
if (pto->fDisconnect) return true;
+ MaybeSendAddr(pto, current_time);
+
{
LOCK(cs_main);
CNodeState &state = *State(pto->GetId());
- // Address refresh broadcast
- {
- LOCK(pto->m_addr_send_times_mutex);
-
- if (fListen && pto->RelayAddrsWithConn() &&
- !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
- pto->m_next_local_addr_send < current_time) {
- // If we've sent before, clear the bloom filter for the peer, so that our
- // self-announcement will actually go out.
- // This might be unnecessary if the bloom filter has already rolled
- // over since our last self-announcement, but there is only a small
- // bandwidth cost that we can incur by doing this (which happens
- // once a day on average).
- if (pto->m_next_local_addr_send != 0us) {
- pto->m_addr_known->reset();
- }
- if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(pto)) {
- FastRandomContext insecure_rand;
- pto->PushAddress(*local_addr, insecure_rand);
- }
- pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
- }
-
- //
- // Message: addr
- //
- if (pto->RelayAddrsWithConn() && pto->m_next_addr_send < current_time) {
- pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
- std::vector<CAddress> vAddr;
- vAddr.reserve(pto->vAddrToSend.size());
- assert(pto->m_addr_known);
-
- const char* msg_type;
- int make_flags;
- if (pto->m_wants_addrv2) {
- msg_type = NetMsgType::ADDRV2;
- make_flags = ADDRV2_FORMAT;
- } else {
- msg_type = NetMsgType::ADDR;
- make_flags = 0;
- }
-
- for (const CAddress& addr : pto->vAddrToSend)
- {
- if (!pto->m_addr_known->contains(addr.GetKey()))
- {
- pto->m_addr_known->insert(addr.GetKey());
- vAddr.push_back(addr);
- // receiver rejects addr messages larger than MAX_ADDR_TO_SEND
- if (vAddr.size() >= MAX_ADDR_TO_SEND)
- {
- m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
- vAddr.clear();
- }
- }
- }
- pto->vAddrToSend.clear();
- if (!vAddr.empty())
- m_connman.PushMessage(pto, msgMaker.Make(make_flags, msg_type, vAddr));
- // we only send the big addr message once
- if (pto->vAddrToSend.capacity() > 40)
- pto->vAddrToSend.shrink_to_fit();
- }
- } // pto->m_addr_send_times_mutex
-
// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = m_chainman.ActiveChain().Tip();