author     Amiti Uttarwar <amiti@uttarwar.org>   2020-06-02 21:23:44 -0700
committer  Amiti Uttarwar <amiti@uttarwar.org>   2020-08-07 17:18:16 -0700
commit     7f7b83deb2427599c129f4ff581d4d045461e459 (patch)
tree       0612ff469f49f98f59a985d21c8847a97840cce8
parent     35839e963bf61d2da0d12f5b8cea74ac0e0fbd7b (diff)
download   bitcoin-7f7b83deb2427599c129f4ff581d4d045461e459.tar.xz
[net/refactor] Rework ThreadOpenConnections logic
Make the connection counts explicit and extract the checks on m_conn_type into interface functions. Using explicit counting and switch statements where possible should help prevent counting bugs in the future.
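The switch statements below intentionally have no default case: because every ConnectionType enumerator is listed explicitly, a compiler building with -Wswitch (enabled by -Wall) can warn when a new connection type is added but not handled, instead of silently falling through a default. A minimal sketch of the pattern, assuming a local mirror of the enumerators seen in this diff and a hypothetical CountsTowardNetgroupLimit() helper (neither is the real net.h code):

#include <cassert>

// Illustration only: a local mirror of the ConnectionType enumerators that
// appear in this diff, not the actual definition from net.h.
enum class ConnectionType {
    INBOUND,
    OUTBOUND,
    MANUAL,
    FEELER,
    BLOCK_RELAY,
    ADDR_FETCH,
};

// Hypothetical helper mirroring the netgroup switch in ThreadOpenConnections:
// should this peer's netgroup be recorded, so that our limited outbound slots
// are spread across netgroups? No default case, so -Wswitch can flag any
// enumerator added later but not listed here.
bool CountsTowardNetgroupLimit(ConnectionType type)
{
    switch (type) {
        case ConnectionType::INBOUND:
        case ConnectionType::MANUAL:
            return false;
        case ConnectionType::OUTBOUND:
        case ConnectionType::BLOCK_RELAY:
        case ConnectionType::ADDR_FETCH:
        case ConnectionType::FEELER:
            return true;
    }
    assert(false); // unreachable while every enumerator is handled above
}

Building with -Werror=switch turns that warning into a hard error, which is one way the "prevent counting bugs" goal can be enforced at compile time.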
-rw-r--r--  src/net.cpp  30
-rw-r--r--  src/net.h    23
2 files changed, 41 insertions, 12 deletions
diff --git a/src/net.cpp b/src/net.cpp
index f2dcec784f..35daddbbe2 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1829,21 +1829,27 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
         int nOutboundFullRelay = 0;
         int nOutboundBlockRelay = 0;
         std::set<std::vector<unsigned char> > setConnected;
+
         {
             LOCK(cs_vNodes);
             for (const CNode* pnode : vNodes) {
-                if (!pnode->IsInboundConn() && (pnode->m_conn_type != ConnectionType::MANUAL)) {
-                    // Netgroups for inbound and addnode peers are not excluded because our goal here
-                    // is to not use multiple of our limited outbound slots on a single netgroup
-                    // but inbound and addnode peers do not use our outbound slots. Inbound peers
-                    // also have the added issue that they're attacker controlled and could be used
-                    // to prevent us from connecting to particular hosts if we used them here.
-                    setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
-                    if (pnode->m_tx_relay == nullptr) {
-                        nOutboundBlockRelay++;
-                    } else if (pnode->m_conn_type == ConnectionType::OUTBOUND) {
-                        nOutboundFullRelay++;
-                    }
+                if (pnode->IsFullOutboundConn()) nOutboundFullRelay++;
+                if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++;
+
+                // Netgroups for inbound and manual peers are not excluded because our goal here
+                // is to not use multiple of our limited outbound slots on a single netgroup
+                // but inbound and manual peers do not use our outbound slots. Inbound peers
+                // also have the added issue that they could be attacker controlled and used
+                // to prevent us from connecting to particular hosts if we used them here.
+                switch (pnode->m_conn_type) {
+                    case ConnectionType::INBOUND:
+                    case ConnectionType::MANUAL:
+                        break;
+                    case ConnectionType::OUTBOUND:
+                    case ConnectionType::BLOCK_RELAY:
+                    case ConnectionType::ADDR_FETCH:
+                    case ConnectionType::FEELER:
+                        setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
                 }
             }
         }
diff --git a/src/net.h b/src/net.h
index f52fea72be..4834050122 100644
--- a/src/net.h
+++ b/src/net.h
@@ -789,10 +789,18 @@ public:
     std::atomic_bool fPauseRecv{false};
     std::atomic_bool fPauseSend{false};
 
+    bool IsFullOutboundConn() const {
+        return m_conn_type == ConnectionType::OUTBOUND;
+    }
+
     bool IsManualConn() const {
         return m_conn_type == ConnectionType::MANUAL;
     }
 
+    bool IsBlockOnlyConn() const {
+        return m_conn_type == ConnectionType::BLOCK_RELAY;
+    }
+
     bool IsFeelerConn() const {
         return m_conn_type == ConnectionType::FEELER;
     }
@@ -805,6 +813,21 @@ public:
         return m_conn_type == ConnectionType::INBOUND;
     }
 
+    bool ExpectServicesFromConn() const {
+        switch (m_conn_type) {
+            case ConnectionType::INBOUND:
+            case ConnectionType::MANUAL:
+            case ConnectionType::FEELER:
+                return false;
+            case ConnectionType::OUTBOUND:
+            case ConnectionType::BLOCK_RELAY:
+            case ConnectionType::ADDR_FETCH:
+                return true;
+        }
+
+        assert(false);
+    }
+
 protected:
     mapMsgCmdSize mapSendBytesPerMsgCmd;
     mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
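For a sense of how the new accessors read from calling code, here is a small self-contained sketch. MiniNode is a hypothetical stand-in for CNode that carries only the members used here, and the counting loop follows the style of the reworked ThreadOpenConnections; none of it is taken verbatim from the Bitcoin Core sources.

#include <cassert>
#include <cstdio>
#include <vector>

// Illustration only: mirrors the ConnectionType enumerators seen in this diff.
enum class ConnectionType { INBOUND, OUTBOUND, MANUAL, FEELER, BLOCK_RELAY, ADDR_FETCH };

// Hypothetical stand-in for CNode, reduced to the predicates added in this commit.
struct MiniNode {
    ConnectionType m_conn_type;

    bool IsFullOutboundConn() const { return m_conn_type == ConnectionType::OUTBOUND; }
    bool IsBlockOnlyConn() const { return m_conn_type == ConnectionType::BLOCK_RELAY; }
    bool ExpectServicesFromConn() const
    {
        switch (m_conn_type) {
            case ConnectionType::INBOUND:
            case ConnectionType::MANUAL:
            case ConnectionType::FEELER:
                return false;
            case ConnectionType::OUTBOUND:
            case ConnectionType::BLOCK_RELAY:
            case ConnectionType::ADDR_FETCH:
                return true;
        }
        assert(false); // unreachable: every enumerator is handled above
    }
};

int main()
{
    const std::vector<MiniNode> nodes{{ConnectionType::OUTBOUND},
                                      {ConnectionType::BLOCK_RELAY},
                                      {ConnectionType::INBOUND},
                                      {ConnectionType::FEELER}};

    // Explicit counting, in the same style as the reworked ThreadOpenConnections loop.
    int full_relay = 0;
    int block_relay = 0;
    int expect_services = 0;
    for (const MiniNode& node : nodes) {
        if (node.IsFullOutboundConn()) ++full_relay;
        if (node.IsBlockOnlyConn()) ++block_relay;
        if (node.ExpectServicesFromConn()) ++expect_services;
    }
    std::printf("full relay: %d, block-relay-only: %d, expect services: %d\n",
                full_relay, block_relay, expect_services); // prints 1, 1, 2
    return 0;
}

Keeping every m_conn_type comparison behind named predicates like these means callers no longer need to know which concrete enumerators imply full relay, block-relay-only operation, or expected services.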