Diffstat (limited to 'src/net.cpp')
-rw-r--r-- | src/net.cpp | 892 |
1 file changed, 464 insertions, 428 deletions
diff --git a/src/net.cpp b/src/net.cpp index e35d05cec0..4759287c36 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -10,13 +10,13 @@ #include <net.h> #include <banman.h> -#include <chainparams.h> #include <clientversion.h> #include <consensus/consensus.h> #include <crypto/sha256.h> #include <net_permissions.h> #include <netbase.h> #include <node/ui_interface.h> +#include <optional.h> #include <protocol.h> #include <random.h> #include <scheduler.h> @@ -33,20 +33,19 @@ #include <poll.h> #endif -#ifdef USE_UPNP -#include <miniupnpc/miniupnpc.h> -#include <miniupnpc/upnpcommands.h> -#include <miniupnpc/upnperrors.h> -// The minimum supported miniUPnPc API version is set to 10. This keeps compatibility -// with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages. -static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed"); -#endif - +#include <algorithm> #include <cstdint> +#include <functional> #include <unordered_map> #include <math.h> +/** Maximum number of block-relay-only anchor connections */ +static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2; +static_assert (MAX_BLOCK_RELAY_ONLY_ANCHORS <= static_cast<size_t>(MAX_BLOCK_RELAY_ONLY_CONNECTIONS), "MAX_BLOCK_RELAY_ONLY_ANCHORS must not exceed MAX_BLOCK_RELAY_ONLY_CONNECTIONS."); +/** Anchor IP address database file name */ +const char* const ANCHORS_DATABASE_FILENAME = "anchors.dat"; + // How often to dump addresses to peers.dat static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15}; @@ -66,6 +65,9 @@ static constexpr std::chrono::seconds DNSSEEDS_DELAY_FEW_PEERS{11}; static constexpr std::chrono::minutes DNSSEEDS_DELAY_MANY_PEERS{5}; static constexpr int DNSSEEDS_DELAY_PEER_THRESHOLD = 1000; // "many" vs "few" peers +/** The default timeframe for -maxuploadtarget. 1 day. */ +static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24}; + // We add a random period time (0 to 1 seconds) to feeler connections to prevent synchronization. #define FEELER_SLEEP_WINDOW 1 @@ -84,6 +86,11 @@ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), + /** + * Do not call AddLocal() for our special addresses, e.g., for incoming + * Tor connections, to prevent gossiping them over the network. 
+ */ + BF_DONT_ADVERTISE = (1U << 2), }; // The set of sockets cannot be modified while waiting @@ -94,12 +101,12 @@ const std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8] +static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256("addrcache")[0:8] // // Global state variables // bool fDiscover = true; bool fListen = true; -bool g_relay_txes = !DEFAULT_BLOCKSONLY; RecursiveMutex cs_mapLocalHost; std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost); static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; @@ -342,6 +349,11 @@ CNode* CConnman::FindNode(const CService& addr) return nullptr; } +bool CConnman::AlreadyConnectedToAddress(const CAddress& addr) +{ + return FindNode(static_cast<CNetAddr>(addr)) || FindNode(addr.ToStringIPPort()); +} + bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); @@ -461,7 +473,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); CAddress addr_bind = GetBindAddress(hSocket); - CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type); + CNode* pnode = new CNode(id, nLocalServices, hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our peer count) @@ -487,6 +499,26 @@ void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNet } } +std::string ConnectionTypeAsString(ConnectionType conn_type) +{ + switch (conn_type) { + case ConnectionType::INBOUND: + return "inbound"; + case ConnectionType::MANUAL: + return "manual"; + case ConnectionType::FEELER: + return "feeler"; + case ConnectionType::OUTBOUND_FULL_RELAY: + return "outbound-full-relay"; + case ConnectionType::BLOCK_RELAY: + return "block-relay-only"; + case ConnectionType::ADDR_FETCH: + return "addr-fetch"; + } // no default case, so the compiler can warn about missing cases + + assert(false); +} + std::string CNode::GetAddrName() const { LOCK(cs_addrName); return addrName; @@ -513,6 +545,11 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) { } } +Network CNode::ConnectedThroughNetwork() const +{ + return m_inbound_onion ? 
NET_ONION : addr.GetNetClass(); +} + #undef X #define X(name) stats.name = name void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) @@ -521,6 +558,7 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) X(nServices); X(addr); X(addrBind); + stats.m_network = ConnectedThroughNetwork(); stats.m_mapped_as = addr.GetMappedAS(m_asmap); if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_filter); @@ -541,8 +579,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) X(cleanSubVer); } stats.fInbound = IsInboundConn(); - stats.m_manual_connection = IsManualConn(); - X(nStartingHeight); + X(m_bip152_highbandwidth_to); + X(m_bip152_highbandwidth_from); { LOCK(cs_vSend); X(mapSendBytesPerMsgCmd); @@ -553,10 +591,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) X(mapRecvBytesPerMsgCmd); X(nRecvBytes); } - X(m_legacyWhitelisted); X(m_permissionFlags); if (m_tx_relay != nullptr) { - LOCK(m_tx_relay->cs_feeFilter); stats.minFeeFilter = m_tx_relay->minFeeFilter; } else { stats.minFeeFilter = 0; @@ -581,38 +617,47 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; + + X(m_conn_type); } #undef X -bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete) +bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete) { complete = false; const auto time = GetTime<std::chrono::microseconds>(); LOCK(cs_vRecv); nLastRecv = std::chrono::duration_cast<std::chrono::seconds>(time).count(); - nRecvBytes += nBytes; - while (nBytes > 0) { + nRecvBytes += msg_bytes.size(); + while (msg_bytes.size() > 0) { // absorb network data - int handled = m_deserializer->Read(pch, nBytes); - if (handled < 0) return false; - - pch += handled; - nBytes -= handled; + int handled = m_deserializer->Read(msg_bytes); + if (handled < 0) { + // Serious header problem, disconnect from the peer. + return false; + } if (m_deserializer->Complete()) { // decompose a transport agnostic CNetMessage from the deserializer - CNetMessage msg = m_deserializer->GetMessage(Params().MessageStart(), time); + uint32_t out_err_raw_size{0}; + Optional<CNetMessage> result{m_deserializer->GetMessage(time, out_err_raw_size)}; + if (!result) { + // Message deserialization failed. Drop the message but don't disconnect the peer. 
+ // store the size of the corrupt message + mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER)->second += out_err_raw_size; + continue; + } //store received bytes per message command //to prevent a memory DOS, only allow valid commands - mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.m_command); + mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(result->m_command); if (i == mapRecvBytesPerMsgCmd.end()) i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); assert(i != mapRecvBytesPerMsgCmd.end()); - i->second += msg.m_raw_message_size; + i->second += result->m_raw_message_size; // push the message to the process queue, - vRecvMsg.push_back(std::move(msg)); + vRecvMsg.push_back(std::move(*result)); complete = true; } @@ -621,39 +666,13 @@ bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool& complete return true; } -void CNode::SetSendVersion(int nVersionIn) -{ - // Send version may only be changed in the version message, and - // only one version message is allowed per session. We can therefore - // treat this value as const and even atomic as long as it's only used - // once a version message has been successfully processed. Any attempt to - // set this twice is an error. - if (nSendVersion != 0) { - error("Send version already set for node: %i. Refusing to change from %i to %i", id, nSendVersion, nVersionIn); - } else { - nSendVersion = nVersionIn; - } -} - -int CNode::GetSendVersion() const -{ - // The send version should always be explicitly set to - // INIT_PROTO_VERSION rather than using this value until SetSendVersion - // has been called. - if (nSendVersion == 0) { - error("Requesting unset send version for node: %i. Using %i", id, INIT_PROTO_VERSION); - return INIT_PROTO_VERSION; - } - return nSendVersion; -} - -int V1TransportDeserializer::readHeader(const char *pch, unsigned int nBytes) +int V1TransportDeserializer::readHeader(Span<const uint8_t> msg_bytes) { // copy data to temporary parsing buffer unsigned int nRemaining = CMessageHeader::HEADER_SIZE - nHdrPos; - unsigned int nCopy = std::min(nRemaining, nBytes); + unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size()); - memcpy(&hdrbuf[nHdrPos], pch, nCopy); + memcpy(&hdrbuf[nHdrPos], msg_bytes.data(), nCopy); nHdrPos += nCopy; // if header incomplete, exit @@ -665,11 +684,19 @@ int V1TransportDeserializer::readHeader(const char *pch, unsigned int nBytes) hdrbuf >> hdr; } catch (const std::exception&) { + LogPrint(BCLog::NET, "HEADER ERROR - UNABLE TO DESERIALIZE, peer=%d\n", m_node_id); + return -1; + } + + // Check start string, network magic + if (memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { + LogPrint(BCLog::NET, "HEADER ERROR - MESSAGESTART (%s, %u bytes), received %s, peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, HexStr(hdr.pchMessageStart), m_node_id); return -1; } // reject messages larger than MAX_SIZE or MAX_PROTOCOL_MESSAGE_LENGTH if (hdr.nMessageSize > MAX_SIZE || hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { + LogPrint(BCLog::NET, "HEADER ERROR - SIZE (%s, %u bytes), peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, m_node_id); return -1; } @@ -679,18 +706,18 @@ int V1TransportDeserializer::readHeader(const char *pch, unsigned int nBytes) return nCopy; } -int V1TransportDeserializer::readData(const char *pch, unsigned int nBytes) +int V1TransportDeserializer::readData(Span<const uint8_t> msg_bytes) { unsigned int nRemaining = hdr.nMessageSize - nDataPos; - unsigned int nCopy = 
std::min(nRemaining, nBytes); + unsigned int nCopy = std::min<unsigned int>(nRemaining, msg_bytes.size()); if (vRecv.size() < nDataPos + nCopy) { // Allocate up to 256 KiB ahead, but never more than the total message size. vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } - hasher.Write({(const unsigned char*)pch, nCopy}); - memcpy(&vRecv[nDataPos], pch, nCopy); + hasher.Write(msg_bytes.first(nCopy)); + memcpy(&vRecv[nDataPos], msg_bytes.data(), nCopy); nDataPos += nCopy; return nCopy; @@ -704,36 +731,39 @@ const uint256& V1TransportDeserializer::GetMessageHash() const return data_hash; } -CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageStartChars& message_start, const std::chrono::microseconds time) +Optional<CNetMessage> V1TransportDeserializer::GetMessage(const std::chrono::microseconds time, uint32_t& out_err_raw_size) { // decompose a single CNetMessage from the TransportDeserializer - CNetMessage msg(std::move(vRecv)); + Optional<CNetMessage> msg(std::move(vRecv)); - // store state about valid header, netmagic and checksum - msg.m_valid_header = hdr.IsValid(message_start); - msg.m_valid_netmagic = (memcmp(hdr.pchMessageStart, message_start, CMessageHeader::MESSAGE_START_SIZE) == 0); - uint256 hash = GetMessageHash(); + // store command string, time, and sizes + msg->m_command = hdr.GetCommand(); + msg->m_time = time; + msg->m_message_size = hdr.nMessageSize; + msg->m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE; - // store command string, payload size - msg.m_command = hdr.GetCommand(); - msg.m_message_size = hdr.nMessageSize; - msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE; + uint256 hash = GetMessageHash(); // We just received a message off the wire, harvest entropy from the time (and the message checksum) RandAddEvent(ReadLE32(hash.begin())); - msg.m_valid_checksum = (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) == 0); - if (!msg.m_valid_checksum) { - LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n", - SanitizeString(msg.m_command), msg.m_message_size, + // Check checksum and header command string + if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { + LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s, peer=%d\n", + SanitizeString(msg->m_command), msg->m_message_size, HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)), - HexStr(hdr.pchChecksum)); - } - - // store receive time - msg.m_time = time; - - // reset the network deserializer (prepare for the next message) + HexStr(hdr.pchChecksum), + m_node_id); + out_err_raw_size = msg->m_raw_message_size; + msg = nullopt; + } else if (!hdr.IsCommandValid()) { + LogPrint(BCLog::NET, "HEADER ERROR - COMMAND (%s, %u bytes), peer=%d\n", + hdr.GetCommand(), msg->m_message_size, m_node_id); + out_err_raw_size = msg->m_raw_message_size; + msg = nullopt; + } + + // Always reset the network deserializer (prepare for the next message) Reset(); return msg; } @@ -751,30 +781,30 @@ void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vec CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, header, 0, hdr}; } -size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_vSend) +size_t CConnman::SocketSendData(CNode& node) const { - auto it = pnode->vSendMsg.begin(); + auto it = node.vSendMsg.begin(); size_t nSentSize = 0; - while (it != pnode->vSendMsg.end()) { - const auto 
&data = *it; - assert(data.size() > pnode->nSendOffset); + while (it != node.vSendMsg.end()) { + const auto& data = *it; + assert(data.size() > node.nSendOffset); int nBytes = 0; { - LOCK(pnode->cs_hSocket); - if (pnode->hSocket == INVALID_SOCKET) + LOCK(node.cs_hSocket); + if (node.hSocket == INVALID_SOCKET) break; - nBytes = send(pnode->hSocket, reinterpret_cast<const char*>(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); + nBytes = send(node.hSocket, reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); } if (nBytes > 0) { - pnode->nLastSend = GetSystemTimeInSeconds(); - pnode->nSendBytes += nBytes; - pnode->nSendOffset += nBytes; + node.nLastSend = GetSystemTimeInSeconds(); + node.nSendBytes += nBytes; + node.nSendOffset += nBytes; nSentSize += nBytes; - if (pnode->nSendOffset == data.size()) { - pnode->nSendOffset = 0; - pnode->nSendSize -= data.size(); - pnode->fPauseSend = pnode->nSendSize > nSendBufferMaxSize; + if (node.nSendOffset == data.size()) { + node.nSendOffset = 0; + node.nSendSize -= data.size(); + node.fPauseSend = node.nSendSize > nSendBufferMaxSize; it++; } else { // could not send full message; stop sending more @@ -784,10 +814,9 @@ size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pno if (nBytes < 0) { // error int nErr = WSAGetLastError(); - if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) - { + if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { LogPrintf("socket send error %s\n", NetworkErrorString(nErr)); - pnode->CloseSocketDisconnect(); + node.CloseSocketDisconnect(); } } // couldn't send anything at all @@ -795,30 +824,14 @@ size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pno } } - if (it == pnode->vSendMsg.end()) { - assert(pnode->nSendOffset == 0); - assert(pnode->nSendSize == 0); + if (it == node.vSendMsg.end()) { + assert(node.nSendOffset == 0); + assert(node.nSendSize == 0); } - pnode->vSendMsg.erase(pnode->vSendMsg.begin(), it); + node.vSendMsg.erase(node.vSendMsg.begin(), it); return nSentSize; } -struct NodeEvictionCandidate -{ - NodeId id; - int64_t nTimeConnected; - int64_t nMinPingUsecTime; - int64_t nLastBlockTime; - int64_t nLastTXTime; - bool fRelevantServices; - bool fRelayTxes; - bool fBloomFilter; - CAddress addr; - uint64_t nKeyedNetGroup; - bool prefer_evict; - bool m_is_local; -}; - static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nMinPingUsecTime > b.nMinPingUsecTime; @@ -874,43 +887,8 @@ static void EraseLastKElements(std::vector<T> &elements, Comparator comparator, elements.erase(elements.end() - eraseSize, elements.end()); } -/** Try to find a connection to evict when the node is full. - * Extreme care must be taken to avoid opening the node to attacker - * triggered network partitioning. - * The strategy used here is to protect a small number of peers - * for each of several distinct characteristics which are difficult - * to forge. In order to partition a node the attacker must be - * simultaneously better at all of them than honest peers. 
- */ -bool CConnman::AttemptToEvictConnection() +[[nodiscard]] Optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates) { - std::vector<NodeEvictionCandidate> vEvictionCandidates; - { - LOCK(cs_vNodes); - - for (const CNode* node : vNodes) { - if (node->HasPermission(PF_NOBAN)) - continue; - if (!node->IsInboundConn()) - continue; - if (node->fDisconnect) - continue; - bool peer_relay_txes = false; - bool peer_filter_not_null = false; - if (node->m_tx_relay != nullptr) { - LOCK(node->m_tx_relay->cs_filter); - peer_relay_txes = node->m_tx_relay->fRelayTxes; - peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; - } - NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, - node->nLastBlockTime, node->nLastTXTime, - HasAllDesirableServiceFlags(node->nServices), - peer_relay_txes, peer_filter_not_null, node->addr, node->nKeyedNetGroup, - node->m_prefer_evict, node->addr.IsLocal()}; - vEvictionCandidates.push_back(candidate); - } - } - // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. @@ -948,7 +926,7 @@ bool CConnman::AttemptToEvictConnection() total_protect_size -= initial_size - vEvictionCandidates.size(); EraseLastKElements(vEvictionCandidates, ReverseCompareNodeTimeConnected, total_protect_size); - if (vEvictionCandidates.empty()) return false; + if (vEvictionCandidates.empty()) return nullopt; // If any remaining peers are preferred for eviction consider only them. // This happens after the other preferences since if a peer is really the best by other criteria (esp relaying blocks) @@ -980,10 +958,52 @@ bool CConnman::AttemptToEvictConnection() vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]); // Disconnect from the network group with the most connections - NodeId evicted = vEvictionCandidates.front().id; + return vEvictionCandidates.front().id; +} + +/** Try to find a connection to evict when the node is full. + * Extreme care must be taken to avoid opening the node to attacker + * triggered network partitioning. + * The strategy used here is to protect a small number of peers + * for each of several distinct characteristics which are difficult + * to forge. In order to partition a node the attacker must be + * simultaneously better at all of them than honest peers. 
+ */ +bool CConnman::AttemptToEvictConnection() +{ + std::vector<NodeEvictionCandidate> vEvictionCandidates; + { + + LOCK(cs_vNodes); + for (const CNode* node : vNodes) { + if (node->HasPermission(PF_NOBAN)) + continue; + if (!node->IsInboundConn()) + continue; + if (node->fDisconnect) + continue; + bool peer_relay_txes = false; + bool peer_filter_not_null = false; + if (node->m_tx_relay != nullptr) { + LOCK(node->m_tx_relay->cs_filter); + peer_relay_txes = node->m_tx_relay->fRelayTxes; + peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; + } + NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, + node->nLastBlockTime, node->nLastTXTime, + HasAllDesirableServiceFlags(node->nServices), + peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup, + node->m_prefer_evict, node->addr.IsLocal()}; + vEvictionCandidates.push_back(candidate); + } + } + const Optional<NodeId> node_id_to_evict = SelectNodeToEvict(std::move(vEvictionCandidates)); + if (!node_id_to_evict) { + return false; + } LOCK(cs_vNodes); for (CNode* pnode : vNodes) { - if (pnode->GetId() == evicted) { + if (pnode->GetId() == *node_id_to_evict) { pnode->fDisconnect = true; return true; } @@ -1008,14 +1028,12 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { NetPermissionFlags permissionFlags = NetPermissionFlags::PF_NONE; hListenSocket.AddSocketPermissionFlags(permissionFlags); AddWhitelistPermissionFlags(permissionFlags, addr); - bool legacyWhitelisted = false; if (NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_ISIMPLICIT)) { NetPermissions::ClearFlag(permissionFlags, PF_ISIMPLICIT); if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) NetPermissions::AddFlag(permissionFlags, PF_FORCERELAY); if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) NetPermissions::AddFlag(permissionFlags, PF_RELAY); NetPermissions::AddFlag(permissionFlags, PF_MEMPOOL); NetPermissions::AddFlag(permissionFlags, PF_NOBAN); - legacyWhitelisted = true; } { @@ -1086,11 +1104,11 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) { nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM); } - CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND); + + const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + CNode* pnode = new CNode(id, nodeServices, hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND, inbound_onion); pnode->AddRef(); pnode->m_permissionFlags = permissionFlags; - // If this flag is present, the user probably expect that RPC and QT report it as whitelisted (backward compatibility) - pnode->m_legacyWhitelisted = legacyWhitelisted; pnode->m_prefer_evict = discouraged; m_msgproc->InitializeNode(pnode); @@ -1105,6 +1123,27 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { RandAddEvent((uint32_t)id); } +bool CConnman::AddConnection(const std::string& address, ConnectionType conn_type) +{ + if (conn_type != ConnectionType::OUTBOUND_FULL_RELAY && conn_type != ConnectionType::BLOCK_RELAY) return false; + + const int max_connections = conn_type == ConnectionType::OUTBOUND_FULL_RELAY ? 
m_max_outbound_full_relay : m_max_outbound_block_relay; + + // Count existing connections + int existing_connections = WITH_LOCK(cs_vNodes, + return std::count_if(vNodes.begin(), vNodes.end(), [conn_type](CNode* node) { return node->m_conn_type == conn_type; });); + + // Max connections of specified type already exist + if (existing_connections >= max_connections) return false; + + // Max total outbound connections already exist + CSemaphoreGrant grant(*semOutbound, true); + if (!grant) return false; + + OpenNetworkConnection(CAddress(), false, &grant, address.c_str(), conn_type); + return true; +} + void CConnman::DisconnectNodes() { { @@ -1178,37 +1217,47 @@ void CConnman::NotifyNumConnectionsChanged() } } -void CConnman::InactivityCheck(CNode *pnode) +bool CConnman::InactivityCheck(const CNode& node) const { - int64_t nTime = GetSystemTimeInSeconds(); - if (nTime - pnode->nTimeConnected > m_peer_connect_timeout) - { - if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) - { - LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->GetId()); - pnode->fDisconnect = true; - } - else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) - { - LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend); - pnode->fDisconnect = true; - } - else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90*60)) - { - LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); - pnode->fDisconnect = true; - } - else if (pnode->nPingNonceSent && pnode->m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime<std::chrono::microseconds>()) - { - LogPrintf("ping timeout: %fs\n", 0.000001 * count_microseconds(GetTime<std::chrono::microseconds>() - pnode->m_ping_start.load())); - pnode->fDisconnect = true; - } - else if (!pnode->fSuccessfullyConnected) - { - LogPrint(BCLog::NET, "version handshake timeout from %d\n", pnode->GetId()); - pnode->fDisconnect = true; - } + // Use non-mockable system time (otherwise these timers will pop when we + // use setmocktime in the tests). + int64_t now = GetSystemTimeInSeconds(); + + if (now <= node.nTimeConnected + m_peer_connect_timeout) { + // Only run inactivity checks if the peer has been connected longer + // than m_peer_connect_timeout. + return false; + } + + if (node.nLastRecv == 0 || node.nLastSend == 0) { + LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, node.nLastRecv != 0, node.nLastSend != 0, node.GetId()); + return true; + } + + if (now > node.nLastSend + TIMEOUT_INTERVAL) { + LogPrintf("socket sending timeout: %is\n", now - node.nLastSend); + return true; + } + + if (now > node.nLastRecv + TIMEOUT_INTERVAL) { + LogPrintf("socket receive timeout: %is\n", now - node.nLastRecv); + return true; + } + + if (node.nPingNonceSent && node.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime<std::chrono::microseconds>()) { + // We use mockable time for ping timeouts. This means that setmocktime + // may cause pings to time out for peers that have been connected for + // longer than m_peer_connect_timeout. 
+ LogPrintf("ping timeout: %fs\n", 0.000001 * count_microseconds(GetTime<std::chrono::microseconds>() - node.m_ping_start.load())); + return true; + } + + if (!node.fSuccessfullyConnected) { + LogPrint(BCLog::NET, "version handshake timeout from %d\n", node.GetId()); + return true; } + + return false; } bool CConnman::GenerateSelectSet(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set) @@ -1425,18 +1474,18 @@ void CConnman::SocketHandler() if (recvSet || errorSet) { // typical socket buffer is 8K-64K - char pchBuf[0x10000]; + uint8_t pchBuf[0x10000]; int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) continue; - nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT); + nBytes = recv(pnode->hSocket, (char*)pchBuf, sizeof(pchBuf), MSG_DONTWAIT); } if (nBytes > 0) { bool notify = false; - if (!pnode->ReceiveMsgBytes(pchBuf, nBytes, notify)) + if (!pnode->ReceiveMsgBytes(Span<const uint8_t>(pchBuf, nBytes), notify)) pnode->CloseSocketDisconnect(); RecordBytesRecv(nBytes); if (notify) { @@ -1478,19 +1527,13 @@ void CConnman::SocketHandler() } } - // - // Send - // - if (sendSet) - { - LOCK(pnode->cs_vSend); - size_t nBytes = SocketSendData(pnode); - if (nBytes) { - RecordBytesSent(nBytes); - } + if (sendSet) { + // Send data + size_t bytes_sent = WITH_LOCK(pnode->cs_vSend, return SocketSendData(*pnode)); + if (bytes_sent) RecordBytesSent(bytes_sent); } - InactivityCheck(pnode); + if (InactivityCheck(*pnode)) pnode->fDisconnect = true; } { LOCK(cs_vNodes); @@ -1518,121 +1561,6 @@ void CConnman::WakeMessageHandler() condMsgProc.notify_one(); } - - - - - -#ifdef USE_UPNP -static CThreadInterrupt g_upnp_interrupt; -static std::thread g_upnp_thread; -static void ThreadMapPort() -{ - std::string port = strprintf("%u", GetListenPort()); - const char * multicastif = nullptr; - const char * minissdpdpath = nullptr; - struct UPNPDev * devlist = nullptr; - char lanaddr[64]; - - int error = 0; -#if MINIUPNPC_API_VERSION < 14 - devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error); -#else - devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error); -#endif - - struct UPNPUrls urls; - struct IGDdatas data; - int r; - - r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr)); - if (r == 1) - { - if (fDiscover) { - char externalIPAddress[40]; - r = UPNP_GetExternalIPAddress(urls.controlURL, data.first.servicetype, externalIPAddress); - if (r != UPNPCOMMAND_SUCCESS) { - LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r); - } else { - if (externalIPAddress[0]) { - CNetAddr resolved; - if (LookupHost(externalIPAddress, resolved, false)) { - LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString()); - AddLocal(resolved, LOCAL_UPNP); - } - } else { - LogPrintf("UPnP: GetExternalIPAddress failed.\n"); - } - } - } - - std::string strDesc = PACKAGE_NAME " " + FormatFullVersion(); - - do { - r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0"); - - if (r != UPNPCOMMAND_SUCCESS) { - LogPrintf("AddPortMapping(%s, %s, %s) failed with code %d (%s)\n", port, port, lanaddr, r, strupnperror(r)); - } else { - LogPrintf("UPnP Port Mapping successful.\n"); - } - } while (g_upnp_interrupt.sleep_for(std::chrono::minutes(20))); - - r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0); - LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r); - freeUPNPDevlist(devlist); devlist 
= nullptr; - FreeUPNPUrls(&urls); - } else { - LogPrintf("No valid UPnP IGDs found\n"); - freeUPNPDevlist(devlist); devlist = nullptr; - if (r != 0) - FreeUPNPUrls(&urls); - } -} - -void StartMapPort() -{ - if (!g_upnp_thread.joinable()) { - assert(!g_upnp_interrupt); - g_upnp_thread = std::thread((std::bind(&TraceThread<void (*)()>, "upnp", &ThreadMapPort))); - } -} - -void InterruptMapPort() -{ - if(g_upnp_thread.joinable()) { - g_upnp_interrupt(); - } -} - -void StopMapPort() -{ - if(g_upnp_thread.joinable()) { - g_upnp_thread.join(); - g_upnp_interrupt.reset(); - } -} - -#else -void StartMapPort() -{ - // Intentionally left blank. -} -void InterruptMapPort() -{ - // Intentionally left blank. -} -void StopMapPort() -{ - // Intentionally left blank. -} -#endif - - - - - - void CConnman::ThreadDNSAddressSeed() { FastRandomContext rng; @@ -1784,22 +1712,36 @@ void CConnman::SetTryNewOutboundPeer(bool flag) // Return the number of peers we have over our outbound connection limit // Exclude peers that are marked for disconnect, or are going to be -// disconnected soon (eg one-shots and feelers) +// disconnected soon (eg ADDR_FETCH and FEELER) // Also exclude peers that haven't finished initial connection handshake yet // (so that we don't decide we're over our desired connection limit, and then // evict some peer that has finished the handshake) -int CConnman::GetExtraOutboundCount() +int CConnman::GetExtraFullOutboundCount() { - int nOutbound = 0; + int full_outbound_peers = 0; { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) { - ++nOutbound; + if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsFullOutboundConn()) { + ++full_outbound_peers; } } } - return std::max(nOutbound - m_max_outbound_full_relay - m_max_outbound_block_relay, 0); + return std::max(full_outbound_peers - m_max_outbound_full_relay, 0); +} + +int CConnman::GetExtraBlockRelayCount() +{ + int block_relay_peers = 0; + { + LOCK(cs_vNodes); + for (const CNode* pnode : vNodes) { + if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsBlockOnlyConn()) { + ++block_relay_peers; + } + } + } + return std::max(block_relay_peers - m_max_outbound_block_relay, 0); } void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) @@ -1830,6 +1772,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // Minimum time before next feeler connection (in microseconds). int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL); + int64_t nNextExtraBlockRelay = PoissonNextSend(nStart*1000*1000, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); while (!interruptNet) { ProcessAddrFetch(); @@ -1892,23 +1835,53 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) ConnectionType conn_type = ConnectionType::OUTBOUND_FULL_RELAY; int64_t nTime = GetTimeMicros(); + bool anchor = false; bool fFeeler = false; // Determine what type of connection to open. Opening - // OUTBOUND_FULL_RELAY connections gets the highest priority until we + // BLOCK_RELAY connections to addresses from anchors.dat gets the highest + // priority. Then we open OUTBOUND_FULL_RELAY priority until we // meet our full-relay capacity. Then we open BLOCK_RELAY connection // until we hit our block-relay-only peer limit. // GetTryNewOutboundPeer() gets set when a stale tip is detected, so we // try opening an additional OUTBOUND_FULL_RELAY connection. 
If none of - // these conditions are met, check the nNextFeeler timer to decide if - // we should open a FEELER. + // these conditions are met, check to see if it's time to try an extra + // block-relay-only peer (to confirm our tip is current, see below) or the nNextFeeler + // timer to decide if we should open a FEELER. - if (nOutboundFullRelay < m_max_outbound_full_relay) { + if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) { + conn_type = ConnectionType::BLOCK_RELAY; + anchor = true; + } else if (nOutboundFullRelay < m_max_outbound_full_relay) { // OUTBOUND_FULL_RELAY } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { conn_type = ConnectionType::BLOCK_RELAY; } else if (GetTryNewOutboundPeer()) { // OUTBOUND_FULL_RELAY + } else if (nTime > nNextExtraBlockRelay && m_start_extra_block_relay_peers) { + // Periodically connect to a peer (using regular outbound selection + // methodology from addrman) and stay connected long enough to sync + // headers, but not much else. + // + // Then disconnect the peer, if we haven't learned anything new. + // + // The idea is to make eclipse attacks very difficult to pull off, + // because every few minutes we're finding a new peer to learn headers + // from. + // + // This is similar to the logic for trying extra outbound (full-relay) + // peers, except: + // - we do this all the time on a poisson timer, rather than just when + // our tip is stale + // - we potentially disconnect our next-youngest block-relay-only peer, if our + // newest block-relay-only peer delivers a block more recently. + // See the eviction logic in net_processing.cpp. + // + // Because we can promote these connections to block-relay-only + // connections, they do not get their own ConnectionType enum + // (similar to how we deal with extra outbound peers). + nNextExtraBlockRelay = PoissonNextSend(nTime, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + conn_type = ConnectionType::BLOCK_RELAY; } else if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); conn_type = ConnectionType::FEELER; @@ -1924,11 +1897,48 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int nTries = 0; while (!interruptNet) { - CAddrInfo addr = addrman.SelectTriedCollision(); + if (anchor && !m_anchors.empty()) { + const CAddress addr = m_anchors.back(); + m_anchors.pop_back(); + if (!addr.IsValid() || IsLocal(addr) || !IsReachable(addr) || + !HasAllDesirableServiceFlags(addr.nServices) || + setConnected.count(addr.GetGroup(addrman.m_asmap))) continue; + addrConnect = addr; + LogPrint(BCLog::NET, "Trying to make an anchor connection to %s\n", addrConnect.ToString()); + break; + } - // SelectTriedCollision returns an invalid address if it is empty. - if (!fFeeler || !addr.IsValid()) { - addr = addrman.Select(fFeeler); + // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman, + // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates + // already-connected network ranges, ...) before trying new addrman addresses. + nTries++; + if (nTries > 100) + break; + + CAddrInfo addr; + + if (fFeeler) { + // First, try to get a tried table collision address. This returns + // an empty (invalid) address if there are no collisions to try. + addr = addrman.SelectTriedCollision(); + + if (!addr.IsValid()) { + // No tried table collisions. Select a new table address + // for our feeler. 
+ addr = addrman.Select(true); + } else if (AlreadyConnectedToAddress(addr)) { + // If test-before-evict logic would have us connect to a + // peer that we're already connected to, just mark that + // address as Good(). We won't be able to initiate the + // connection anyway, so this avoids inadvertently evicting + // a currently-connected peer. + addrman.Good(addr); + // Select a new table address for our feeler instead. + addr = addrman.Select(true); + } + } else { + // Not a feeler + addr = addrman.Select(); } // Require outbound connections, other than feelers, to be to distinct network groups @@ -1941,13 +1951,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) break; } - // If we didn't find an appropriate destination after trying 100 addresses fetched from addrman, - // stop this loop, and let the outer loop run again (which sleeps, adds seed nodes, recalculates - // already-connected network ranges, ...) before trying new addrman addresses. - nTries++; - if (nTries > 100) - break; - if (!IsReachable(addr)) continue; @@ -1964,7 +1967,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) continue; } - // do not allow non-default ports, unless after 50 invalid addresses selected already + // Do not allow non-default ports, unless after 50 invalid + // addresses selected already. This is to prevent malicious peers + // from advertising themselves as a service on another host and + // port, causing a DoS attack as nodes around the network attempt + // to connect to it fruitlessly. if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50) continue; @@ -1987,6 +1994,19 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) } } +std::vector<CAddress> CConnman::GetCurrentBlockRelayOnlyConns() const +{ + std::vector<CAddress> ret; + LOCK(cs_vNodes); + for (const CNode* pnode : vNodes) { + if (pnode->IsBlockOnlyConn()) { + ret.push_back(pnode->addr); + } + } + + return ret; +} + std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() { std::vector<AddedNodeInfo> ret; @@ -2084,7 +2104,7 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai } if (!pszDest) { bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect)); - if (IsLocal(addrConnect) || FindNode(static_cast<CNetAddr>(addrConnect)) || banned_or_discouraged || FindNode(addrConnect.ToStringIPPort())) { + if (IsLocal(addrConnect) || banned_or_discouraged || AlreadyConnectedToAddress(addrConnect)) { return; } } else if (FindNode(std::string(pszDest))) @@ -2214,10 +2234,6 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, } vhListenSocket.push_back(ListenSocket(hListenSocket, permissions)); - - if (addrBind.IsRoutable() && fDiscover && (permissions & PF_NOBAN) == 0) - AddLocal(addrBind, LOCAL_BIND); - return true; } @@ -2311,10 +2327,18 @@ bool CConnman::Bind(const CService &addr, unsigned int flags, NetPermissionFlags } return false; } + + if (addr.IsRoutable() && fDiscover && !(flags & BF_DONT_ADVERTISE) && !(permissions & PF_NOBAN)) { + AddLocal(addr, LOCAL_BIND); + } + return true; } -bool CConnman::InitBinds(const std::vector<CService>& binds, const std::vector<NetWhitebindPermissions>& whiteBinds) +bool CConnman::InitBinds( + const std::vector<CService>& binds, + const std::vector<NetWhitebindPermissions>& whiteBinds, + const std::vector<CService>& onion_binds) { bool fBound = false; for (const auto& addrBind : binds) { @@ 
-2325,11 +2349,16 @@ bool CConnman::InitBinds(const std::vector<CService>& binds, const std::vector<N } if (binds.empty() && whiteBinds.empty()) { struct in_addr inaddr_any; - inaddr_any.s_addr = INADDR_ANY; + inaddr_any.s_addr = htonl(INADDR_ANY); struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT; fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::PF_NONE); fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::PF_NONE); } + + for (const auto& addr_bind : onion_binds) { + fBound |= Bind(addr_bind, BF_EXPLICIT | BF_DONT_ADVERTISE, NetPermissionFlags::PF_NONE); + } + return fBound; } @@ -2337,18 +2366,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) { Init(connOptions); - { - LOCK(cs_totalBytesRecv); - nTotalBytesRecv = 0; - } - { - LOCK(cs_totalBytesSent); - nTotalBytesSent = 0; - nMaxOutboundTotalBytesSentInCycle = 0; - nMaxOutboundCycleStartTime = 0; - } - - if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds)) { + if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds, connOptions.onion_binds)) { if (clientInterface) { clientInterface->ThreadSafeMessageBox( _("Failed to listen on any port. Use -listen=0 if you want this."), @@ -2377,6 +2395,15 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) } } + if (m_use_addrman_outgoing) { + // Load addresses from anchors.dat + m_anchors = ReadAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME); + if (m_anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { + m_anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); + } + LogPrintf("%i block-relay-only anchors will be tried for connections.\n", m_anchors.size()); + } + uiInterface.InitMessage(_("Starting network threads...").translated); fAddressesInitialized = true; @@ -2492,6 +2519,15 @@ void CConnman::StopNodes() if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; + + if (m_use_addrman_outgoing) { + // Anchor connections are only dumped during clean shutdown. 
+ std::vector<CAddress> anchors_to_dump = GetCurrentBlockRelayOnlyConns(); + if (anchors_to_dump.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) { + anchors_to_dump.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS); + } + DumpAnchors(GetDataDir() / ANCHORS_DATABASE_FILENAME, anchors_to_dump); + } } // Close sockets @@ -2521,7 +2557,7 @@ void CConnman::DeleteNode(CNode* pnode) { assert(pnode); bool fUpdateConnectionTime = false; - m_msgproc->FinalizeNode(pnode->GetId(), fUpdateConnectionTime); + m_msgproc->FinalizeNode(*pnode, fUpdateConnectionTime); if (fUpdateConnectionTime) { addrman.Connected(pnode->addr); } @@ -2560,15 +2596,47 @@ std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pc return addresses; } -std::vector<CAddress> CConnman::GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct) +std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct) { + SOCKET socket; + WITH_LOCK(requestor.cs_hSocket, socket = requestor.hSocket); + auto local_socket_bytes = GetBindAddress(socket).GetAddrBytes(); + uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE) + .Write(requestor.addr.GetNetwork()) + .Write(local_socket_bytes.data(), local_socket_bytes.size()) + .Finalize(); const auto current_time = GetTime<std::chrono::microseconds>(); - if (m_addr_response_caches.find(requestor_network) == m_addr_response_caches.end() || - m_addr_response_caches[requestor_network].m_update_addr_response < current_time) { - m_addr_response_caches[requestor_network].m_addrs_response_cache = GetAddresses(max_addresses, max_pct); - m_addr_response_caches[requestor_network].m_update_addr_response = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6)); + auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{}); + CachedAddrResponse& cache_entry = r.first->second; + if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0. + cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct); + // Choosing a proper cache lifetime is a trade-off between the privacy leak minimization + // and the usefulness of ADDR responses to honest users. + // + // Longer cache lifetime makes it more difficult for an attacker to scrape + // enough AddrMan data to maliciously infer something useful. + // By the time an attacker scraped enough AddrMan records, most of + // the records should be old enough to not leak topology info by + // e.g. analyzing real-time changes in timestamps. + // + // It takes only several hundred requests to scrape everything from an AddrMan containing 100,000 nodes, + // so ~24 hours of cache lifetime indeed makes the data less inferable by the time + // most of it could be scraped (considering that timestamps are updated via + // ADDR self-announcements and when nodes communicate). + // We also should be robust to those attacks which may not require scraping *full* victim's AddrMan + // (because even several timestamps of the same handful of nodes may leak privacy). + // + // On the other hand, longer cache lifetime makes ADDR responses + // outdated and less useful for an honest requestor, e.g. if most nodes + // in the ADDR response are no longer active. + // + // However, the churn in the network is known to be rather low. Since we consider + // nodes to be "terrible" (see IsTerrible()) if the timestamps are older than 30 days, + // max. 
24 hours of "penalty" due to cache shouldn't make any meaningful difference + // in terms of the freshness of the response. + cache_entry.m_cache_entry_expiration = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6)); } - return m_addr_response_caches[requestor_network].m_addrs_response_cache; + return cache_entry.m_addrs_response_cache; } bool CConnman::AddNode(const std::string& strNode) @@ -2672,8 +2740,8 @@ void CConnman::RecordBytesSent(uint64_t bytes) LOCK(cs_totalBytesSent); nTotalBytesSent += bytes; - uint64_t now = GetTime(); - if (nMaxOutboundCycleStartTime + nMaxOutboundTimeframe < now) + const auto now = GetTime<std::chrono::seconds>(); + if (nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME < now) { // timeframe expired, reset cycle nMaxOutboundCycleStartTime = now; @@ -2684,48 +2752,29 @@ void CConnman::RecordBytesSent(uint64_t bytes) nMaxOutboundTotalBytesSentInCycle += bytes; } -void CConnman::SetMaxOutboundTarget(uint64_t limit) -{ - LOCK(cs_totalBytesSent); - nMaxOutboundLimit = limit; -} - uint64_t CConnman::GetMaxOutboundTarget() { LOCK(cs_totalBytesSent); return nMaxOutboundLimit; } -uint64_t CConnman::GetMaxOutboundTimeframe() +std::chrono::seconds CConnman::GetMaxOutboundTimeframe() { - LOCK(cs_totalBytesSent); - return nMaxOutboundTimeframe; + return MAX_UPLOAD_TIMEFRAME; } -uint64_t CConnman::GetMaxOutboundTimeLeftInCycle() +std::chrono::seconds CConnman::GetMaxOutboundTimeLeftInCycle() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) - return 0; + return 0s; - if (nMaxOutboundCycleStartTime == 0) - return nMaxOutboundTimeframe; + if (nMaxOutboundCycleStartTime.count() == 0) + return MAX_UPLOAD_TIMEFRAME; - uint64_t cycleEndTime = nMaxOutboundCycleStartTime + nMaxOutboundTimeframe; - uint64_t now = GetTime(); - return (cycleEndTime < now) ? 0 : cycleEndTime - GetTime(); -} - -void CConnman::SetMaxOutboundTimeframe(uint64_t timeframe) -{ - LOCK(cs_totalBytesSent); - if (nMaxOutboundTimeframe != timeframe) - { - // reset measure-cycle in case of changing - // the timeframe - nMaxOutboundCycleStartTime = GetTime(); - } - nMaxOutboundTimeframe = timeframe; + const std::chrono::seconds cycleEndTime = nMaxOutboundCycleStartTime + MAX_UPLOAD_TIMEFRAME; + const auto now = GetTime<std::chrono::seconds>(); + return (cycleEndTime < now) ? 
0s : cycleEndTime - now; } bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) @@ -2737,8 +2786,8 @@ bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) if (historicalBlockServingLimit) { // keep a large enough buffer to at least relay each block once - uint64_t timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); - uint64_t buffer = timeLeftInCycle / 600 * MAX_BLOCK_SERIALIZED_SIZE; + const std::chrono::seconds timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); + const uint64_t buffer = timeLeftInCycle / std::chrono::minutes{10} * MAX_BLOCK_SERIALIZED_SIZE; if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) return true; } @@ -2774,35 +2823,22 @@ ServiceFlags CConnman::GetLocalServices() const return nLocalServices; } -void CConnman::SetBestHeight(int height) -{ - nBestHeight.store(height, std::memory_order_release); -} - -int CConnman::GetBestHeight() const -{ - return nBestHeight.load(std::memory_order_acquire); -} - unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } -CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in) +CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion) : nTimeConnected(GetSystemTimeInSeconds()), - addr(addrIn), - addrBind(addrBindIn), - nKeyedNetGroup(nKeyedNetGroupIn), - // Don't relay addr messages to peers that we connect to as block-relay-only - // peers (to prevent adversaries from inferring these links from addr - // traffic). - id(idIn), - nLocalHostNonce(nLocalHostNonceIn), - m_conn_type(conn_type_in), - nLocalServices(nLocalServicesIn), - nMyStartingHeight(nMyStartingHeightIn) -{ + addr(addrIn), + addrBind(addrBindIn), + nKeyedNetGroup(nKeyedNetGroupIn), + id(idIn), + nLocalHostNonce(nLocalHostNonceIn), + m_conn_type(conn_type_in), + nLocalServices(nLocalServicesIn), + m_inbound_onion(inbound_onion) +{ + if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND); hSocket = hSocketIn; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; - hashContinue = uint256(); if (conn_type_in != ConnectionType::BLOCK_RELAY) { m_tx_relay = MakeUnique<TxRelay>(); } @@ -2821,7 +2857,7 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn LogPrint(BCLog::NET, "Added connection peer=%d\n", id); } - m_deserializer = MakeUnique<V1TransportDeserializer>(V1TransportDeserializer(Params().MessageStart(), SER_NETWORK, INIT_PROTO_VERSION)); + m_deserializer = MakeUnique<V1TransportDeserializer>(V1TransportDeserializer(Params(), GetId(), SER_NETWORK, INIT_PROTO_VERSION)); m_serializer = MakeUnique<V1TransportSerializer>(V1TransportSerializer()); } @@ -2862,7 +2898,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) // If write queue empty, attempt "optimistic write" if (optimisticSend == true) - nBytesSent = SocketSendData(pnode); + nBytesSent = SocketSendData(*pnode); } if (nBytesSent) RecordBytesSent(nBytesSent); |
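GetMessage() now returns an Optional<CNetMessage> and reports the size of a rejected message through out_err_raw_size, so ReceiveMsgBytes() can drop a corrupt message without disconnecting the peer while still accounting for the bytes. A reduced sketch of that accounting step, with std::optional and a plain std::map standing in for Optional and mapRecvBytesPerMsgCmd (which the real code pre-populates with every valid command):

#include <cstdint>
#include <map>
#include <optional>
#include <string>

struct NetMessage {
    std::string m_command;
    uint32_t m_raw_message_size{0};
};

void AccountReceived(std::map<std::string, uint64_t>& recv_bytes_per_cmd,
                     const std::optional<NetMessage>& result, uint32_t err_raw_size)
{
    if (!result) {
        // Deserialization failed: drop the message, keep the peer, record the bytes.
        recv_bytes_per_cmd["*other*"] += err_raw_size;
        return;
    }
    auto it = recv_bytes_per_cmd.find(result->m_command);
    if (it == recv_bytes_per_cmd.end()) {
        // Unknown command: charge the catch-all bucket so the map cannot grow without bound.
        recv_bytes_per_cmd["*other*"] += result->m_raw_message_size;
    } else {
        it->second += result->m_raw_message_size;
    }
}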
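The eviction logic is likewise split: AttemptToEvictConnection() gathers NodeEvictionCandidate entries under cs_vNodes and hands them to a pure, separately testable SelectNodeToEvict(). A trimmed illustration of the protect-then-evict pattern it uses (struct fields reduced, std::optional instead of Optional, and the real function protects several more characteristics before finally evicting from the most populated netgroup):

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct Candidate {
    int64_t id;
    int64_t min_ping_usec;
    int64_t last_block_time;
    uint64_t netgroup;
};

// Sort by one hard-to-fake characteristic and protect (erase) the best k entries.
template <typename Comparator>
static void EraseLastKElements(std::vector<Candidate>& v, Comparator cmp, size_t k)
{
    std::sort(v.begin(), v.end(), cmp);
    v.erase(v.end() - std::min(k, v.size()), v.end());
}

std::optional<int64_t> SelectNodeToEvictSketch(std::vector<Candidate> c)
{
    // Protect 8 peers with the lowest minimum ping time (they sort to the end).
    EraseLastKElements(c, [](const Candidate& a, const Candidate& b) { return a.min_ping_usec > b.min_ping_usec; }, 8);
    // Protect 4 peers that most recently relayed us a block.
    EraseLastKElements(c, [](const Candidate& a, const Candidate& b) { return a.last_block_time < b.last_block_time; }, 4);
    if (c.empty()) return std::nullopt;
    // The real code picks a victim from the netgroup with the most connections;
    // here we simply evict the first surviving candidate.
    return c.front().id;
}

An attacker trying to force a particular eviction must out-compete honest peers on every protected characteristic at once, which is the partition-resistance argument in the function's comment.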
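AddConnection() is a new entry point that opens one extra OUTBOUND_FULL_RELAY or BLOCK_RELAY connection on demand, but only if the per-type limit and the overall outbound semaphore allow it. A rough, lock-free sketch of the admission check (the real code uses WITH_LOCK on cs_vNodes and a CSemaphoreGrant on semOutbound):

#include <algorithm>
#include <vector>

enum class ConnType { INBOUND, OUTBOUND_FULL_RELAY, BLOCK_RELAY, MANUAL, FEELER, ADDR_FETCH };

struct Node { ConnType type; };

bool CanAddConnection(const std::vector<Node>& nodes, ConnType conn_type,
                      int max_full_relay, int max_block_relay)
{
    // Only full-relay and block-relay-only connections may be added this way.
    if (conn_type != ConnType::OUTBOUND_FULL_RELAY && conn_type != ConnType::BLOCK_RELAY) return false;
    const int max_connections = conn_type == ConnType::OUTBOUND_FULL_RELAY ? max_full_relay : max_block_relay;
    const int existing = std::count_if(nodes.begin(), nodes.end(),
                                       [conn_type](const Node& n) { return n.type == conn_type; });
    return existing < max_connections; // the real code additionally requires a free outbound semaphore slot
}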
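InactivityCheck() now takes a const CNode& and returns whether the peer should be disconnected, leaving it to SocketHandler() to set fDisconnect. A simplified model of the timeout ladder, using plain integer seconds and omitting the mockable-time ping branch that the in-diff comments discuss:

#include <cstdint>

struct PeerTimes {
    int64_t connected;      // when the connection was established
    int64_t last_send;      // last successful send, 0 if none yet
    int64_t last_recv;      // last successful receive, 0 if none yet
    bool handshake_done;    // version handshake completed
};

constexpr int64_t TIMEOUT_INTERVAL = 20 * 60; // 20 minutes, as in net.h

bool ShouldDisconnectForInactivity(const PeerTimes& p, int64_t now, int64_t connect_timeout)
{
    if (now <= p.connected + connect_timeout) return false; // grace period after connecting
    if (p.last_recv == 0 || p.last_send == 0) return true;  // never exchanged a message
    if (now > p.last_send + TIMEOUT_INTERVAL) return true;  // send side stalled
    if (now > p.last_recv + TIMEOUT_INTERVAL) return true;  // receive side stalled
    if (!p.handshake_done) return true;                     // handshake never completed
    return false;
}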
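The reworked outbound loop chooses a connection type in a fixed priority order: anchors from anchors.dat first, then spare full-relay capacity, then spare block-relay capacity, then the stale-tip, extra-block-relay-only and feeler conditions. A minimal standalone sketch of that ladder (simplified names and parameters; the real logic lives in CConnman::ThreadOpenConnections and drives the last two branches from Poisson timers):

enum class ConnType { BLOCK_RELAY, OUTBOUND_FULL_RELAY, FEELER, NONE };

ConnType NextOutboundType(bool have_anchors,
                          int full_relay, int max_full_relay,
                          int block_relay, int max_block_relay,
                          bool stale_tip,
                          bool extra_block_relay_timer_due,
                          bool feeler_timer_due)
{
    if (have_anchors && block_relay < max_block_relay) return ConnType::BLOCK_RELAY; // anchors first
    if (full_relay < max_full_relay) return ConnType::OUTBOUND_FULL_RELAY;           // fill full-relay slots
    if (block_relay < max_block_relay) return ConnType::BLOCK_RELAY;                 // then block-relay slots
    if (stale_tip) return ConnType::OUTBOUND_FULL_RELAY;             // extra peer while the tip looks stale
    if (extra_block_relay_timer_due) return ConnType::BLOCK_RELAY;   // rotating block-relay-only peer
    if (feeler_timer_due) return ConnType::FEELER;                    // test an addrman entry
    return ConnType::NONE;
}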
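The new anchors.dat handling is deliberately asymmetric: addresses are only dumped during a clean shutdown (StopNodes), and on startup at most MAX_BLOCK_RELAY_ONLY_ANCHORS of them are read back and tried as BLOCK_RELAY connections before anything else. A sketch of the dump-side selection, with the serialization (ReadAnchors/DumpAnchors) left out and example addresses taken from the documentation ranges:

#include <cstddef>
#include <string>
#include <vector>

static constexpr size_t MAX_BLOCK_RELAY_ONLY_ANCHORS = 2;

// Stand-in for CConnman::GetCurrentBlockRelayOnlyConns(): addresses of the
// peers we currently have block-relay-only connections to.
std::vector<std::string> CurrentBlockRelayOnlyConns()
{
    return {"203.0.113.7:8333", "198.51.100.12:8333", "192.0.2.33:8333"}; // example data
}

// Keep at most MAX_BLOCK_RELAY_ONLY_ANCHORS addresses; these become the anchors
// re-tried on the next start, making it harder to eclipse the node across restarts.
std::vector<std::string> AnchorsToDump()
{
    std::vector<std::string> anchors = CurrentBlockRelayOnlyConns();
    if (anchors.size() > MAX_BLOCK_RELAY_ONLY_ANCHORS) {
        anchors.resize(MAX_BLOCK_RELAY_ONLY_ANCHORS);
    }
    return anchors;
}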
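GetAddresses() now keys its ADDR-response cache per requestor: the cache id combines the peer's network with the local (bind) address it connected to, so peers reaching the node over different networks or via different local addresses are served from separate cache entries. A self-contained approximation of the caching shape, where std::hash and a fixed 24-hour lifetime stand in for the keyed SipHash (RANDOMIZER_ID_ADDRCACHE) and the randomized 21-27 hour expiry used in the diff:

#include <chrono>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <vector>

struct CachedAddrResponse {
    std::vector<std::string> m_addrs_response_cache;        // addresses handed out to askers
    std::chrono::microseconds m_cache_entry_expiration{0};  // 0 means "never filled yet"
};

std::map<uint64_t, CachedAddrResponse> g_addr_response_caches;

// Hypothetical key derivation for the sketch only; the real code hashes the
// requestor's network and the local socket's address bytes with a keyed SipHash.
uint64_t AddrCacheId(int requestor_network, const std::string& local_bind_addr)
{
    const uint64_t a = std::hash<int>{}(requestor_network);
    const uint64_t b = std::hash<std::string>{}(local_bind_addr);
    return a ^ (b + 0x9e3779b97f4a7c15ULL + (a << 6) + (a >> 2));
}

std::vector<std::string> GetCachedAddresses(uint64_t cache_id,
                                            std::chrono::microseconds now,
                                            const std::function<std::vector<std::string>()>& fetch)
{
    CachedAddrResponse& entry = g_addr_response_caches[cache_id];
    if (entry.m_cache_entry_expiration < now) {
        entry.m_addrs_response_cache = fetch();                        // repopulate from addrman
        entry.m_cache_entry_expiration = now + std::chrono::hours{24}; // randomized 21-27h in the real code
    }
    return entry.m_addrs_response_cache;
}

The long lifetime is the trade-off spelled out in the diff's comment: stale enough to frustrate topology scraping, but still well inside the 30-day window after which addrman entries are considered terrible.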
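Finally, -maxuploadtarget accounting now uses a fixed compile-time cycle length (MAX_UPLOAD_TIMEFRAME, one day) instead of a settable nMaxOutboundTimeframe, and OutboundTargetReached() keeps back enough budget to serve one block per ten minutes for the rest of the cycle. A small standalone example of that buffer computation (illustrative numbers; the real code reads its counters under cs_totalBytesSent):

#include <chrono>
#include <cstdint>
#include <iostream>

static constexpr std::chrono::seconds MAX_UPLOAD_TIMEFRAME{60 * 60 * 24}; // fixed 1-day cycle
static constexpr uint64_t MAX_BLOCK_SERIALIZED_SIZE{4'000'000};           // bytes, as in Bitcoin Core

// True once serving one historical block per 10-minute interval for the rest
// of the cycle could exceed the remaining upload budget.
bool HistoricalServingLimitReached(uint64_t limit, uint64_t sent_in_cycle,
                                   std::chrono::seconds time_left_in_cycle)
{
    if (limit == 0) return false; // no target configured
    const uint64_t buffer =
        time_left_in_cycle / std::chrono::minutes{10} * MAX_BLOCK_SERIALIZED_SIZE;
    return buffer >= limit || sent_in_cycle >= limit - buffer;
}

int main()
{
    // e.g. a 5 GiB daily target, 1 GiB already sent, 6 hours left in the cycle
    std::cout << HistoricalServingLimitReached(5ull << 30, 1ull << 30,
                                               std::chrono::hours{6}) << '\n';
}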