path: root/src/net_processing.cpp
author     fanquake <fanquake@gmail.com>    2021-03-04 20:00:06 +0800
committer  fanquake <fanquake@gmail.com>    2021-03-04 20:13:43 +0800
commit     33921379b6bb989c9708867fedea1352a8091aa5 (patch)
tree       57e0e7c1fa7e998a2610f1bc70ee78c8557b0d69 /src/net_processing.cpp
parent     92b7efcf54d3154e4b31c9a6eae60f27e349f45e (diff)
parent     0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f (diff)
download   bitcoin-33921379b6bb989c9708867fedea1352a8091aa5.tar.xz
Merge #21015: Make all of net_processing (and some of net) use std::chrono types
0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f Make tx relay data structure use std::chrono types (Pieter Wuille)
55e82881a1503bff146970856c1474a6ea659c94 Make all Poisson delays use std::chrono types (Pieter Wuille)
c733ac4d8a597b1001555b142f488ed9fbecc405 Convert block/header sync timeouts to std::chrono types (Pieter Wuille)
4d98b401fbd821700f7a792b0a4cb52c9b71bc9f Change all ping times to std::chrono types (Pieter Wuille)

Pull request description:

  (Picking up #20044. Rebased against master.)

  This changes various uses of integers to represent timestamps and durations to `std::chrono` duration types with type-safe conversions, getting rid of various `.count()`, constructors, and conversion factors.

ACKs for top commit:
  jnewbery:
    utACK 0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f
  vasild:
    ACK 0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f
  MarcoFalke:
    re-ACK 0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f, only changes: minor rename, using C++11 member initializer, using 2min chrono literal, rebase 🤚
  ajtowns:
    utACK 0eaea66e8bfdfb23ff86c0f0924c2d75f5aca75f

Tree-SHA512: 2dbd8d53bf82e98f9b4611e61dc14c448e8957d1a02575b837fadfd59f80e98614d0ccf890fc351f960ade76a6fb8051b282e252e81675a8ee753dba8b1d7f57
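A minimal standalone sketch of the pattern these commits adopt (illustrative only, not code from this PR; the constants and the header count below are hypothetical): raw int64_t second/microsecond counts become std::chrono durations, so mixed-unit arithmetic is checked at compile time and the hand-written conversion factors disappear.

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    using namespace std::chrono_literals;

    // Hypothetical constants in the style the diff introduces; previously these
    // would have been int64_t values with explicit conversion factors.
    static constexpr auto RELAY_CACHE_TIME = 15min;
    static constexpr auto TIMEOUT_PER_HEADER = 1ms;

    int main()
    {
        // Current time as a microsecond duration since the epoch.
        const auto current_time = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::system_clock::now().time_since_epoch());

        // Durations of different resolutions combine safely; the result is expressed
        // in the finer unit (microseconds here), with no manual "* 1000000" factor.
        const auto expiry = current_time + RELAY_CACHE_TIME;

        // Convert to microseconds before scaling by an integer to keep precision,
        // mirroring the comment in the headers-sync-timeout hunk of the diff.
        const int64_t expected_headers = 2000;
        const auto header_budget = std::chrono::microseconds{TIMEOUT_PER_HEADER} * expected_headers;

        std::cout << "relay entry expires " << (expiry - current_time).count() << "us from now, "
                  << "header timeout budget " << header_budget.count() << "us\n";
        return 0;
    }

The diff below applies the same idea to constants such as RELAY_TX_CACHE_TIME (15min) and HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER (1ms), and to the per-peer timeout fields in CNodeState.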
Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp | 110
1 file changed, 58 insertions(+), 52 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index af61e7064e..2e06f03b86 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -37,13 +37,13 @@
#include <typeinfo>
/** How long to cache transactions in mapRelay for normal relay */
-static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME = std::chrono::minutes{15};
+static constexpr auto RELAY_TX_CACHE_TIME = 15min;
/** How long a transaction has to be in the mempool before it can unconditionally be relayed (even when not in mapRelay). */
-static constexpr std::chrono::seconds UNCONDITIONAL_RELAY_DELAY = std::chrono::minutes{2};
-/** Headers download timeout expressed in microseconds
+static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
+/** Headers download timeout.
* Timeout = base + per_header * (expected number of headers) */
-static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 15 minutes
-static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; // 1ms/header
+static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
+static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
/** Protect at least this many outbound peers from disconnection due to slow/
* behind headers chain.
*/
@@ -90,8 +90,8 @@ static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seco
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
-/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
-static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
+/** Time during which a peer must stall block download progress before being disconnected. */
+static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
@@ -105,10 +105,10 @@ static const int MAX_BLOCKTXN_DEPTH = 10;
* degree of disordering of blocks on disk (which make reindexing and pruning harder). We'll probably
* want to make this a per-peer adaptive value at some point. */
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
-/** Block download timeout base, expressed in millionths of the block interval (i.e. 10 min) */
-static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000;
+/** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */
+static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
/** Additional block download timeout per parallel downloading peer (i.e. 5 min) */
-static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000;
+static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
@@ -116,17 +116,21 @@ static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
-static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24};
+static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
/** Average delay between peer address broadcasts */
-static constexpr std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30};
-/** Average delay between trickled inventory transmissions in seconds.
- * Blocks and peers with noban permission bypass this, outbound peers get half this delay. */
-static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
+static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
+/** Average delay between trickled inventory transmissions for inbound peers.
+ * Blocks and peers with noban permission bypass this. */
+static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
+/** Average delay between trickled inventory transmissions for outbound peers.
+ * Use a smaller delay as there is less privacy concern for them.
+ * Blocks and peers with noban permission bypass this. */
+static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL = 2s;
/** Maximum rate of inventory items to send per second.
* Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
/** Maximum number of inventory items to send per transmission. */
-static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * INVENTORY_BROADCAST_INTERVAL;
+static constexpr unsigned int INVENTORY_BROADCAST_MAX = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
/** The number of most recently announced transactions a peer can request. */
static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
/** Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything typically
@@ -135,9 +139,9 @@ static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
* peers, and random variations in the broadcast mechanism. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts in seconds. */
-static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
+static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL = 10min;
/** Maximum feefilter broadcast delay after significant change. */
-static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
+static constexpr auto MAX_FEEFILTER_CHANGE_DELAY = 5min;
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
@@ -441,7 +445,7 @@ private:
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay GUARDED_BY(cs_main);
/** Expiration-time ordered list of (expire time, relay map entry) pairs. */
- std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main);
+ std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>> g_relay_expiration GUARDED_BY(cs_main);
/**
* When a peer sends us a valid block, instruct it to announce blocks to us
@@ -499,12 +503,12 @@ struct CNodeState {
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
//! When to potentially disconnect peer for stalling headers download
- int64_t nHeadersSyncTimeout;
+ std::chrono::microseconds m_headers_sync_timeout{0us};
//! Since when we're stalling block download progress (in microseconds), or 0.
- int64_t nStallingSince;
+ std::chrono::microseconds m_stalling_since{0us};
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
- int64_t nDownloadingSince;
+ std::chrono::microseconds m_downloading_since{0us};
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
@@ -587,9 +591,6 @@ struct CNodeState {
pindexBestHeaderSent = nullptr;
nUnconnectingHeaders = 0;
fSyncStarted = false;
- nHeadersSyncTimeout = 0;
- nStallingSince = 0;
- nDownloadingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
@@ -638,11 +639,11 @@ bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash)
}
if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
// First block on the queue was received, update the start download time for the next one
- state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
+ state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>());
}
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
- state->nStallingSince = 0;
+ state->m_stalling_since = 0us;
mapBlocksInFlight.erase(itInFlight);
return true;
}
@@ -672,7 +673,7 @@ bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, co
state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
- state->nDownloadingSince = GetTime<std::chrono::microseconds>().count();
+ state->m_downloading_since = GetTime<std::chrono::microseconds>();
}
if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
nPeersWithValidatedDownloads++;
@@ -1078,7 +1079,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats)
ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
}
- stats.m_ping_wait_usec = count_microseconds(ping_wait);
+ stats.m_ping_wait = ping_wait;
return true;
}
@@ -4279,7 +4280,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
- state.nHeadersSyncTimeout = count_microseconds(current_time) + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
+ state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
+ (
+ // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
+ // to maintain precision
+ std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
+ (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / consensusParams.nPowTargetSpacing
+ );
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
@@ -4460,10 +4467,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
- pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
+ pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
- // Use half the delay for outbound peers, as there is less privacy concern for them.
- pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
+ pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
}
}
@@ -4551,20 +4557,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
nRelayedTransactions++;
{
// Expire old relay messages
- while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
+ while (!g_relay_expiration.empty() && g_relay_expiration.front().first < current_time)
{
- mapRelay.erase(vRelayExpiration.front().second);
- vRelayExpiration.pop_front();
+ mapRelay.erase(g_relay_expiration.front().second);
+ g_relay_expiration.pop_front();
}
auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
if (ret.second) {
- vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
+ g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret.first);
}
// Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
if (ret2.second) {
- vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
+ g_relay_expiration.emplace_back(current_time + RELAY_TX_CACHE_TIME, ret2.first);
}
}
if (vInv.size() == MAX_INV_SZ) {
@@ -4589,7 +4595,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Detect whether we're stalling
current_time = GetTime<std::chrono::microseconds>();
- if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
+ if (state.m_stalling_since.count() && state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
@@ -4597,7 +4603,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
pto->fDisconnect = true;
return true;
}
- // In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
+ // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N)
// (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
@@ -4605,17 +4611,17 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
- if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
+ if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
pto->fDisconnect = true;
return true;
}
}
// Check for headers sync timeouts
- if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
+ if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
- if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
+ if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a peer (without the noban permission) if it is our only sync peer,
// and we have others we could be using instead.
// Note: If all our peers are inbound, then we won't
@@ -4634,13 +4640,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// this peer (eventually).
state.fSyncStarted = false;
nSyncStarted--;
- state.nHeadersSyncTimeout = 0;
+ state.m_headers_sync_timeout = 0us;
}
}
} else {
// After we've caught up once, reset the timeout so we can't trigger
// disconnect later.
- state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
+ state.m_headers_sync_timeout = std::chrono::microseconds::max();
}
}
@@ -4664,8 +4670,8 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
pindex->nHeight, pto->GetId());
}
if (state.nBlocksInFlight == 0 && staller != -1) {
- if (State(staller)->nStallingSince == 0) {
- State(staller)->nStallingSince = count_microseconds(current_time);
+ if (State(staller)->m_stalling_since == 0us) {
+ State(staller)->m_stalling_since = current_time;
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
@@ -4718,10 +4724,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
// Send the current filter if we sent MAX_FILTER previously
// and made it out of IBD.
- pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
+ pto->m_tx_relay->m_next_send_feefilter = 0us;
}
}
- if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
+ if (current_time > pto->m_tx_relay->m_next_send_feefilter) {
CAmount filterToSend = g_filter_rounder.round(currentFilter);
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
@@ -4729,13 +4735,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->m_tx_relay->lastSentFeeFilter = filterToSend;
}
- pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
+ pto->m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
- else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
+ else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < pto->m_tx_relay->m_next_send_feefilter &&
(currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
- pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
+ pto->m_tx_relay->m_next_send_feefilter = current_time + GetRandomDuration<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY);
}
}
} // release cs_main