Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp | 166
1 file changed, 92 insertions(+), 74 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 2b3b4767c5..951b8a1811 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -154,8 +154,14 @@ struct COrphanTx {
int64_t nTimeExpire;
size_t list_pos;
};
+
+/** Guards orphan transactions and extra txs for compact blocks */
RecursiveMutex g_cs_orphans;
+/** Map from txid to orphan transaction record. Limited by
+ * -maxorphantx/DEFAULT_MAX_ORPHAN_TRANSACTIONS */
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
+/** Index from wtxid into the mapOrphanTransactions to look up orphan
+ * transactions using their witness ids. */
std::map<uint256, std::map<uint256, COrphanTx>::iterator> g_orphans_by_wtxid GUARDED_BY(g_cs_orphans);
void EraseOrphansFor(NodeId peer);
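
A minimal, self-contained sketch of the two-index layout these new comments describe: a primary map keyed by txid, plus a secondary map keyed by wtxid whose values are iterators into the primary map. The names and types below are illustrative stand-ins, not the ones in net_processing.cpp:

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>

    using Hash = std::string;                        // stand-in for uint256
    struct OrphanRec { std::string tx; int from_peer; };

    std::map<Hash, OrphanRec> orphans_by_txid;       // primary storage (cf. mapOrphanTransactions)
    std::map<Hash, std::map<Hash, OrphanRec>::iterator> orphans_by_wtxid;  // cf. g_orphans_by_wtxid

    void AddOrphan(const Hash& txid, const Hash& wtxid, OrphanRec rec)
    {
        auto [it, inserted] = orphans_by_txid.emplace(txid, std::move(rec));
        if (inserted) orphans_by_wtxid.emplace(wtxid, it);   // index entry points at the primary entry
    }

    int main()
    {
        AddOrphan("txid-1", "wtxid-1", {"raw-tx-bytes", 7});
        // A wtxid lookup goes through the index and dereferences into the primary map.
        auto it = orphans_by_wtxid.find("wtxid-1");
        assert(it != orphans_by_wtxid.end());
        assert(it->second->second.from_peer == 7);
    }
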
@@ -259,12 +265,19 @@ namespace {
return &(*a) < &(*b);
}
};
- std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
- std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans); //! For random eviction
+ /** Index from the parents' COutPoint into the mapOrphanTransactions. Used
+ * to remove orphan transactions from the mapOrphanTransactions */
+ std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
+ /** Orphan transactions in vector for quick random eviction */
+ std::vector<std::map<uint256, COrphanTx>::iterator> g_orphan_list GUARDED_BY(g_cs_orphans);
- static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
+ /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction.
+ * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
+ * these are kept in a ring buffer */
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
+ /** Offset into vExtraTxnForCompact to insert the next tx */
+ static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
} // namespace
namespace {
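
The ring buffer mentioned in the comment on vExtraTxnForCompact can be sketched in a few self-contained lines. The struct and names below are illustrative; in the patch itself the insertion happens in AddToCompactExtraTransactions(), using vExtraTxnForCompactIt as the wrap-around offset:

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Fixed-capacity ring buffer of (wtxid, tx) pairs: once full, each new entry
    // overwrites the oldest one, so only the last `capacity` transactions survive.
    struct ExtraTxnRing {
        std::vector<std::pair<std::string, std::string>> buf;   // stand-in for (uint256, CTransactionRef)
        size_t next = 0;                                         // next slot to overwrite

        explicit ExtraTxnRing(size_t capacity) : buf(capacity) {}

        void Add(std::string wtxid, std::string tx)
        {
            if (buf.empty()) return;                 // a capacity of zero simply keeps nothing
            buf[next] = {std::move(wtxid), std::move(tx)};
            next = (next + 1) % buf.size();          // wrap around
        }
    };

    int main()
    {
        ExtraTxnRing ring{3};
        for (int i = 0; i < 5; ++i) {
            ring.Add("wtxid-" + std::to_string(i), "tx-" + std::to_string(i));
        }
        // After five insertions into three slots, only the last three remain.
    }
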
@@ -322,10 +335,17 @@ struct CNodeState {
*/
bool fSupportsDesiredCmpctVersion;
- /** State used to enforce CHAIN_SYNC_TIMEOUT
- * Only in effect for outbound, non-manual, full-relay connections, with
- * m_protect == false
- * Algorithm: if a peer's best known block has less work than our tip,
+ /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic.
+ *
+ * Both are only in effect for outbound, non-manual, non-protected connections.
+ * Any peer protected (m_protect = true) is not chosen for eviction. A peer is
+ * marked as protected if all of these are true:
+ * - its connection type is IsBlockOnlyConn() == false
+ * - it gave us a valid connecting header
+ * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
+ * - it has a better chain than we have
+ *
+ * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
* set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
* - If at timeout their best known block now has more work than our tip
* when the timeout was set, then either reset the timeout or clear it
@@ -335,6 +355,9 @@ struct CNodeState {
* and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
* If their best known block is still behind when that new timeout is
* reached, disconnect.
+ *
+ * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers,
+ * drop the outbound one that least recently announced us a new block.
*/
struct ChainSyncTimeoutState {
//! A timeout used for checking whether our peer has sufficiently synced
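
As a rough illustration of the CHAIN_SYNC_TIMEOUT algorithm spelled out in this comment, here is a condensed, self-contained state machine. It is a sketch, not the real implementation (which in this file lives in ConsiderEviction() and compares chain-work snapshots rather than a boolean), and the constants only mirror the intent of the real ones:

    #include <cstdint>
    #include <iostream>

    constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;     // seconds
    constexpr int64_t HEADERS_RESPONSE_TIME = 2 * 60;   // seconds

    struct SyncState {
        int64_t timeout = 0;          // 0 means "no timeout armed"
        bool sent_getheaders = false; // whether the final GETHEADERS nudge went out
        bool protect = false;         // cf. m_protect: protected peers are never evicted here
    };

    // `peer_caught_up` abstracts "their best known block has at least as much work as
    // the tip we compared against"; the real code keeps a block index snapshot for this.
    bool ConsiderEvictionSketch(SyncState& s, int64_t now, bool peer_caught_up)
    {
        if (s.protect) return false;
        if (peer_caught_up) {                    // peer is fine: disarm the timeout
            s.timeout = 0;
            s.sent_getheaders = false;
        } else if (s.timeout == 0) {             // peer fell behind: arm the long timeout
            s.timeout = now + CHAIN_SYNC_TIMEOUT;
        } else if (now > s.timeout) {
            if (!s.sent_getheaders) {            // one last chance to announce better headers
                s.sent_getheaders = true;
                s.timeout = now + HEADERS_RESPONSE_TIME;
            } else {
                return true;                     // still behind after the grace period: disconnect
            }
        }
        return false;
    }

    int main()
    {
        SyncState s;
        ConsiderEvictionSketch(s, 0, false);                                   // arms the timeout
        ConsiderEvictionSketch(s, CHAIN_SYNC_TIMEOUT + 1, false);              // sends GETHEADERS instead
        bool evict = ConsiderEvictionSketch(s, CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME + 2, false);
        std::cout << (evict ? "disconnect" : "keep") << '\n';                  // prints "disconnect"
    }
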
@@ -560,7 +583,7 @@ static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs
}
if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
// First block on the queue was received, update the start download time for the next one
- state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
+ state->nDownloadingSince = std::max(state->nDownloadingSince, count_microseconds(GetTime<std::chrono::microseconds>()));
}
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
@@ -595,7 +618,7 @@ static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint25
state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
- state->nDownloadingSince = GetTimeMicros();
+ state->nDownloadingSince = count_microseconds(GetTime<std::chrono::microseconds>());
}
if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
nPeersWithValidatedDownloads++;
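
Both hunks above make the same mechanical change: the non-mockable GetTimeMicros() is replaced by the chrono-typed, mockable GetTime<std::chrono::microseconds>(), converted back to a raw int64_t only where the surrounding field still stores microsecond counts. A standalone sketch of that conversion boundary, with CountMicros standing in for Bitcoin Core's count_microseconds helper:

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <iostream>

    // Stand-in for count_microseconds(): just the typed duration's tick count.
    constexpr int64_t CountMicros(std::chrono::microseconds d) { return d.count(); }

    int main()
    {
        // A typed "now", as GetTime<std::chrono::microseconds>() would return it
        // (and as a mock clock could fake it in tests).
        const auto current_time = std::chrono::duration_cast<std::chrono::microseconds>(
            std::chrono::system_clock::now().time_since_epoch());

        // Legacy int64_t fields (nDownloadingSince, nHeadersSyncTimeout, ...) still hold
        // raw microsecond counts, so the conversion happens exactly at the assignment:
        int64_t downloading_since = 0;
        downloading_since = std::max(downloading_since, CountMicros(current_time));

        std::cout << "now (us since epoch): " << downloading_since << '\n';
    }
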
@@ -2006,11 +2029,12 @@ void PeerManager::ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHe
}
}
+ // If this is an outbound full-relay peer, check to see if we should protect
+ // it from the bad/lagging chain logic.
+ // Note that outbound block-relay peers are excluded from this protection, and
+ // thus always subject to eviction under the bad/lagging chain logic.
+ // See ChainSyncTimeoutState.
if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) {
- // If this is an outbound full-relay peer, check to see if we should protect
- // it from the bad/lagging chain logic.
- // Note that block-relay-only peers are already implicitly protected, so we
- // only consider setting m_protect for the full-relay peers.
if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId());
nodestate->m_chain_sync.m_protect = true;
@@ -2022,13 +2046,20 @@ void PeerManager::ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHe
return;
}
-void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set, std::list<CTransactionRef>& removed_txn)
+/**
+ * Reconsider orphan transactions after a parent has been accepted to the mempool.
+ *
+ * @param[in,out] orphan_work_set The set of orphan transactions to reconsider. Generally only one
+ *                                orphan will be reconsidered on each call of this function. This set
+ *                                may be added to if accepting an orphan causes its children to be
+ *                                reconsidered.
+ */
+void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
{
AssertLockHeld(cs_main);
AssertLockHeld(g_cs_orphans);
- std::set<NodeId> setMisbehaving;
- bool done = false;
- while (!done && !orphan_work_set.empty()) {
+
+ while (!orphan_work_set.empty()) {
const uint256 orphanHash = *orphan_work_set.begin();
orphan_work_set.erase(orphan_work_set.begin());
@@ -2036,18 +2067,13 @@ void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set, std::list<
if (orphan_it == mapOrphanTransactions.end()) continue;
const CTransactionRef porphanTx = orphan_it->second.tx;
- const CTransaction& orphanTx = *porphanTx;
- NodeId fromPeer = orphan_it->second.fromPeer;
- // Use a new TxValidationState because orphans come from different peers (and we call
- // MaybePunishNodeForTx based on the source peer from the orphan map, not based on the peer
- // that relayed the previous transaction).
- TxValidationState orphan_state;
-
- if (setMisbehaving.count(fromPeer)) continue;
- if (AcceptToMemoryPool(m_mempool, orphan_state, porphanTx, &removed_txn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
+ TxValidationState state;
+ std::list<CTransactionRef> removed_txn;
+
+ if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman);
- for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
+ for (unsigned int i = 0; i < porphanTx->vout.size(); i++) {
auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanHash, i));
if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
for (const auto& elem : it_by_prev->second) {
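
When an accepted orphan's outputs appear in mapOrphanTransactionsByPrev, the orphans spending them are queued into orphan_work_set so a later ProcessOrphanTx call can reconsider them. A self-contained sketch of that parent-to-children propagation, with string hashes and plain containers standing in for the real types:

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    using Txid = std::string;
    using Outpoint = std::pair<Txid, unsigned int>;   // (parent txid, output index)

    // Illustrative stand-in for mapOrphanTransactionsByPrev: which orphans spend a given outpoint.
    std::map<Outpoint, std::set<Txid>> orphans_by_prev;

    // After `parent` (with `n_outputs` outputs) is accepted, queue every orphan that
    // spends one of its outputs for reconsideration, mirroring the loop in this hunk.
    void QueueChildren(const Txid& parent, unsigned int n_outputs, std::set<Txid>& work_set)
    {
        for (unsigned int i = 0; i < n_outputs; ++i) {
            auto it = orphans_by_prev.find({parent, i});
            if (it == orphans_by_prev.end()) continue;
            for (const Txid& child : it->second) work_set.insert(child);
        }
    }

    int main()
    {
        orphans_by_prev[{"parent", 0}] = {"child-a"};
        orphans_by_prev[{"parent", 1}] = {"child-b"};
        std::set<Txid> work_set;
        QueueChildren("parent", 2, work_set);
        std::cout << work_set.size() << " orphans queued for reconsideration\n";  // prints 2
    }
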
@@ -2056,22 +2082,23 @@ void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set, std::list<
}
}
EraseOrphanTx(orphanHash);
- done = true;
- } else if (orphan_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
- if (orphan_state.IsInvalid()) {
- // Punish peer that gave us an invalid orphan tx
- if (MaybePunishNodeForTx(fromPeer, orphan_state)) {
- setMisbehaving.insert(fromPeer);
- }
+ for (const CTransactionRef& removedTx : removed_txn) {
+ AddToCompactExtraTransactions(removedTx);
+ }
+ break;
+ } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
+ if (state.IsInvalid()) {
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
orphanHash.ToString(),
- fromPeer,
- orphan_state.ToString());
+ orphan_it->second.fromPeer,
+ state.ToString());
+ // Maybe punish peer that gave us an invalid orphan tx
+ MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
- if (orphan_state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
+ if (state.GetResult() != TxValidationResult::TX_WITNESS_STRIPPED) {
// We can add the wtxid of this transaction to our reject filter.
// Do not add txids of witness transactions or witness-stripped
// transactions to the filter, as they can have been malleated;
@@ -2086,7 +2113,7 @@ void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set, std::list<
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
assert(recentRejects);
- recentRejects->insert(orphanTx.GetWitnessHash());
+ recentRejects->insert(porphanTx->GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
// failure, since this check depends only on the txid
@@ -2095,17 +2122,17 @@ void PeerManager::ProcessOrphanTx(std::set<uint256>& orphan_work_set, std::list<
// processing of this transaction in the event that child
// transactions are later received (resulting in
// parent-fetching by txid via the orphan-handling logic).
- if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) {
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
// We only add the txid if it differs from the wtxid, to
// avoid wasting entries in the rolling bloom filter.
- recentRejects->insert(orphanTx.GetHash());
+ recentRejects->insert(porphanTx->GetHash());
}
}
EraseOrphanTx(orphanHash);
- done = true;
+ break;
}
- m_mempool.check(&::ChainstateActive().CoinsTip());
}
+ m_mempool.check(&::ChainstateActive().CoinsTip());
}
/**
@@ -2583,8 +2610,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
}
if (!pfrom.fSuccessfullyConnected) {
- // Must have a verack message before anything else
- Misbehaving(pfrom.GetId(), 1, "non-verack message before version handshake");
+ LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
return;
}
@@ -3018,7 +3044,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// (older than our recency filter) if trying to DoS us, without any need
// for witness malleation.
if (!AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool) &&
- AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
+ AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) {
m_mempool.check(&::ChainstateActive().CoinsTip());
RelayTransaction(tx.GetHash(), tx.GetWitnessHash(), m_connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
@@ -3037,8 +3063,12 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
tx.GetHash().ToString(),
m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000);
+ for (const CTransactionRef& removedTx : lRemovedTxn) {
+ AddToCompactExtraTransactions(removedTx);
+ }
+
// Recursively process any orphan transactions that depended on this one
- ProcessOrphanTx(pfrom.orphan_work_set, lRemovedTxn);
+ ProcessOrphanTx(pfrom.orphan_work_set);
}
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
@@ -3139,9 +3169,6 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
}
}
- for (const CTransactionRef& removedTx : lRemovedTxn)
- AddToCompactExtraTransactions(removedTx);
-
// If a tx has been detected by recentRejects, we will have reached
// this point and the tx will have been ignored. Because we haven't run
// the tx through AcceptToMemoryPool, we won't have computed a DoS
@@ -3630,7 +3657,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// Matching pong received, this ping is no longer outstanding
bPingFinished = true;
const auto ping_time = ping_end - pfrom.m_ping_start.load();
- if (ping_time.count() > 0) {
+ if (ping_time.count() >= 0) {
// Successful ping time measurement, replace previous
pfrom.nPingUsecTime = count_microseconds(ping_time);
pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time));
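
Loosening the check from > 0 to >= 0 matters once timestamps can come from a mocked clock: a ping and its pong may then carry identical timestamps, producing a legitimate zero-microsecond measurement that the old check would silently drop. A trivial standalone illustration:

    #include <chrono>
    #include <iostream>

    int main()
    {
        using namespace std::chrono;
        // With mocked time, "now" can be frozen, so send and receive coincide.
        const microseconds ping_start{1'000'000};
        const microseconds ping_end{1'000'000};
        const auto ping_time = ping_end - ping_start;

        if (ping_time.count() >= 0) {               // '>= 0' accepts the zero round trip
            std::cout << "recorded ping of " << ping_time.count() << " us\n";
        }
    }
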
@@ -3841,12 +3868,8 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
ProcessGetData(*pfrom, m_chainparams, m_connman, m_mempool, interruptMsgProc);
if (!pfrom->orphan_work_set.empty()) {
- std::list<CTransactionRef> removed_txn;
LOCK2(cs_main, g_cs_orphans);
- ProcessOrphanTx(pfrom->orphan_work_set, removed_txn);
- for (const CTransactionRef& removedTx : removed_txn) {
- AddToCompactExtraTransactions(removedTx);
- }
+ ProcessOrphanTx(pfrom->orphan_work_set);
}
if (pfrom->fDisconnect)
@@ -4099,7 +4122,6 @@ bool PeerManager::SendMessages(CNode* pto)
CNodeState &state = *State(pto->GetId());
// Address refresh broadcast
- int64_t nNow = GetTimeMicros();
auto current_time = GetTime<std::chrono::microseconds>();
if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
@@ -4156,7 +4178,7 @@ bool PeerManager::SendMessages(CNode* pto)
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
- state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
+ state.nHeadersSyncTimeout = count_microseconds(current_time) + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
@@ -4337,7 +4359,7 @@ bool PeerManager::SendMessages(CNode* pto)
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
- pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
+ pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{m_connman.PoissonNextSendInbound(count_microseconds(current_time), INVENTORY_BROADCAST_INTERVAL)};
} else {
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, std::chrono::seconds{INVENTORY_BROADCAST_INTERVAL >> 1});
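
PoissonNextSend draws the next announcement time from an exponential distribution around an average interval, so INV trickling has no fixed, fingerprintable period; the hunk above only changes which clock value feeds it (the inbound variant, PoissonNextSendInbound, additionally hands inbound peers a shared schedule). A self-contained sketch of that style of scheduler, with an assumed 5-second average interval:

    #include <chrono>
    #include <iostream>
    #include <random>

    // Return a delay drawn from an exponential distribution with the given mean,
    // the property PoissonNextSend relies on: memoryless, unpredictable send times.
    std::chrono::microseconds NextSendDelay(std::chrono::seconds average_interval, std::mt19937_64& rng)
    {
        std::exponential_distribution<double> dist(1.0 / std::chrono::duration<double>(average_interval).count());
        return std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::duration<double>(dist(rng)));
    }

    int main()
    {
        std::mt19937_64 rng{42};
        const auto now = std::chrono::microseconds{0};
        // Outbound peers use half the interval (less privacy concern), as in the hunk above.
        auto next_inbound  = now + NextSendDelay(std::chrono::seconds{5}, rng);
        auto next_outbound = now + NextSendDelay(std::chrono::seconds{5} / 2, rng);
        std::cout << "inbound in "  << next_inbound.count()  << " us, "
                  << "outbound in " << next_outbound.count() << " us\n";
    }
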
@@ -4436,7 +4458,7 @@ bool PeerManager::SendMessages(CNode* pto)
nRelayedTransactions++;
{
// Expire old relay messages
- while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
+ while (!vRelayExpiration.empty() && vRelayExpiration.front().first < count_microseconds(current_time))
{
mapRelay.erase(vRelayExpiration.front().second);
vRelayExpiration.pop_front();
@@ -4444,12 +4466,12 @@ bool PeerManager::SendMessages(CNode* pto)
auto ret = mapRelay.emplace(txid, std::move(txinfo.tx));
if (ret.second) {
- vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret.first);
+ vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret.first);
}
// Add wtxid-based lookup into mapRelay as well, so that peers can request by wtxid
auto ret2 = mapRelay.emplace(wtxid, ret.first->second);
if (ret2.second) {
- vRelayExpiration.emplace_back(nNow + std::chrono::microseconds{RELAY_TX_CACHE_TIME}.count(), ret2.first);
+ vRelayExpiration.emplace_back(count_microseconds(current_time + std::chrono::microseconds{RELAY_TX_CACHE_TIME}), ret2.first);
}
}
if (vInv.size() == MAX_INV_SZ) {
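
mapRelay keeps recently announced transactions available for GETDATA requests, and vRelayExpiration is a parallel FIFO of (expiry, iterator) pairs drained from the front; with wtxid relay each transaction is inserted under both its txid and its wtxid, the two entries sharing one expiry. A self-contained sketch of that expiry queue, with string keys and a 15-minute cache time standing in for uint256 keys and RELAY_TX_CACHE_TIME:

    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    std::map<std::string, std::string> relay_map;   // key (txid or wtxid) -> tx
    std::deque<std::pair<int64_t, std::map<std::string, std::string>::iterator>> relay_expiration;

    void ExpireOld(int64_t now_us)
    {
        // Entries were pushed in expiry order, so only the front can be stale.
        while (!relay_expiration.empty() && relay_expiration.front().first < now_us) {
            relay_map.erase(relay_expiration.front().second);
            relay_expiration.pop_front();
        }
    }

    void AddRelay(const std::string& txid, const std::string& wtxid, const std::string& tx,
                  int64_t now_us, int64_t cache_us)
    {
        auto ret = relay_map.emplace(txid, tx);
        if (ret.second) relay_expiration.emplace_back(now_us + cache_us, ret.first);
        auto ret2 = relay_map.emplace(wtxid, ret.first->second);   // second lookup key, same tx
        if (ret2.second) relay_expiration.emplace_back(now_us + cache_us, ret2.first);
    }

    int main()
    {
        AddRelay("txid-1", "wtxid-1", "rawtx", /*now_us=*/0, /*cache_us=*/15 * 60 * 1'000'000LL);
        ExpireOld(/*now_us=*/16 * 60 * 1'000'000LL);
        std::cout << relay_map.size() << " entries left\n";        // prints 0
    }
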
@@ -4474,10 +4496,7 @@ bool PeerManager::SendMessages(CNode* pto)
// Detect whether we're stalling
current_time = GetTime<std::chrono::microseconds>();
- // nNow is the current system time (GetTimeMicros is not mockable) and
- // should be replaced by the mockable current_time eventually
- nNow = GetTimeMicros();
- if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
+ if (state.nStallingSince && state.nStallingSince < count_microseconds(current_time) - 1000000 * BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
@@ -4493,7 +4512,7 @@ bool PeerManager::SendMessages(CNode* pto)
if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
- if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
+ if (count_microseconds(current_time) > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
pto->fDisconnect = true;
return true;
@@ -4503,7 +4522,7 @@ bool PeerManager::SendMessages(CNode* pto)
if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) {
- if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
+ if (count_microseconds(current_time) > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a peer (without the noban permission) if it is our only sync peer,
// and we have others we could be using instead.
// Note: If all our peers are inbound, then we won't
@@ -4553,7 +4572,7 @@ bool PeerManager::SendMessages(CNode* pto)
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
- State(staller)->nStallingSince = nNow;
+ State(staller)->nStallingSince = count_microseconds(current_time);
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
@@ -4637,7 +4656,6 @@ bool PeerManager::SendMessages(CNode* pto)
!pto->HasPermission(PF_FORCERELAY) // peers with the forcerelay permission should not filter txs to us
) {
CAmount currentFilter = m_mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
- int64_t timeNow = GetTimeMicros();
static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}};
if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
// Received tx-inv messages are discarded when the active
@@ -4648,10 +4666,10 @@ bool PeerManager::SendMessages(CNode* pto)
if (pto->m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
// Send the current filter if we sent MAX_FILTER previously
// and made it out of IBD.
- pto->m_tx_relay->nextSendTimeFeeFilter = timeNow - 1;
+ pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) - 1;
}
}
- if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
+ if (count_microseconds(current_time) > pto->m_tx_relay->nextSendTimeFeeFilter) {
CAmount filterToSend = g_filter_rounder.round(currentFilter);
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
@@ -4659,13 +4677,13 @@ bool PeerManager::SendMessages(CNode* pto)
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->m_tx_relay->lastSentFeeFilter = filterToSend;
}
- pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
+ pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(count_microseconds(current_time), AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
- else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
+ else if (count_microseconds(current_time) + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter &&
(currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
- pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
+ pto->m_tx_relay->nextSendTimeFeeFilter = count_microseconds(current_time) + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
}
}
} // release cs_main
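
The feefilter hunks keep the existing two-part schedule and only switch it to the mockable clock: broadcasts normally follow a Poisson schedule around AVG_FEEFILTER_BROADCAST_INTERVAL, but if the local filter drifts well away from what the peer last heard, the next broadcast is pulled forward to within MAX_FEEFILTER_CHANGE_DELAY. A condensed, self-contained sketch of that decision; the constants and the fixed (rather than randomized) pull-forward are simplifications:

    #include <cstdint>
    #include <iostream>

    // Illustrative constants (seconds), standing in for the real broadcast parameters.
    constexpr int64_t AVG_BROADCAST_INTERVAL = 10 * 60;
    constexpr int64_t MAX_CHANGE_DELAY = 5 * 60;

    struct FeeFilterState {
        int64_t last_sent = 0;       // last fee rate we announced
        int64_t next_send = 0;       // scheduled time of the next FEEFILTER message
    };

    // Returns true if a FEEFILTER should go out now; otherwise may move next_send earlier
    // when the current filter has drifted far from what the peer last heard.
    bool MaybeSendFeeFilter(FeeFilterState& s, int64_t now, int64_t current_filter, int64_t poisson_delay)
    {
        if (now > s.next_send) {
            s.last_sent = current_filter;
            s.next_send = now + poisson_delay;     // normally a Poisson draw around AVG_BROADCAST_INTERVAL
            return true;
        }
        const bool changed_substantially =
            current_filter < 3 * s.last_sent / 4 || current_filter > 4 * s.last_sent / 3;
        if (changed_substantially && now + MAX_CHANGE_DELAY < s.next_send) {
            s.next_send = now + MAX_CHANGE_DELAY;  // the real code randomizes within this window
        }
        return false;
    }

    int main()
    {
        FeeFilterState s;
        std::cout << MaybeSendFeeFilter(s, /*now=*/1, /*current_filter=*/1000, AVG_BROADCAST_INTERVAL)
                  << '\n';                          // prints 1: first broadcast goes out immediately
    }
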