path: root/src
author     John Newbery <john@johnnewbery.com>    2021-03-31 18:44:18 +0100
committer  John Newbery <john@johnnewbery.com>    2021-07-20 13:14:32 +0100
commit     37dcd12d539e4a875581fa049aa0f7fafeb932a4 (patch)
tree       b0dc965d307bc5028be3186cb74c4d20e771e312 /src
parent     cd9902ac5054c01228d52616bf85f7196364d4ff (diff)
scripted-diff: Rename recentRejects
-BEGIN VERIFY SCRIPT-
ren() { sed -i "s:\<$1\>:$2:g" $(git grep -l "\<$1\>" ./src ./test); }

ren recentRejects m_recent_rejects
-END VERIFY SCRIPT-
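(Scripted-diff commits in Bitcoin Core are reviewed by re-running the script between the -BEGIN VERIFY SCRIPT- and -END VERIFY SCRIPT- markers against the parent commit and checking that the regenerated diff matches the commit exactly. The ren() helper here applies a word-boundary sed substitution of the old name to every file under ./src and ./test that git grep finds it in.)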
Diffstat (limited to 'src')
-rw-r--r--  src/net_processing.cpp  28
1 file changed, 14 insertions, 14 deletions
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 0f5f18aada..94ecbdf983 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -470,7 +470,7 @@ private:
*
* Memory used: 1.3 MB
*/
- CRollingBloomFilter recentRejects GUARDED_BY(::cs_main){120'000, 0.000'001};
+ CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001};
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
/*
@@ -1604,7 +1604,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash();
- recentRejects.reset();
+ m_recent_rejects.reset();
}
const uint256& hash = gtxid.GetHash();
@@ -1616,7 +1616,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid)
if (m_recent_confirmed_transactions->contains(hash)) return true;
}
- return recentRejects.contains(hash) || m_mempool.exists(gtxid);
+ return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
}
bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
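The two hunks above cover PeerManagerImpl::AlreadyHaveTx: when the chain tip has moved since the filter was last consulted, the reject filter is cleared (a new block may have confirmed a missing parent or settled a double-spend), and a transaction is then treated as already known if it is in the recently-confirmed set, the reject filter, or the mempool. Below is a minimal, self-contained sketch of that flow. RejectTracker, the std::unordered_set stand-ins, and the string hashes are illustrative assumptions only; the real code uses CRollingBloomFilter{120'000, 0.000'001}, the actual mempool, and a separate recently-confirmed filter.

// Simplified illustration of the AlreadyHaveTx pattern (hypothetical types,
// not the real PeerManagerImpl members).
#include <string>
#include <unordered_set>

struct RejectTracker {
    std::unordered_set<std::string> m_recent_rejects;    // stand-in for the rolling bloom filter
    std::unordered_set<std::string> m_recent_confirmed;  // txs confirmed in recent blocks
    std::unordered_set<std::string> m_mempool;            // txs currently in our mempool
    std::string m_rejects_chain_tip;                       // block hash the reject filter is valid for

    bool AlreadyHaveTx(const std::string& txhash, const std::string& current_tip)
    {
        // A new block may have made previously rejected txs valid (for example,
        // a missing input was confirmed), so give those txs a second chance by
        // starting the reject filter over.
        if (current_tip != m_rejects_chain_tip) {
            m_rejects_chain_tip = current_tip;
            m_recent_rejects.clear();  // corresponds to m_recent_rejects.reset()
        }
        if (m_recent_confirmed.count(txhash)) return true;
        return m_recent_rejects.count(txhash) || m_mempool.count(txhash);
    }
};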
@@ -2235,7 +2235,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
- recentRejects.insert(porphanTx->GetWitnessHash());
+ m_recent_rejects.insert(porphanTx->GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
// failure, since this check depends only on the txid
@@ -2247,7 +2247,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) {
// We only add the txid if it differs from the wtxid, to
// avoid wasting entries in the rolling bloom filter.
- recentRejects.insert(porphanTx->GetHash());
+ m_recent_rejects.insert(porphanTx->GetHash());
}
}
m_orphanage.EraseTx(orphanHash);
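The rule encoded in the hunks above (and repeated for the non-orphan path further down) is: always cache the rejected transaction's wtxid, and additionally cache its txid only when the failure was TX_INPUTS_NOT_STANDARD, a check that does not depend on the witness, and the two hashes actually differ. A hedged sketch of that rule follows; RejectFilter, Tx, and the enum are illustrative stand-ins for CRollingBloomFilter, CTransaction, and TxValidationResult, not the real types.

// Hypothetical helper illustrating the caching rule from the hunks above.
#include <string>
#include <unordered_set>

using RejectFilter = std::unordered_set<std::string>;  // stand-in for CRollingBloomFilter

struct Tx {
    std::string txid;
    std::string wtxid;
};

enum class TxValidationResult { TX_CONSENSUS, TX_INPUTS_NOT_STANDARD };

void CacheRejectedTx(RejectFilter& recent_rejects, const Tx& tx, TxValidationResult result)
{
    // The wtxid identifies exactly what was validated and rejected.
    recent_rejects.insert(tx.wtxid);
    // If the failure ignored the witness entirely (it depends only on the txid
    // portion of the transaction), the same txid will fail with any witness, so
    // caching the txid too avoids redownloading it from non-wtxidrelay peers.
    // Only add it when it actually differs from the wtxid, to avoid wasting
    // entries in the rolling bloom filter.
    if (result == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.wtxid != tx.txid) {
        recent_rejects.insert(tx.txid);
    }
}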
@@ -3250,7 +3250,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
std::sort(unique_parents.begin(), unique_parents.end());
unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
for (const uint256& parent_txid : unique_parents) {
- if (recentRejects.contains(parent_txid)) {
+ if (m_recent_rejects.contains(parent_txid)) {
fRejectedParents = true;
break;
}
@@ -3291,8 +3291,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// regardless of what witness is provided, we will not accept
// this, so we don't need to allow for redownload of this txid
// from any of our non-wtxidrelay peers.
- recentRejects.insert(tx.GetHash());
- recentRejects.insert(tx.GetWitnessHash());
+ m_recent_rejects.insert(tx.GetHash());
+ m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
}
@@ -3311,7 +3311,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034
// for concerns around weakening security of unupgraded nodes
// if we start doing this too early.
- recentRejects.insert(tx.GetWitnessHash());
+ m_recent_rejects.insert(tx.GetWitnessHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
// If the transaction failed for TX_INPUTS_NOT_STANDARD,
// then we know that the witness was irrelevant to the policy
@@ -3322,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// transactions are later received (resulting in
// parent-fetching by txid via the orphan-handling logic).
if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
- recentRejects.insert(tx.GetHash());
+ m_recent_rejects.insert(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetHash());
}
if (RecursiveDynamicUsage(*ptx) < 100000) {
@@ -3331,21 +3331,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
- // If a tx has been detected by recentRejects, we will have reached
+ // If a tx has been detected by m_recent_rejects, we will have reached
// this point and the tx will have been ignored. Because we haven't run
// the tx through AcceptToMemoryPool, we won't have computed a DoS
// score for it or determined exactly why we consider it invalid.
//
// This means we won't penalize any peer subsequently relaying a DoSy
// tx (even if we penalized the first peer who gave it to us) because
- // we have to account for recentRejects showing false positives. In
+ // we have to account for m_recent_rejects showing false positives. In
// other words, we shouldn't penalize a peer if we aren't *sure* they
// submitted a DoSy tx.
//
- // Note that recentRejects doesn't just record DoSy or invalid
+ // Note that m_recent_rejects doesn't just record DoSy or invalid
// transactions, but any tx not accepted by the mempool, which may be
// due to node policy (vs. consensus). So we can't blanket penalize a
- // peer simply for relaying a tx that our recentRejects has caught,
+ // peer simply for relaying a tx that our m_recent_rejects has caught,
// regardless of false positives.
if (state.IsInvalid()) {