Diffstat (limited to 'src/net_processing.cpp')
-rw-r--r--  src/net_processing.cpp  93
1 file changed, 68 insertions(+), 25 deletions(-)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1e065da07d..81deaea0df 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -148,6 +148,14 @@ namespace {
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
+ /*
+ * Filter for transactions that have been recently confirmed.
+ * We use this to avoid requesting transactions that have already been
+ * confirmed.
+ */
+ RecursiveMutex g_cs_recent_confirmed_transactions;
+ std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions);
+
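A minimal sketch, not part of the patch, of how the new filter is intended to be used: every access takes g_cs_recent_confirmed_transactions rather than cs_main, mirroring the existing recentRejects pattern. The helper name below is hypothetical.

    // Hypothetical helper (illustration only): query the new filter under its
    // own lock, so callers do not need to hold cs_main for this check.
    static bool TxRecentlyConfirmed(const uint256& txid)
    {
        LOCK(g_cs_recent_confirmed_transactions);
        if (!g_recent_confirmed_transactions) return false; // filter not yet allocated
        return g_recent_confirmed_transactions->contains(txid);
    }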
/** Blocks that are in flight, and that are in the queue to be downloaded. */
struct QueuedBlock {
uint256 hash;
@@ -1116,6 +1124,16 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+ // Blocks don't typically have more than 4000 transactions, so this should
+ // be at least six blocks (~1 hr) worth of transactions that we can store.
+ // If the number of transactions appearing in a block goes up, or if we are
+ // seeing getdata requests more than an hour after initial announcement, we
+ // can increase this number.
+ // The false positive rate of 1/1M should come out to less than 1
+ // transaction per day that would be inadvertently ignored (which is the
+ // same probability that we have in the reject filter).
+ g_recent_confirmed_transactions.reset(new CRollingBloomFilter(24000, 0.000001));
+
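A back-of-the-envelope check of the sizing above (illustrative only, not part of the patch): 6 blocks of up to 4000 transactions gives the 24000 elements used here, and at roughly 144 blocks per day a 1-in-1M false positive rate works out to well under one wrongly ignored announcement per day.

    // Illustrative arithmetic; the constants mirror the comment above.
    constexpr int MAX_BLOCK_TXS = 4000;                // assumed upper bound per block
    constexpr int FILTER_ELEMENTS = 6 * MAX_BLOCK_TXS; // 24000, ~6 blocks (~1 hour)
    constexpr double FP_RATE = 0.000001;               // 1 in 1M, same as recentRejects
    constexpr int BLOCKS_PER_DAY = 144;                // one block every ~10 minutes
    // Assuming each transaction seen in a day is checked once against the filter:
    constexpr double expected_fps_per_day = BLOCKS_PER_DAY * MAX_BLOCK_TXS * FP_RATE; // ~0.58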
const Consensus::Params& consensusParams = Params().GetConsensus();
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
@@ -1129,36 +1147,59 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, BanMan* banman, CS
* Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
* block. Also save the time of the last tip update.
*/
-void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
- LOCK(g_cs_orphans);
+void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted)
+{
+ {
+ LOCK(g_cs_orphans);
- std::vector<uint256> vOrphanErase;
+ std::vector<uint256> vOrphanErase;
- for (const CTransactionRef& ptx : pblock->vtx) {
- const CTransaction& tx = *ptx;
+ for (const CTransactionRef& ptx : pblock->vtx) {
+ const CTransaction& tx = *ptx;
- // Which orphan pool entries must we evict?
- for (const auto& txin : tx.vin) {
- auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
- if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
- for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
- const CTransaction& orphanTx = *(*mi)->second.tx;
- const uint256& orphanHash = orphanTx.GetHash();
- vOrphanErase.push_back(orphanHash);
+ // Which orphan pool entries must we evict?
+ for (const auto& txin : tx.vin) {
+ auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
+ if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
+ for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
+ const CTransaction& orphanTx = *(*mi)->second.tx;
+ const uint256& orphanHash = orphanTx.GetHash();
+ vOrphanErase.push_back(orphanHash);
+ }
}
}
- }
- // Erase orphan transactions included or precluded by this block
- if (vOrphanErase.size()) {
- int nErased = 0;
- for (const uint256& orphanHash : vOrphanErase) {
- nErased += EraseOrphanTx(orphanHash);
+ // Erase orphan transactions included or precluded by this block
+ if (vOrphanErase.size()) {
+ int nErased = 0;
+ for (const uint256& orphanHash : vOrphanErase) {
+ nErased += EraseOrphanTx(orphanHash);
+ }
+ LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
+ }
+
+ g_last_tip_update = GetTime();
+ }
+ {
+ LOCK(g_cs_recent_confirmed_transactions);
+ for (const auto& ptx : pblock->vtx) {
+ g_recent_confirmed_transactions->insert(ptx->GetHash());
}
- LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
+}
- g_last_tip_update = GetTime();
+void PeerLogicValidation::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex)
+{
+ // To avoid relay problems with transactions that were previously
+ // confirmed, clear our filter of recently confirmed transactions whenever
+ // there's a reorg.
+ // This means that in a 1-block reorg (where 1 block is disconnected and
+ // then another block reconnected), our filter will drop to having only one
+ // block's worth of transactions in it, but that should be fine, since
+ // presumably the most common case of relaying a confirmed transaction
+ // should be just after a new block containing it is found.
+ LOCK(g_cs_recent_confirmed_transactions);
+ g_recent_confirmed_transactions->reset();
}
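To make the comment above concrete, here is an illustrative trace (not part of the patch) of the filter's contents across a 1-block reorg:

    // BlockConnected(B1)    -> filter holds the txs of ..., B0, B1
    // BlockDisconnected(B1) -> filter reset, now empty
    // BlockConnected(B1')   -> filter holds only the txs of B1'
    // Immediately after the reorg only one block's worth of transactions is
    // tracked; it grows back toward ~6 blocks as further blocks connect.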
// All of the following cache a recent block, and are protected by cs_most_recent_block
@@ -1311,12 +1352,14 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
LOCK(g_cs_orphans);
if (mapOrphanTransactions.count(inv.hash)) return true;
}
- const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip();
+
+ {
+ LOCK(g_cs_recent_confirmed_transactions);
+ if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
+ }
return recentRejects->contains(inv.hash) ||
- mempool.exists(inv.hash) ||
- coins_cache.HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
- coins_cache.HaveCoinInCache(COutPoint(inv.hash, 1));
+ mempool.exists(inv.hash);
}
case MSG_BLOCK:
case MSG_WITNESS_BLOCK:
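With the hunk above applied, the transaction branch of AlreadyHave() no longer probes the coins cache for outputs 0 and 1; recently confirmed transactions are caught by the new filter instead. A condensed paraphrase of the resulting checks (a sketch, not a verbatim copy of the file):

    // A txid counts as already known if it is an orphan we hold, was recently
    // confirmed, was recently rejected, or is currently in the mempool.
    {
        LOCK(g_cs_orphans);
        if (mapOrphanTransactions.count(inv.hash)) return true;
    }
    {
        LOCK(g_cs_recent_confirmed_transactions);
        if (g_recent_confirmed_transactions->contains(inv.hash)) return true;
    }
    return recentRejects->contains(inv.hash) || mempool.exists(inv.hash);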