From f2d3ba73860e875972738d1da1507124d0971ae5 Mon Sep 17 00:00:00 2001
From: Gregory Maxwell
Date: Mon, 4 Apr 2016 02:36:47 +0000
Subject: Eliminate TX trickle bypass, sort TX invs for privacy and priority.

Previously Bitcoin would send 1/4 of transactions out to all peers instantly.
This causes high overhead because it makes >80% of INVs size 1. Doing so harms
privacy, because it limits the amount of source obscurity a transaction can
receive. These randomized broadcasts also disobeyed transaction dependencies
and required use of the orphan pool. Because the orphan pool is so small this
leads to poor propagation for dependent transactions.

When the bypass wasn't in effect, transactions were sent in the order they
were received. This avoided creating orphans but undermines privacy fairly
significantly.

This commit:

Eliminates the bypass. The bypass is replaced by halving the average delay for
outbound peers.

Sorts candidate transactions for INV by their topological depth then by their
feerate (then hash); removing the information leakage and providing priority
service to higher fee transactions.

Limits the amount of transactions sent in a single INV to 7 tx/sec (and twice
that for outbound); this limits the harm of low fee transaction floods and
gives faster relay service to higher fee transactions. The 7 sounds lower than
it really is because received advertisements need not be sent, and because the
aggregate rate is multiplied by the number of peers.
---
 src/main.cpp      | 60 ++++++++++++++++++++++++++++++++----------------------
 src/main.h        |  9 ++++++---
 src/txmempool.cpp | 15 ++++++++++++++
 src/txmempool.h   |  1 +
 4 files changed, 58 insertions(+), 27 deletions(-)

(limited to 'src')

diff --git a/src/main.cpp b/src/main.cpp
index a94d52f895..4a28bbb00c 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -5560,6 +5560,29 @@ bool ProcessMessages(CNode* pfrom)
 return fOk;
 }
+class CompareInvMempoolOrder
+{
+ CTxMemPool *mp;
+public:
+ CompareInvMempoolOrder(CTxMemPool *mempool)
+ {
+ mp = mempool;
+ }
+
+ bool operator()(const CInv &a, const CInv &b)
+ {
+ if (a.type != MSG_TX && b.type != MSG_TX) {
+ return false;
+ } else {
+ if (a.type != MSG_TX) {
+ return true;
+ } else if (b.type != MSG_TX) {
+ return false;
+ }
+ return mp->CompareDepthAndScore(a.hash, b.hash);
+ }
+ }
+};

 bool SendMessages(CNode* pto)
 {
@@ -5790,42 +5813,31 @@ bool SendMessages(CNode* pto)
 bool fSendTrickle = pto->fWhitelisted;
 if (pto->nNextInvSend < nNow) {
 fSendTrickle = true;
- pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
+ // Use half the delay for outbound peers, as their is less privacy concern for them.
+ pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
 }
 LOCK(pto->cs_inventory);
- vInv.reserve(std::min(1000, pto->vInventoryToSend.size()));
+ if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+ // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. 
+ CompareInvMempoolOrder compareInvMempoolOrder(&mempool); + std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder); + } + vInv.reserve(std::min(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size())); vInvWait.reserve(pto->vInventoryToSend.size()); BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend) { if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash)) continue; - - // trickle out tx inv to protect privacy - if (inv.type == MSG_TX && !fSendTrickle) - { - // 1/4 of tx invs blast to all immediately - static uint256 hashSalt; - if (hashSalt.IsNull()) - hashSalt = GetRandHash(); - uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt)); - hashRand = Hash(BEGIN(hashRand), END(hashRand)); - bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0); - - if (fTrickleWait) - { - vInvWait.push_back(inv); - continue; - } + // No reason to drain out at many times the network's capacity, + // especially since we have many peers and some will draw much shorter delays. + if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) { + vInvWait.push_back(inv); + continue; } pto->filterInventoryKnown.insert(inv.hash); vInv.push_back(inv); - if (vInv.size() >= 1000) - { - pto->PushMessage(NetMsgType::INV, vInv); - vInv.clear(); - } } pto->vInventoryToSend = vInvWait; } diff --git a/src/main.h b/src/main.h index 0962f44e94..4372c16a42 100644 --- a/src/main.h +++ b/src/main.h @@ -99,9 +99,12 @@ static const unsigned int MAX_REJECT_MESSAGE_LENGTH = 111; static const unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24 * 24 * 60; /** Average delay between peer address broadcasts in seconds. */ static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30; -/** Average delay between trickled inventory broadcasts in seconds. - * Blocks, whitelisted receivers, and a random 25% of transactions bypass this. */ -static const unsigned int AVG_INVENTORY_BROADCAST_INTERVAL = 5; +/** Average delay between trickled inventory transmissions in seconds. + * Blocks and whitelisted receivers bypass this, outbound peers get half this delay. */ +static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5; +/** Maximum number of inventory items to send per transmission. + * Limits the impact of low-fee transaction floods. */ +static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL; /** Average delay between feefilter broadcasts in seconds. */ static const unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60; /** Maximum feefilter broadcast delay after significant change. 
*/ diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 52c7793118..3aba578fac 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -752,6 +752,21 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const assert(innerUsage == cachedInnerUsage); } +bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb) +{ + LOCK(cs); + indexed_transaction_set::const_iterator i = mapTx.find(hasha); + if (i == mapTx.end()) return false; + indexed_transaction_set::const_iterator j = mapTx.find(hashb); + if (j == mapTx.end()) return true; + uint64_t counta = i->GetCountWithAncestors(); + uint64_t countb = j->GetCountWithAncestors(); + if (counta == countb) { + return CompareTxMemPoolEntryByScore()(*i, *j); + } + return counta < countb; +} + void CTxMemPool::queryHashes(vector& vtxid) { vtxid.clear(); diff --git a/src/txmempool.h b/src/txmempool.h index de4ba0b371..e4934336c2 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -511,6 +511,7 @@ public: std::list& conflicts, bool fCurrentEstimate = true); void clear(); void _clear(); //lock free + bool CompareDepthAndScore(const uint256& hasha, const uint256& hashb); void queryHashes(std::vector& vtxid); void pruneSpent(const uint256& hash, CCoins &coins); unsigned int GetTransactionsUpdated() const; -- cgit v1.2.3 From dc13dcd2bec2613a1cd5e0395b09b449d176146f Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Thu, 7 Apr 2016 13:57:36 +0200 Subject: Split up and optimize transaction and block inv queues --- src/main.cpp | 76 +++++++++++++++++++++++++++++++++++------------------------- src/net.h | 20 +++++++++++----- 2 files changed, 59 insertions(+), 37 deletions(-) (limited to 'src') diff --git a/src/main.cpp b/src/main.cpp index 4a28bbb00c..61d9301f8f 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -5569,18 +5569,11 @@ public: mp = mempool; } - bool operator()(const CInv &a, const CInv &b) + bool operator()(std::set::iterator a, std::set::iterator b) { - if (a.type != MSG_TX && b.type != MSG_TX) { - return false; - } else { - if (a.type != MSG_TX) { - return true; - } else if (b.type != MSG_TX) { - return false; - } - return mp->CompareDepthAndScore(a.hash, b.hash); - } + /* As std::make_heap produces a max-heap, we want the entries with the + * fewest ancestors/highest fee to sort later. */ + return mp->CompareDepthAndScore(*b, *a); } }; @@ -5808,38 +5801,59 @@ bool SendMessages(CNode* pto) // Message: inventory // vector vInv; - vector vInvWait; { + LOCK(pto->cs_inventory); + vInv.reserve(std::max(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX)); + + // Add blocks + BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) { + vInv.push_back(CInv(MSG_BLOCK, hash)); + if (vInv.size() == MAX_INV_SZ) { + pto->PushMessage(NetMsgType::INV, vInv); + vInv.clear(); + } + } + pto->vInventoryBlockToSend.clear(); + + // Determine transactions to relay bool fSendTrickle = pto->fWhitelisted; if (pto->nNextInvSend < nNow) { fSendTrickle = true; - // Use half the delay for outbound peers, as their is less privacy concern for them. + // Use half the delay for outbound peers, as there is less privacy concern for them. 
pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); } - LOCK(pto->cs_inventory); - if (fSendTrickle && pto->vInventoryToSend.size() > 1) { + if (fSendTrickle) { + // Produce a vector with all candidates for sending + vector::iterator> vInvTx; + vInvTx.reserve(pto->setInventoryTxToSend.size()); + for (std::set::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { + vInvTx.push_back(it); + } // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. + // A heap is used so that not all items need sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&mempool); - std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder); - } - vInv.reserve(std::min(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size())); - vInvWait.reserve(pto->vInventoryToSend.size()); - BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend) - { - if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash)) - continue; + std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much shorter delays. - if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) { - vInvWait.push_back(inv); - continue; + unsigned int nRelayedTransactions = 0; + while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { + // Fetch the top element from the heap + std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); + std::set::iterator it = vInvTx.back(); + vInvTx.pop_back(); + uint256 hash = *it; + // Remove it from the to-be-sent set + pto->setInventoryTxToSend.erase(it); + // Check if not in the filter already + if (pto->filterInventoryKnown.contains(hash)) { + continue; + } + // Send + vInv.push_back(CInv(MSG_TX, hash)); + nRelayedTransactions++; + pto->filterInventoryKnown.insert(hash); } - - pto->filterInventoryKnown.insert(inv.hash); - - vInv.push_back(inv); } - pto->vInventoryToSend = vInvWait; } if (!vInv.empty()) pto->PushMessage(NetMsgType::INV, vInv); diff --git a/src/net.h b/src/net.h index bf367684f6..a95fa79e7e 100644 --- a/src/net.h +++ b/src/net.h @@ -397,7 +397,13 @@ public: // inventory based relay CRollingBloomFilter filterInventoryKnown; - std::vector vInventoryToSend; + // Set of transaction ids we still have to announce. + // They are sorted by the mempool before relay, so the order is not important. + std::set setInventoryTxToSend; + // List of block ids we still have announce. + // There is no final sorting before sending, as they are always sent immediately + // and in the order requested. 
+ std::vector vInventoryBlockToSend; CCriticalSection cs_inventory; std::set setAskFor; std::multimap mapAskFor; @@ -517,11 +523,13 @@ public: void PushInventory(const CInv& inv) { - { - LOCK(cs_inventory); - if (inv.type == MSG_TX && filterInventoryKnown.contains(inv.hash)) - return; - vInventoryToSend.push_back(inv); + LOCK(cs_inventory); + if (inv.type == MSG_TX) { + if (!filterInventoryKnown.contains(inv.hash)) { + setInventoryTxToSend.insert(inv.hash); + } + } else if (inv.type == MSG_BLOCK) { + vInventoryBlockToSend.push_back(inv.hash); } } -- cgit v1.2.3 From ed7068302c7490e8061cb3a558a0f83a465beeea Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 8 Apr 2016 16:26:41 +0200 Subject: Handle mempool requests in send loop, subject to trickle By eliminating queued entries from the mempool response and responding only at trickle time, this makes the mempool no longer leak transaction arrival order information (as the mempool itself is also sorted)-- at least no more than relay itself leaks it. --- src/main.cpp | 74 +++++++++++++++++++++++++++++++++++++----------------------- src/net.cpp | 1 + src/net.h | 2 ++ 3 files changed, 49 insertions(+), 28 deletions(-) (limited to 'src') diff --git a/src/main.cpp b/src/main.cpp index 61d9301f8f..282c8cdb66 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -5235,34 +5235,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, pfrom->fDisconnect = true; return true; } - LOCK2(cs_main, pfrom->cs_filter); - std::vector vtxid; - mempool.queryHashes(vtxid); - vector vInv; - BOOST_FOREACH(uint256& hash, vtxid) { - CInv inv(MSG_TX, hash); - if (pfrom->pfilter) { - CTransaction tx; - bool fInMemPool = mempool.lookup(hash, tx); - if (!fInMemPool) continue; // another thread removed since queryHashes, maybe... - if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue; - } - if (pfrom->minFeeFilter) { - CFeeRate feeRate; - mempool.lookupFeeRate(hash, feeRate); - LOCK(pfrom->cs_feeFilter); - if (feeRate.GetFeePerK() < pfrom->minFeeFilter) - continue; - } - vInv.push_back(inv); - if (vInv.size() == MAX_INV_SZ) { - pfrom->PushMessage(NetMsgType::INV, vInv); - vInv.clear(); - } - } - if (vInv.size() > 0) - pfrom->PushMessage(NetMsgType::INV, vInv); + LOCK(pfrom->cs_inventory); + pfrom->fSendMempool = true; } @@ -5815,13 +5790,52 @@ bool SendMessages(CNode* pto) } pto->vInventoryBlockToSend.clear(); - // Determine transactions to relay + // Check whether periodic sends should happen bool fSendTrickle = pto->fWhitelisted; if (pto->nNextInvSend < nNow) { fSendTrickle = true; // Use half the delay for outbound peers, as there is less privacy concern for them. pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); } + + // Respond to BIP35 mempool requests + if (fSendTrickle && pto->fSendMempool) { + std::vector vtxid; + mempool.queryHashes(vtxid); + pto->fSendMempool = false; + CAmount filterrate = 0; + { + LOCK(pto->cs_feeFilter); + filterrate = pto->minFeeFilter; + } + + LOCK(pto->cs_filter); + + BOOST_FOREACH(const uint256& hash, vtxid) { + CInv inv(MSG_TX, hash); + pto->setInventoryTxToSend.erase(hash); + if (filterrate) { + CFeeRate feeRate; + mempool.lookupFeeRate(hash, feeRate); + if (feeRate.GetFeePerK() < filterrate) + continue; + } + if (pto->pfilter) { + CTransaction tx; + bool fInMemPool = mempool.lookup(hash, tx); + if (!fInMemPool) continue; // another thread removed since queryHashes, maybe... 
+ if (!pto->pfilter->IsRelevantAndUpdate(tx)) continue; + } + pto->filterInventoryKnown.insert(hash); + vInv.push_back(inv); + if (vInv.size() == MAX_INV_SZ) { + pto->PushMessage(NetMsgType::INV, vInv); + vInv.clear(); + } + } + } + + // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending vector::iterator> vInvTx; @@ -5851,6 +5865,10 @@ bool SendMessages(CNode* pto) // Send vInv.push_back(CInv(MSG_TX, hash)); nRelayedTransactions++; + if (vInv.size() == MAX_INV_SZ) { + pto->PushMessage(NetMsgType::INV, vInv); + vInv.clear(); + } pto->filterInventoryKnown.insert(hash); } } diff --git a/src/net.cpp b/src/net.cpp index f294e4c667..6b305ebae4 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2370,6 +2370,7 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa hashContinue = uint256(); nStartingHeight = -1; filterInventoryKnown.reset(); + fSendMempool = false; fGetAddr = false; nNextLocalAddrSend = 0; nNextAddrSend = 0; diff --git a/src/net.h b/src/net.h index a95fa79e7e..26acf59e60 100644 --- a/src/net.h +++ b/src/net.h @@ -411,6 +411,8 @@ public: // Used for headers announcements - unfiltered blocks to relay // Also protected by cs_inventory std::vector vBlockHashesToAnnounce; + // Used for BIP35 mempool sending, also protected by cs_inventory + bool fSendMempool; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. -- cgit v1.2.3 From 4578215e7f787968c1d6478e6df75499bd36dd8d Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Sun, 10 Apr 2016 15:33:05 +0200 Subject: Return mempool queries in dependency order --- src/txmempool.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'src') diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 3aba578fac..0d9fcc982d 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -767,6 +767,16 @@ bool CTxMemPool::CompareDepthAndScore(const uint256& hasha, const uint256& hashb return counta < countb; } +namespace { +class DepthAndScoreComparator +{ + CTxMemPool *mp; +public: + DepthAndScoreComparator(CTxMemPool *mempool) : mp(mempool) {} + bool operator()(const uint256& a, const uint256& b) { return mp->CompareDepthAndScore(a, b); } +}; +} + void CTxMemPool::queryHashes(vector& vtxid) { vtxid.clear(); @@ -775,6 +785,8 @@ void CTxMemPool::queryHashes(vector& vtxid) vtxid.reserve(mapTx.size()); for (indexed_transaction_set::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi) vtxid.push_back(mi->GetTx().GetHash()); + + std::sort(vtxid.begin(), vtxid.end(), DepthAndScoreComparator(this)); } bool CTxMemPool::lookup(uint256 hash, CTransaction& result) const -- cgit v1.2.3 From b5599147533103efea896a1fc4ff51f2d3ad5808 Mon Sep 17 00:00:00 2001 From: Gregory Maxwell Date: Wed, 20 Apr 2016 07:05:23 +0000 Subject: Move bloom and feerate filtering to just prior to tx sending. This will avoid sending more pointless INVs around updates, and prevents using filter updates to timetag transactions. Also adds locking for fRelayTxes. 
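Read together, the hunks below make the send loop ask three questions about each queued txid at the moment it is about to be announced: is it still in the mempool, does it clear the peer's feefilter, and does it match the peer's bloom filter. The following is a rough standalone sketch of that decision, using the same mempool and filter calls that appear in the diff; the free-function packaging and parameter names are illustrative only, not part of the patch, and it assumes the CTxMemPool, CBloomFilter, CFeeRate and uint256 types from the tree (locking elided):

    // Sketch only: the per-peer, at-send-time relay decision this commit introduces.
    static bool ShouldAnnounceToPeer(const uint256& hash, CAmount filterrate,
                                     const CBloomFilter* pfilter, CTxMemPool& pool)
    {
        CFeeRate feeRate;
        if (!pool.lookupFeeRate(hash, feeRate))
            return false;                 // no longer in the mempool; nothing to send
        if (filterrate && feeRate.GetFeePerK() < filterrate)
            return false;                 // below the peer's feefilter (BIP 133) threshold
        if (pfilter) {
            CTransaction tx;
            if (!pool.lookup(hash, tx) || !pfilter->IsRelevantAndUpdate(tx))
                return false;             // excluded by the peer's bloom filter (BIP 37)
        }
        return true;                      // announce it in this trickle
    }

Because these checks run only when the trickle timer fires, a peer that toggles its filters between announcements can no longer use the resulting INV pattern to pin down when a transaction actually arrived.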
--- src/main.cpp | 42 ++++++++++++++++++++++++++++++++++++------ src/net.cpp | 15 +-------------- src/net.h | 2 +- 3 files changed, 38 insertions(+), 21 deletions(-) (limited to 'src') diff --git a/src/main.cpp b/src/main.cpp index 282c8cdb66..b707de2e58 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -4557,12 +4557,16 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, vRecv >> LIMITED_STRING(pfrom->strSubVer, MAX_SUBVERSION_LENGTH); pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer); } - if (!vRecv.empty()) + if (!vRecv.empty()) { vRecv >> pfrom->nStartingHeight; - if (!vRecv.empty()) - vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message - else - pfrom->fRelayTxes = true; + } + { + LOCK(pfrom->cs_filter); + if (!vRecv.empty()) + vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message + else + pfrom->fRelayTxes = true; + } // Disconnect if we connected to ourself if (nNonce == nLocalHostNonce && nNonce > 1) @@ -5325,12 +5329,13 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, CBloomFilter filter; vRecv >> filter; + LOCK(pfrom->cs_filter); + if (!filter.IsWithinSizeConstraints()) // There is no excuse for sending a too-large filter Misbehaving(pfrom->GetId(), 100); else { - LOCK(pfrom->cs_filter); delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(filter); pfrom->pfilter->UpdateEmptyFull(); @@ -5798,6 +5803,12 @@ bool SendMessages(CNode* pto) pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); } + // Time to send but the peer has requested we not relay transactions. + if (fSendTrickle) { + LOCK(pto->cs_filter); + if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear(); + } + // Respond to BIP35 mempool requests if (fSendTrickle && pto->fSendMempool) { std::vector vtxid; @@ -5843,6 +5854,11 @@ bool SendMessages(CNode* pto) for (std::set::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { vInvTx.push_back(it); } + CAmount filterrate = 0; + { + LOCK(pto->cs_feeFilter); + filterrate = pto->minFeeFilter; + } // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. // A heap is used so that not all items need sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&mempool); @@ -5850,6 +5866,7 @@ bool SendMessages(CNode* pto) // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much shorter delays. unsigned int nRelayedTransactions = 0; + LOCK(pto->cs_filter); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); @@ -5862,6 +5879,19 @@ bool SendMessages(CNode* pto) if (pto->filterInventoryKnown.contains(hash)) { continue; } + // Not in the mempool anymore? don't bother sending it. 
+ CFeeRate feeRate; + if (!mempool.lookupFeeRate(hash, feeRate)) { + continue; + } + if (filterrate && feeRate.GetFeePerK() < filterrate) { + continue; + } + if (pto->pfilter) { + CTransaction tx; + if (!mempool.lookup(hash, tx)) continue; + if (!pto->pfilter->IsRelevantAndUpdate(tx)) continue; + } // Send vInv.push_back(CInv(MSG_TX, hash)); nRelayedTransactions++; diff --git a/src/net.cpp b/src/net.cpp index 6b305ebae4..ccc430f5c2 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2071,20 +2071,7 @@ void RelayTransaction(const CTransaction& tx, CFeeRate feerate) LOCK(cs_vNodes); BOOST_FOREACH(CNode* pnode, vNodes) { - if(!pnode->fRelayTxes) - continue; - { - LOCK(pnode->cs_feeFilter); - if (feerate.GetFeePerK() < pnode->minFeeFilter) - continue; - } - LOCK(pnode->cs_filter); - if (pnode->pfilter) - { - if (pnode->pfilter->IsRelevantAndUpdate(tx)) - pnode->PushInventory(inv); - } else - pnode->PushInventory(inv); + pnode->PushInventory(inv); } } diff --git a/src/net.h b/src/net.h index 26acf59e60..b6ec7bf3e2 100644 --- a/src/net.h +++ b/src/net.h @@ -357,7 +357,7 @@ public: // a) it allows us to not relay tx invs before receiving the peer's version message // b) the peer may tell us in its version message that we should not relay tx invs // unless it loads a bloom filter. - bool fRelayTxes; + bool fRelayTxes; //protected by cs_filter bool fSentAddr; CSemaphoreGrant grantOutbound; CCriticalSection cs_filter; -- cgit v1.2.3
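Taken together, the commits above stop stable_sort-ing the whole announcement queue and instead build a heap over it, so only the at most INVENTORY_BROADCAST_MAX entries actually announced in a trickle pay the ordering cost. Here is a self-contained sketch of that pattern using the same fewest-unconfirmed-ancestors-then-highest-score ordering as CompareDepthAndScore; the struct, names and sample values are illustrative only, not Bitcoin Core code:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct MempoolEntry {
        uint64_t nCountWithAncestors; // lower = topologically earlier (parents before children)
        double feePerK;               // higher = better score
        int id;
    };

    // std::make_heap builds a max-heap, so the comparator must say "a sorts below b";
    // the best entry (fewest ancestors, then highest fee rate) ends up on top.
    bool SortsBelow(const MempoolEntry& a, const MempoolEntry& b)
    {
        if (a.nCountWithAncestors != b.nCountWithAncestors)
            return a.nCountWithAncestors > b.nCountWithAncestors;
        return a.feePerK < b.feePerK;
    }

    int main()
    {
        std::vector<MempoolEntry> queue = {
            {1, 5000.0, 0}, {3, 90000.0, 1}, {1, 40000.0, 2}, {2, 20000.0, 3}, {1, 1000.0, 4},
        };
        const unsigned kMaxPerTrickle = 3; // stand-in for INVENTORY_BROADCAST_MAX

        // O(n) heapify instead of an O(n log n) full sort; each announced entry
        // then costs one O(log n) pop.
        std::make_heap(queue.begin(), queue.end(), SortsBelow);
        for (unsigned nSent = 0; nSent < kMaxPerTrickle && !queue.empty(); ++nSent) {
            std::pop_heap(queue.begin(), queue.end(), SortsBelow); // best entry moves to the back
            std::cout << "announce tx " << queue.back().id << "\n";
            queue.pop_back();
        }
        // Whatever is left simply waits in the queue for the next trickle.
    }

With the sample values above this announces tx 2, then 0, then 4: the three one-ancestor entries in fee order, while the deeper (higher ancestor count) entries wait for a later trickle.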
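As for the rate limit itself: INVENTORY_BROADCAST_MAX is 7 * INVENTORY_BROADCAST_INTERVAL = 35 txids per trickle, and trickles fire on average every INVENTORY_BROADCAST_INTERVAL (5) seconds, which is where the commit message's roughly 7 tx/sec per inbound peer comes from; outbound peers use a halved average delay. The sketch below makes that arithmetic concrete. PoissonNextSend is not defined in these patches, so the exponentially distributed delay used here is an assumption about its behaviour, not its actual implementation:

    // Standalone sketch, not Bitcoin Core code: the INV rate limit in numbers.
    #include <cstdint>
    #include <iostream>
    #include <random>

    static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
    static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;

    // Assumed behaviour: next send time = now plus an exponentially distributed delay
    // with the requested mean, so announcement times form a Poisson process and leak
    // nothing about when individual transactions arrived.
    int64_t NextTrickleMicros(int64_t nowMicros, unsigned int avgIntervalSeconds, std::mt19937_64& rng)
    {
        std::exponential_distribution<double> gap(1.0 / avgIntervalSeconds);
        return nowMicros + static_cast<int64_t>(gap(rng) * 1000000.0);
    }

    int main()
    {
        std::mt19937_64 rng(2016);
        const bool kInbound[2] = {true, false};
        for (bool fInbound : kInbound) {
            // Outbound peers use a halved (integer-shifted: 5 -> 2) average delay.
            unsigned int avg = INVENTORY_BROADCAST_INTERVAL >> !fInbound;
            std::cout << (fInbound ? "inbound" : "outbound")
                      << ": up to " << INVENTORY_BROADCAST_MAX << " txids every ~" << avg
                      << "s on average, next trickle in "
                      << NextTrickleMicros(0, avg, rng) / 1e6 << "s\n";
        }
    }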