author     Gregory Maxwell <greg@xiph.org>           2016-04-04 02:36:47 +0000
committer  Pieter Wuille <pieter.wuille@gmail.com>   2016-04-20 10:26:37 +0200
commit     f2d3ba73860e875972738d1da1507124d0971ae5 (patch)
tree       88c35395409424a45ec72d663fa97bf519a9892c /src/main.cpp
parent     04a29373571d44be36bd099c3b3ec3cda89e99d1 (diff)
download   bitcoin-f2d3ba73860e875972738d1da1507124d0971ae5.tar.xz
Eliminate TX trickle bypass, sort TX invs for privacy and priority.
Previously Bitcoin would send 1/4 of transactions out to all peers instantly. This causes high overhead because it makes >80% of INVs size 1. Doing so harms privacy, because it limits the amount of source obscurity a transaction can receive. These randomized broadcasts also disobeyed transaction dependencies and required use of the orphan pool; because the orphan pool is so small, this leads to poor propagation for dependent transactions. When the bypass wasn't in effect, transactions were sent in the order they were received. This avoided creating orphans but undermines privacy fairly significantly.

This commit:

- Eliminates the bypass. The bypass is replaced by halving the average delay for outbound peers.
- Sorts candidate transactions for INV by their topological depth, then by their feerate (then hash), removing the information leakage and providing priority service to higher-fee transactions.
- Limits the number of transactions sent in a single INV to 7 tx/sec (and twice that for outbound peers); this limits the harm of low-fee transaction floods and gives faster relay service to higher-fee transactions. The 7 sounds lower than it really is because received advertisements need not be sent, and because the aggregate rate is multiplied by the number of peers.
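The diff below references two constants, INVENTORY_BROADCAST_INTERVAL and INVENTORY_BROADCAST_MAX, which are defined outside src/main.cpp and therefore do not appear in this diff. A minimal sketch of how they plausibly fit together, assuming a 5-second average trickle interval so that the per-message cap works out to the 7 tx/sec budget described above (the values and placement are assumptions, not taken from this diff):

// Sketch (illustrative, not part of this diff): how a 7 tx/sec budget
// could translate into a per-trickle cap. The real definitions live in a
// header elsewhere in the tree; the exact values here are assumptions.

// Average delay between trickled inventory broadcasts, in seconds.
// Outbound peers use half of this (see the >> !pto->fInbound below).
static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;

// At most 7 tx/sec over the average interval, i.e. at most
// 7 * 5 = 35 transaction invs per trickled INV message.
static const unsigned int INVENTORY_BROADCAST_MAX = 7 * INVENTORY_BROADCAST_INTERVAL;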
Diffstat (limited to 'src/main.cpp')
-rw-r--r--  src/main.cpp  60
1 file changed, 36 insertions(+), 24 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index a94d52f895..4a28bbb00c 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -5560,6 +5560,29 @@ bool ProcessMessages(CNode* pfrom)
return fOk;
}
+class CompareInvMempoolOrder
+{
+    CTxMemPool *mp;
+public:
+    CompareInvMempoolOrder(CTxMemPool *mempool)
+    {
+        mp = mempool;
+    }
+
+    bool operator()(const CInv &a, const CInv &b)
+    {
+        if (a.type != MSG_TX && b.type != MSG_TX) {
+            return false;
+        } else {
+            if (a.type != MSG_TX) {
+                return true;
+            } else if (b.type != MSG_TX) {
+                return false;
+            }
+            return mp->CompareDepthAndScore(a.hash, b.hash);
+        }
+    }
+};
bool SendMessages(CNode* pto)
{
@@ -5790,42 +5813,31 @@ bool SendMessages(CNode* pto)
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
- pto->nNextInvSend = PoissonNextSend(nNow, AVG_INVENTORY_BROADCAST_INTERVAL);
+ // Use half the delay for outbound peers, as there is less privacy concern for them.
+ pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
}
LOCK(pto->cs_inventory);
- vInv.reserve(std::min<size_t>(1000, pto->vInventoryToSend.size()));
+ if (fSendTrickle && pto->vInventoryToSend.size() > 1) {
+ // Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
+ CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
+ std::stable_sort(pto->vInventoryToSend.begin(), pto->vInventoryToSend.end(), compareInvMempoolOrder);
+ }
+ vInv.reserve(std::min<size_t>(INVENTORY_BROADCAST_MAX, pto->vInventoryToSend.size()));
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (inv.type == MSG_TX && pto->filterInventoryKnown.contains(inv.hash))
continue;
-
- // trickle out tx inv to protect privacy
- if (inv.type == MSG_TX && !fSendTrickle)
- {
- // 1/4 of tx invs blast to all immediately
- static uint256 hashSalt;
- if (hashSalt.IsNull())
- hashSalt = GetRandHash();
- uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
- hashRand = Hash(BEGIN(hashRand), END(hashRand));
- bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
-
- if (fTrickleWait)
- {
- vInvWait.push_back(inv);
- continue;
- }
+ // No reason to drain out at many times the network's capacity,
+ // especially since we have many peers and some will draw much shorter delays.
+ if (vInv.size() >= INVENTORY_BROADCAST_MAX || (inv.type == MSG_TX && !fSendTrickle)) {
+ vInvWait.push_back(inv);
+ continue;
}
pto->filterInventoryKnown.insert(inv.hash);
vInv.push_back(inv);
- if (vInv.size() >= 1000)
- {
- pto->PushMessage(NetMsgType::INV, vInv);
- vInv.clear();
- }
}
pto->vInventoryToSend = vInvWait;
}
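For reference, a self-contained sketch of the ordering the new comparator aims to produce: non-transaction invs sort first (their relative order preserved by the stable sort), then transactions by ascending dependency depth and, within a depth, by descending feerate. CTxMemPool::CompareDepthAndScore is implemented in the mempool code and is not shown in this diff, so the depth/feerate comparison below is an illustrative stand-in under those assumptions, not the actual implementation.

// Illustrative stand-in (not the actual CTxMemPool::CompareDepthAndScore):
// order transactions by ancestor depth (parents before children), then by
// feerate (higher fee first), then by hash as a deterministic tie-breaker.
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>

struct MempoolEntryStub {
    std::string hash;       // txid (hex string here for simplicity)
    unsigned int depth;     // number of unconfirmed ancestors
    double feerate;         // satoshis per kB
};

static bool CompareDepthAndScoreStub(const MempoolEntryStub& a, const MempoolEntryStub& b)
{
    // Lower depth sorts first; within equal depth, higher feerate sorts first;
    // the hash keeps the order deterministic.
    return std::make_tuple(a.depth, -a.feerate, a.hash)
         < std::make_tuple(b.depth, -b.feerate, b.hash);
}

int main()
{
    std::vector<MempoolEntryStub> txs = {
        {"c3", 1, 50.0},   // child, mid fee
        {"a1", 0, 10.0},   // parent, low fee
        {"b2", 0, 90.0},   // parent, high fee
    };
    // Mirrors the std::stable_sort call added to SendMessages above.
    std::stable_sort(txs.begin(), txs.end(), CompareDepthAndScoreStub);
    // Resulting order: b2 (depth 0, high fee), a1 (depth 0, low fee), c3 (depth 1).
    return 0;
}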