-rw-r--r--   src/bloom.cpp              34
-rw-r--r--   src/bloom.h                16
-rw-r--r--   src/main.cpp               74
-rw-r--r--   src/net.cpp                 2
-rw-r--r--   src/test/bloom_tests.cpp    6
5 files changed, 109 insertions(+), 23 deletions(-)
diff --git a/src/bloom.cpp b/src/bloom.cpp
index 36cba491c4..de87206592 100644
--- a/src/bloom.cpp
+++ b/src/bloom.cpp
@@ -8,6 +8,7 @@
#include "hash.h"
#include "script/script.h"
#include "script/standard.h"
+#include "random.h"
#include "streams.h"
#include <math.h>
@@ -121,6 +122,12 @@ void CBloomFilter::clear()
isEmpty = true;
}
+void CBloomFilter::reset(unsigned int nNewTweak)
+{
+ clear();
+ nTweak = nNewTweak;
+}
+
bool CBloomFilter::IsWithinSizeConstraints() const
{
return vData.size() <= MAX_BLOOM_FILTER_SIZE && nHashFuncs <= MAX_HASH_FUNCS;
@@ -209,15 +216,17 @@ void CBloomFilter::UpdateEmptyFull()
isEmpty = empty;
}
-CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate, unsigned int nTweak) :
- b1(nElements * 2, fpRate, nTweak), b2(nElements * 2, fpRate, nTweak)
+CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate) :
+ b1(nElements * 2, fpRate, 0), b2(nElements * 2, fpRate, 0)
{
// Implemented using two bloom filters of 2 * nElements each.
// We fill them up, and clear them, staggered, every nElements
// inserted, so at least one always contains the last nElements
// inserted.
- nBloomSize = nElements * 2;
nInsertions = 0;
+ nBloomSize = nElements * 2;
+
+ reset();
}
void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
@@ -234,6 +243,12 @@ void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
}
}
+void CRollingBloomFilter::insert(const uint256& hash)
+{
+ vector<unsigned char> data(hash.begin(), hash.end());
+ insert(data);
+}
+
bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
{
if (nInsertions < nBloomSize / 2) {
@@ -242,9 +257,16 @@ bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
return b1.contains(vKey);
}
-void CRollingBloomFilter::clear()
+bool CRollingBloomFilter::contains(const uint256& hash) const
+{
+ vector<unsigned char> data(hash.begin(), hash.end());
+ return contains(data);
+}
+
+void CRollingBloomFilter::reset()
{
- b1.clear();
- b2.clear();
+ unsigned int nNewTweak = GetRand(std::numeric_limits<unsigned int>::max());
+ b1.reset(nNewTweak);
+ b2.reset(nNewTweak);
nInsertions = 0;
}
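
The constructor comment above describes the rolling scheme: two bloom filters, each sized for 2 * nElements, are filled in parallel and cleared on a staggered schedule, so at least one of them always holds the most recent nElements insertions, and contains() queries whichever filter was cleared longer ago. A minimal sketch of that staggering, using exact std::set containers in place of the probabilistic CBloomFilters (illustrative only, not the actual CRollingBloomFilter internals):

    #include <set>
    #include <vector>

    // Illustrative stand-in: two exact sets play the role of b1/b2 to show how
    // the staggered fill-and-clear keeps the last nElements items reachable.
    class RollingSetSketch {
    public:
        explicit RollingSetSketch(unsigned int nElements)
            : nBloomSize(nElements * 2), nInsertions(0) {}

        void insert(const std::vector<unsigned char>& vKey) {
            // Every nBloomSize / 2 (= nElements) insertions, clear one of the
            // two sets, alternating, so the other still covers the recent items.
            if (nInsertions == 0)
                s1.clear();
            else if (nInsertions == nBloomSize / 2)
                s2.clear();
            s1.insert(vKey);
            s2.insert(vKey);
            if (++nInsertions == nBloomSize)
                nInsertions = 0;
        }

        bool contains(const std::vector<unsigned char>& vKey) const {
            // Query the set that was cleared less recently, mirroring the
            // b1/b2 switch in CRollingBloomFilter::contains() above.
            if (nInsertions < nBloomSize / 2)
                return s2.count(vKey) > 0;
            return s1.count(vKey) > 0;
        }

    private:
        std::set<std::vector<unsigned char> > s1, s2;
        unsigned int nBloomSize;
        unsigned int nInsertions;
    };
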
diff --git a/src/bloom.h b/src/bloom.h
index bb17f59c86..a4dba8cb4f 100644
--- a/src/bloom.h
+++ b/src/bloom.h
@@ -89,6 +89,7 @@ public:
bool contains(const uint256& hash) const;
void clear();
+ void reset(unsigned int nNewTweak);
//! True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS
//! (catch a filter which was just deserialized which was too big)
@@ -103,7 +104,11 @@ public:
/**
* RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
- * Construct it with the number of items to keep track of, and a false-positive rate.
+ * Construct it with the number of items to keep track of, and a false-positive
+ * rate. Unlike CBloomFilter, by default nTweak is set to a cryptographically
+ * secure random value for you. Similarly, instead of clear(), a reset()
+ * method is provided, which also changes nTweak to decrease the impact of
+ * false positives.
*
* contains(item) will always return true if item was one of the last N things
* insert()'ed ... but may also return true for items that were not inserted.
@@ -111,12 +116,17 @@ public:
class CRollingBloomFilter
{
public:
- CRollingBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+ // A random bloom filter calls GetRand() at creation time.
+ // Don't create global CRollingBloomFilter objects, as they may be
+ // constructed before the randomizer is properly initialized.
+ CRollingBloomFilter(unsigned int nElements, double nFPRate);
void insert(const std::vector<unsigned char>& vKey);
+ void insert(const uint256& hash);
bool contains(const std::vector<unsigned char>& vKey) const;
+ bool contains(const uint256& hash) const;
- void clear();
+ void reset();
private:
unsigned int nBloomSize;
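
For orientation, a usage sketch of the revised header API shown above (a hypothetical caller, mirroring how bloom_tests.cpp exercises it further down): construct with a capacity and false-positive rate, insert and query either byte vectors or uint256 hashes, and call reset() to clear the contents and pick a fresh random nTweak. Per the comment above, such an object should not be global, since it calls GetRand() at construction.

    // Hypothetical caller of the new CRollingBloomFilter interface; assumes
    // bloom.h and uint256.h are available and the RNG is already initialized.
    #include "bloom.h"
    #include "uint256.h"

    void ExampleRollingFilterUse(const uint256& txid)
    {
        // Track roughly the last 1000 items at a 0.1% false-positive rate.
        CRollingBloomFilter filter(1000, 0.001);

        filter.insert(txid);                    // uint256 overload added above
        bool seen = filter.contains(txid);      // always true for a recent insert

        filter.reset();                         // empties the filter, re-randomizes nTweak
        bool stillSeen = filter.contains(txid); // almost certainly false (modulo false positives)

        (void)seen;
        (void)stillSeen;
    }
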
diff --git a/src/main.cpp b/src/main.cpp
index fa12281655..14f36762c9 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -162,6 +162,29 @@ namespace {
*/
map<uint256, NodeId> mapBlockSource;
+ /**
+ * Filter for transactions that were recently rejected by
+ * AcceptToMemoryPool. These are not rerequested until the chain tip
+ * changes, at which point the entire filter is reset. Protected by
+ * cs_main.
+ *
+ * Without this filter we'd be re-requesting txs from each of our peers,
+ * increasing bandwidth consumption considerably. For instance, with 100
+ * peers, half of which relay a tx we don't accept, that might be a 50x
+ * bandwidth increase. A flooding attacker attempting to roll over the
+ * filter using minimum-sized, 60-byte transactions might manage to send
+ * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
+ * two-minute window to send invs to us.
+ *
+ * Decreasing the false positive rate is fairly cheap, so we pick one in a
+ * million to make it highly unlikely for users to have issues with this
+ * filter.
+ *
+ * Memory used: 1.7MB
+ */
+ boost::scoped_ptr<CRollingBloomFilter> recentRejects;
+ uint256 hashRecentRejectsChainTip;
+
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
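
A back-of-the-envelope check of the "Memory used: 1.7MB" figure in the comment above, assuming the two internal CBloomFilters each size themselves for 2 * nElements entries using the standard optimal bloom-filter sizing m = -n * ln(p) / ln(2)^2 bits; this is a sketch of the arithmetic, not a measurement:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const double nElements = 120000;  // rolling filter capacity chosen above
        const double fpRate = 0.000001;   // one-in-a-million false-positive rate

        // Each of the two internal filters is built for 2 * nElements entries.
        const double bitsPerFilter =
            -(2.0 * nElements) * std::log(fpRate) / (std::log(2.0) * std::log(2.0));

        // Two filters, bits -> megabytes: comes out to roughly 1.73 MB.
        const double totalMB = 2.0 * bitsPerFilter / 8.0 / 1e6;
        std::printf("approx. %.2f MB\n", totalMB);
        return 0;
    }
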
@@ -3248,6 +3271,7 @@ void UnloadBlockIndex()
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
mapNodeState.clear();
+ recentRejects.reset(NULL);
BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
delete entry.second;
@@ -3268,6 +3292,10 @@ bool LoadBlockIndex()
bool InitBlockIndex() {
const CChainParams& chainparams = Params();
LOCK(cs_main);
+
+ // Initialize global variables that cannot be constructed at startup.
+ recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+
// Check whether we're already initialized
if (chainActive.Genesis() != NULL)
return true;
@@ -3670,10 +3698,21 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
case MSG_TX:
{
- bool txInMap = false;
- txInMap = mempool.exists(inv.hash);
- return txInMap || mapOrphanTransactions.count(inv.hash) ||
- pcoinsTip->HaveCoins(inv.hash);
+ assert(recentRejects);
+ if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
+ {
+ // If the chain tip has changed, previously rejected transactions
+ // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
+ // or a double-spend. Reset the rejects filter and give those
+ // txs a second chance.
+ hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
+ recentRejects->reset();
+ }
+
+ return recentRejects->contains(inv.hash) ||
+ mempool.exists(inv.hash) ||
+ mapOrphanTransactions.count(inv.hash) ||
+ pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash);
@@ -4273,6 +4312,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
+ assert(recentRejects);
+ recentRejects->insert(orphanHash);
}
mempool.check(pcoinsTip);
}
@@ -4290,11 +4331,24 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0)
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
- } else if (pfrom->fWhitelisted) {
- // Always relay transactions received from whitelisted peers, even
- // if they are already in the mempool (allowing the node to function
- // as a gateway for nodes hidden behind it).
- RelayTransaction(tx);
+ } else {
+ // AcceptToMemoryPool() returned false, possibly because the tx is
+ // already in the mempool; if the tx isn't in the mempool that
+ // means it was rejected and we shouldn't ask for it again.
+ if (!mempool.exists(tx.GetHash())) {
+ assert(recentRejects);
+ recentRejects->insert(tx.GetHash());
+ }
+ if (pfrom->fWhitelisted) {
+ // Always relay transactions received from whitelisted peers, even
+ // if they were rejected from the mempool, allowing the node to
+ // function as a gateway for nodes hidden behind it.
+ //
+ // FIXME: This includes invalid transactions, which means a
+ // whitelisted peer could get us banned! We may want to change
+ // that.
+ RelayTransaction(tx);
+ }
}
int nDoS = 0;
if (state.IsInvalid(nDoS))
@@ -4797,7 +4851,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
{
// Periodically clear addrKnown to allow refresh broadcasts
if (nLastRebroadcast)
- pnode->addrKnown.clear();
+ pnode->addrKnown.reset();
// Rebroadcast our address
AdvertizeLocal(pnode);
diff --git a/src/net.cpp b/src/net.cpp
index 3cece520de..176fd7195b 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -2060,7 +2060,7 @@ unsigned int SendBufferSize() { return 1000*GetArg("-maxsendbuffer", 1*1000); }
CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNameIn, bool fInboundIn) :
ssSend(SER_NETWORK, INIT_PROTO_VERSION),
- addrKnown(5000, 0.001, insecure_rand()),
+ addrKnown(5000, 0.001),
setInventoryKnown(SendBufferSize() / 1000)
{
nServices = 0;
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index 1bda8a7ea1..6b30d6aa8a 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -469,7 +469,7 @@ static std::vector<unsigned char> RandomData()
BOOST_AUTO_TEST_CASE(rolling_bloom)
{
// last-100-entry, 1% false positive:
- CRollingBloomFilter rb1(100, 0.01, 0);
+ CRollingBloomFilter rb1(100, 0.01);
// Overfill:
static const int DATASIZE=399;
@@ -500,7 +500,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom)
BOOST_CHECK(nHits < 175);
BOOST_CHECK(rb1.contains(data[DATASIZE-1]));
- rb1.clear();
+ rb1.reset();
BOOST_CHECK(!rb1.contains(data[DATASIZE-1]));
// Now roll through data, make sure last 100 entries
@@ -527,7 +527,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom)
BOOST_CHECK(nHits < 100);
// last-1000-entry, 0.1% false positive:
- CRollingBloomFilter rb2(1000, 0.001, 0);
+ CRollingBloomFilter rb2(1000, 0.001);
for (int i = 0; i < DATASIZE; i++) {
rb2.insert(data[i]);
}