author    | Jonas Schnelli <dev@jonasschnelli.ch> | 2015-11-11 10:10:48 +0100
committer | Jonas Schnelli <dev@jonasschnelli.ch> | 2015-11-13 21:04:12 +0100
commit    | d61fcff07112411a1e7c28984777480e0c0873aa (patch)
tree      | aeeba1fe49bc8b2cddf6fc6925e4f56adb02b47e
parent    | dbd2c135ddb96bdc3a4e870c2371cb1fac227135 (diff)
don't enforce maxuploadtargets disconnect for whitelisted peers
-rwxr-xr-x | qa/rpc-tests/maxuploadtarget.py | 37
-rw-r--r-- | src/main.cpp                    |  3
2 files changed, 37 insertions, 3 deletions
diff --git a/qa/rpc-tests/maxuploadtarget.py b/qa/rpc-tests/maxuploadtarget.py
index 148c5f37e4..e714465db1 100755
--- a/qa/rpc-tests/maxuploadtarget.py
+++ b/qa/rpc-tests/maxuploadtarget.py
@@ -195,7 +195,7 @@ class MaxUploadTest(BitcoinTestFramework):
         daily_buffer = 144 * 1000000
         max_bytes_available = max_bytes_per_day - daily_buffer
         success_count = max_bytes_available / old_block_size
-
+
         # 144MB will be reserved for relaying new blocks, so expect this to
         # succeed for ~70 tries.
         for i in xrange(success_count):
@@ -228,7 +228,7 @@ class MaxUploadTest(BitcoinTestFramework):
         test_nodes[1].send_message(getdata_request)
         test_nodes[1].wait_for_disconnect()
         assert_equal(len(self.nodes[0].getpeerinfo()), 1)
-
+
         print "Peer 1 disconnected after trying to download old block"

         print "Advancing system time on node to clear counters..."
@@ -245,5 +245,38 @@ class MaxUploadTest(BitcoinTestFramework):

         [c.disconnect_node() for c in connections]

+        #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
+        print "Restarting nodes with -whitelist=127.0.0.1"
+        stop_node(self.nodes[0], 0)
+        self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
+
+        #recreate/reconnect 3 test nodes
+        test_nodes = []
+        connections = []
+
+        for i in xrange(3):
+            test_nodes.append(TestNode())
+            connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
+            test_nodes[i].add_connection(connections[i])
+
+        NetworkThread().start() # Start up network handling in another thread
+        [x.wait_for_verack() for x in test_nodes]
+
+        #retrieve 20 blocks which should be enough to break the 1MB limit
+        getdata_request.inv = [CInv(2, big_new_block)]
+        for i in xrange(20):
+            test_nodes[1].send_message(getdata_request)
+            test_nodes[1].sync_with_ping()
+            assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
+
+        getdata_request.inv = [CInv(2, big_old_block)]
+        test_nodes[1].send_message(getdata_request)
+        test_nodes[1].wait_for_disconnect()
+        assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
+
+        print "Peer 1 still connected after trying to download old block (whitelisted)"
+
+        [c.disconnect_node() for c in connections]
+
 if __name__ == '__main__':
     MaxUploadTest().main()
diff --git a/src/main.cpp b/src/main.cpp
index 5208fbb031..4647112d4c 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -3867,8 +3867,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                     }
                 }
                 // disconnect node in case we have reached the outbound limit for serving historical blocks
+                // never disconnect whitelisted nodes
                 static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
-                if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) )
+                if (send && CNode::OutboundTargetReached(true) && ( ((pindexBestHeader != NULL) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
                 {
                     LogPrint("net", "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
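The behavioral change in src/main.cpp is a single extra clause: a peer requesting a historical block (older than one week) or a filtered block after the -maxuploadtarget budget is exhausted is disconnected only if it is not whitelisted. The following is a minimal, self-contained sketch of that predicate for illustration only; the names PeerInfo and ShouldDisconnectForHistoricalServing are hypothetical and not part of Bitcoin Core, which performs the check inline using CNode::OutboundTargetReached() and pfrom->fWhitelisted.

    #include <cassert>
    #include <cstdint>

    // Hypothetical, simplified stand-in for the peer state consulted by the check.
    // In Bitcoin Core this flag lives on CNode (pfrom->fWhitelisted).
    struct PeerInfo {
        bool fWhitelisted; // true for peers matched by -whitelist=<IP>
    };

    static const int64_t nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week old = historical

    // Returns true if the peer should be dropped for requesting historical blocks
    // once the upload target is spent. The trailing "!peer.fWhitelisted" term
    // mirrors the clause added by this commit.
    bool ShouldDisconnectForHistoricalServing(bool send,
                                              bool outboundTargetReached,
                                              int64_t bestHeaderTime,
                                              int64_t requestedBlockTime,
                                              bool isFilteredBlockRequest,
                                              const PeerInfo& peer)
    {
        bool historical = (bestHeaderTime - requestedBlockTime > nOneWeek) || isFilteredBlockRequest;
        return send && outboundTargetReached && historical && !peer.fWhitelisted;
    }

    int main()
    {
        const int64_t now = 1447326000;                   // arbitrary "best header" time
        const int64_t oldBlock = now - 30 * 24 * 60 * 60; // a month-old block

        PeerInfo normal{false};
        PeerInfo whitelisted{true};

        // Upload target reached: a normal peer asking for an old block is dropped...
        assert(ShouldDisconnectForHistoricalServing(true, true, now, oldBlock, false, normal));
        // ...but a whitelisted peer asking for the same block is kept.
        assert(!ShouldDisconnectForHistoricalServing(true, true, now, oldBlock, false, whitelisted));
        return 0;
    }

This is also what the new test exercises: after restarting node 0 with -whitelist=127.0.0.1 and -maxuploadtarget=1, fetching an old block no longer disconnects the peer, so getpeerinfo reports all three test connections instead of one.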