Diffstat (limited to 'qa')
-rwxr-xr-x  qa/pull-tester/rpc-tests.sh    |   2
-rwxr-xr-x  qa/rpc-tests/merkle_blocks.py  |  90
-rwxr-xr-x  qa/rpc-tests/proxy_test.py     | 146
-rwxr-xr-x  qa/rpc-tests/pruning.py        | 356
-rw-r--r--  qa/rpc-tests/socks5.py         | 160
-rw-r--r--  qa/rpc-tests/util.py           |   7
-rwxr-xr-x  qa/rpc-tests/wallet.py         |  27
7 files changed, 786 insertions, 2 deletions
diff --git a/qa/pull-tester/rpc-tests.sh b/qa/pull-tester/rpc-tests.sh
index efeee45530..dd2f8d4e5e 100755
--- a/qa/pull-tester/rpc-tests.sh
+++ b/qa/pull-tester/rpc-tests.sh
@@ -27,6 +27,8 @@ testScripts=(
     'mempool_coinbase_spends.py'
     'httpbasics.py'
     'zapwallettxes.py'
+    'proxy_test.py'
+    'merkle_blocks.py'
 #   'forknotify.py'
 );
 if [ "x${ENABLE_BITCOIND}${ENABLE_UTILS}${ENABLE_WALLET}" = "x111" ]; then
diff --git a/qa/rpc-tests/merkle_blocks.py b/qa/rpc-tests/merkle_blocks.py
new file mode 100755
index 0000000000..a143d21a21
--- /dev/null
+++ b/qa/rpc-tests/merkle_blocks.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python2
+# Copyright (c) 2014 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# Test merkleblock fetch/validation
+#
+
+from test_framework import BitcoinTestFramework
+from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
+from util import *
+import os
+import shutil
+
+class MerkleBlockTest(BitcoinTestFramework):
+
+    def setup_chain(self):
+        print("Initializing test directory "+self.options.tmpdir)
+        initialize_chain_clean(self.options.tmpdir, 4)
+
+    def setup_network(self):
+        self.nodes = []
+        # Nodes 0/1 are "wallet" nodes
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
+        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
+        # Nodes 2/3 are used for testing
+        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
+        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[0], 2)
+        connect_nodes(self.nodes[0], 3)
+
+        self.is_network_split = False
+        self.sync_all()
+
+    def run_test(self):
+        print "Mining blocks..."
+        self.nodes[0].generate(105)
+        self.sync_all()
+
+        chain_height = self.nodes[1].getblockcount()
+        assert_equal(chain_height, 105)
+        assert_equal(self.nodes[1].getbalance(), 0)
+        assert_equal(self.nodes[2].getbalance(), 0)
+
+        node0utxos = self.nodes[0].listunspent(1)
+        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 50})
+        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
+        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 50})
+        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
+        assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
+
+        self.nodes[0].generate(1)
+        blockhash = self.nodes[0].getblockhash(chain_height + 1)
+        self.sync_all()
+
+        txlist = []
+        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
+        txlist.append(blocktxn[1])
+        txlist.append(blocktxn[2])
+
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
+
+        txin_spent = self.nodes[1].listunspent(1).pop()
+        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 50})
+        self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
+        self.nodes[0].generate(1)
+        self.sync_all()
+
+        txid_spent = txin_spent["txid"]
+        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
+
+        # We can't find the block from a fully-spent tx
+        assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
+        # ...but we can if we specify the block
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
+        # ...or if the first tx is not fully-spent
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
+        try:
+            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
+        except JSONRPCException:
+            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
+        # ...or if we have a -txindex
+        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
+
+if __name__ == '__main__':
+    MerkleBlockTest().main()
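For context: gettxoutproof returns a merkleblock whose partial merkle tree must resolve to the block's merkle root, which is what the verifytxoutproof calls above check. A minimal sketch of that root computation (illustrative only, independent of the test framework; 'txids' is a hypothetical list of txids in display byte order):

    import hashlib
    from binascii import unhexlify, hexlify

    def double_sha256(data):
        return hashlib.sha256(hashlib.sha256(data).digest()).digest()

    def merkle_root(txids):
        layer = [unhexlify(txid)[::-1] for txid in txids]  # display order -> internal byte order
        while len(layer) > 1:
            if len(layer) % 2:
                layer.append(layer[-1])  # odd count: pair the last hash with itself
            layer = [double_sha256(layer[i] + layer[i+1])
                     for i in range(0, len(layer), 2)]
        return hexlify(layer[0][::-1])  # back to display byte order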
diff --git a/qa/rpc-tests/proxy_test.py b/qa/rpc-tests/proxy_test.py
new file mode 100755
index 0000000000..d6d9e6725b
--- /dev/null
+++ b/qa/rpc-tests/proxy_test.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python2
+# Copyright (c) 2015 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+import socket
+import traceback, sys
+from binascii import hexlify
+import time, os
+
+from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
+from test_framework import BitcoinTestFramework
+from util import *
+'''
+Test plan:
+- Start bitcoinds with different proxy configurations
+- Use addnode to initiate connections
+- Verify that proxies are connected to, and the right connection command is given
+- Proxy configurations to test on the bitcoind side:
+    - `-proxy` (proxy everything)
+    - `-onion` (proxy just onions)
+    - `-proxyrandomize` (circuit randomization)
+- Proxy configurations to test on the proxy side:
+    - support no authentication (other proxy)
+    - support no authentication + user/pass authentication (Tor)
+    - proxy on IPv6
+
+- Create various proxies (as threads)
+- Create bitcoinds that connect to them
+- Manipulate the bitcoinds using addnode (onetry) and observe the effects
+
+addnode connect to IPv4
+addnode connect to IPv6
+addnode connect to onion
+addnode connect to generic DNS name
+'''
+
+class ProxyTest(BitcoinTestFramework):
+    def __init__(self):
+        # Create two proxies on different ports
+        # ... one unauthenticated
+        self.conf1 = Socks5Configuration()
+        self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
+        self.conf1.unauth = True
+        self.conf1.auth = False
+        # ... one supporting authenticated and unauthenticated (Tor)
+        self.conf2 = Socks5Configuration()
+        self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
+        self.conf2.unauth = True
+        self.conf2.auth = True
+        # ... one on IPv6 with similar configuration
+        self.conf3 = Socks5Configuration()
+        self.conf3.af = socket.AF_INET6
+        self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
+        self.conf3.unauth = True
+        self.conf3.auth = True
+
+        self.serv1 = Socks5Server(self.conf1)
+        self.serv1.start()
+        self.serv2 = Socks5Server(self.conf2)
+        self.serv2.start()
+        self.serv3 = Socks5Server(self.conf3)
+        self.serv3.start()
+
+    def setup_nodes(self):
+        # Note: proxies are not used to connect to local nodes;
+        # the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
+        return start_nodes(4, self.options.tmpdir, extra_args=[
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr), '-proxyrandomize=1'],
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr), '-onion=%s:%i' % (self.conf2.addr), '-proxyrandomize=0'],
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr), '-proxyrandomize=1'],
+            ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr), '-proxyrandomize=0']
+        ])
+
+    def node_test(self, node, proxies, auth):
+        rv = []
+        # Test: outgoing IPv4 connection through node
+        node.addnode("15.61.23.23:1234", "onetry")
+        cmd = proxies[0].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, "15.61.23.23")
+        assert_equal(cmd.port, 1234)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        # Test: outgoing IPv6 connection through node
+        node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
+        cmd = proxies[1].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
+        assert_equal(cmd.port, 5443)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        # Test: outgoing onion connection through node
+        node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
+        cmd = proxies[2].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, "bitcoinostk4e4re.onion")
+        assert_equal(cmd.port, 8333)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        # Test: outgoing DNS name connection through node
+        node.addnode("node.noumenon:8333", "onetry")
+        cmd = proxies[3].queue.get()
+        assert(isinstance(cmd, Socks5Command))
+        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
+        assert_equal(cmd.addr, "node.noumenon")
+        assert_equal(cmd.port, 8333)
+        if not auth:
+            assert_equal(cmd.username, None)
+            assert_equal(cmd.password, None)
+        rv.append(cmd)
+
+        return rv
+
+    def run_test(self):
+        # basic -proxy
+        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
+
+        # -proxy plus -onion
+        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
+
+        # -proxy plus -onion, -proxyrandomize
+        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
+        # Check that the credentials used for -proxyrandomize connections are unique
+        credentials = set((x.username, x.password) for x in rv)
+        assert_equal(len(credentials), 4)
+
+        # proxy on IPv6 localhost
+        self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
+
+if __name__ == '__main__':
+    ProxyTest().main()
+
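For context: the assertions above mirror the RFC 1928 framing that the dummy proxy in socks5.py parses. A minimal sketch of a CONNECT request for a name (illustrative only; assumes 'name' is a Python 2 byte string, as in these tests):

    def socks5_connect_request(name, port):
        # VER=0x05, CMD=0x01 (CONNECT), RSV=0x00, ATYP=0x03 (DOMAINNAME)
        req = bytearray([0x05, 0x01, 0x00, 0x03, len(name)])
        req.extend(bytearray(name))                    # length-prefixed name, e.g. "node.noumenon"
        req.extend([(port >> 8) & 0xff, port & 0xff])  # port, big-endian
        return req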
diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py
new file mode 100755
index 0000000000..f26cbee1e2
--- /dev/null
+++ b/qa/rpc-tests/pruning.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python2
+# Copyright (c) 2014 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#
+# Test pruning code
+# ********
+# WARNING:
+# This test uses 4GB of disk space and takes in excess of 30 mins to run
+# ********
+
+from test_framework import BitcoinTestFramework
+from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
+from util import *
+import os.path
+
+def calc_usage(blockdir):
+    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024*1024)
+
+class PruneTest(BitcoinTestFramework):
+
+    def __init__(self):
+        self.utxo = []
+        self.address = ["", ""]
+
+        # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create,
+        # so we have big transactions and full blocks to fill up our block files
+
+        # create one script_pubkey
+        script_pubkey = "6a4d0200"  # OP_RETURN OP_PUSHDATA2 512 bytes
+        for i in xrange(512):
+            script_pubkey = script_pubkey + "01"
+        # concatenate 128 txouts of the above script_pubkey, which we'll insert before the txout for change
+        self.txouts = "81"
+        for k in xrange(128):
+            # add txout value
+            self.txouts = self.txouts + "0000000000000000"
+            # add length of script_pubkey
+            self.txouts = self.txouts + "fd0402"
+            # add script_pubkey
+            self.txouts = self.txouts + script_pubkey
+
+    def setup_chain(self):
+        print("Initializing test directory "+self.options.tmpdir)
+        initialize_chain_clean(self.options.tmpdir, 3)
+
+    def setup_network(self):
+        self.nodes = []
+        self.is_network_split = False
+
+        # Create nodes 0 and 1 to mine
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5"], timewait=300))
+        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5"], timewait=300))
+
+        # Create node 2 to test pruning
+        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-prune=550"], timewait=300))
+        self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
+
+        self.address[0] = self.nodes[0].getnewaddress()
+        self.address[1] = self.nodes[1].getnewaddress()
+
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[1], 2)
+        connect_nodes(self.nodes[2], 0)
+        sync_blocks(self.nodes[0:3])
+
+    def create_big_chain(self):
+        # Start by creating some coinbases we can spend later
+        self.nodes[1].generate(200)
+        sync_blocks(self.nodes[0:2])
+        self.nodes[0].generate(150)
+        # Then mine enough full blocks to create more than 550MB of data
+        for i in xrange(645):
+            self.mine_full_block(self.nodes[0], self.address[0])
+
+        sync_blocks(self.nodes[0:3])
+
+    def test_height_min(self):
+        if not os.path.isfile(self.prunedir+"blk00000.dat"):
+            raise AssertionError("blk00000.dat is missing, pruning too early")
+        print "Success"
+        print "Though we're already using more than 550MB, current usage:", calc_usage(self.prunedir)
+        print "Mining 25 more blocks should cause the first block file to be pruned"
+        # Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
+        for i in xrange(25):
+            self.mine_full_block(self.nodes[0], self.address[0])
+
+        waitstart = time.time()
+        while os.path.isfile(self.prunedir+"blk00000.dat"):
+            time.sleep(0.1)
+            if time.time() - waitstart > 10:
+                raise AssertionError("blk00000.dat not pruned when it should be")
+
+        print "Success"
+        usage = calc_usage(self.prunedir)
+        print "Usage should be below target:", usage
+        if (usage > 550):
+            raise AssertionError("Pruning target not being met")
+
+    def create_chain_with_staleblocks(self):
+        # Create stale blocks in manageable-sized chunks
+        print "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds"
+
+        for j in xrange(12):
+            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
+            # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
+            # Stopping node 0 also clears its mempool, so it doesn't have node 1's transactions to accidentally mine
+            stop_node(self.nodes[0], 0)
+            self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5"], timewait=300)
+            # Mine 24 blocks on node 1
+            self.utxo = self.nodes[1].listunspent()
+            for i in xrange(24):
+                if j == 0:
+                    self.mine_full_block(self.nodes[1], self.address[1])
+                else:
+                    self.nodes[1].generate(1)  # txs already in mempool from previous disconnects
+
+            # Reorg back with 25 block chain from node 0
+            self.utxo = self.nodes[0].listunspent()
+            for i in xrange(25):
+                self.mine_full_block(self.nodes[0], self.address[0])
+
+            # Create connections in this order so both nodes can see the reorg at the same time
+            connect_nodes(self.nodes[1], 0)
+            connect_nodes(self.nodes[2], 0)
+            sync_blocks(self.nodes[0:3])
+
+        print "Usage can be over target because of high stale rate:", calc_usage(self.prunedir)
+
+    def reorg_test(self):
+        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
+        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
+        # Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
+        # Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
+        stop_node(self.nodes[1], 1)
+        self.nodes[1] = start_node(1, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=300)
+
+        height = self.nodes[1].getblockcount()
+        print "Current block height:", height
+
+        invalidheight = height - 287
+        badhash = self.nodes[1].getblockhash(invalidheight)
+        print "Invalidating block at height:", invalidheight, badhash
+        self.nodes[1].invalidateblock(badhash)
+
+        # We've now switched to our previously-mined 24-block fork on node 1, but that's not what we want
+        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
+        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
+        curhash = self.nodes[1].getblockhash(invalidheight - 1)
+        while curhash != mainchainhash:
+            self.nodes[1].invalidateblock(curhash)
+            curhash = self.nodes[1].getblockhash(invalidheight - 1)
+
+        assert(self.nodes[1].getblockcount() == invalidheight - 1)
+        print "New best height", self.nodes[1].getblockcount()
+
+        # Reboot node 1 to clear those giant txs from its mempool
+        stop_node(self.nodes[1], 1)
+        self.nodes[1] = start_node(1, self.options.tmpdir, ["-debug", "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=300)
+
+        print "Generating new longer chain of 300 more blocks"
+        self.nodes[1].generate(300)
+
+        print "Reconnect nodes"
+        connect_nodes(self.nodes[0], 1)
+        connect_nodes(self.nodes[2], 1)
+        sync_blocks(self.nodes[0:3])
+
+        print "Verify height on node 2:", self.nodes[2].getblockcount()
+        print "Usage possibly still high because of stale blocks in block files:", calc_usage(self.prunedir)
+
+        print "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)"
+        self.nodes[0].generate(220)  # node 0 has many large txs in its mempool from the disconnects
+        sync_blocks(self.nodes[0:3])
+
+        usage = calc_usage(self.prunedir)
+        print "Usage should be below target:", usage
+        if (usage > 550):
+            raise AssertionError("Pruning target not being met")
+
+        return invalidheight, badhash
+
+    def reorg_back(self):
+        # Verify that a block on the old main chain fork has been pruned away
+        try:
+            self.nodes[2].getblock(self.forkhash)
+            raise AssertionError("Old block wasn't pruned so can't test redownload")
+        except JSONRPCException as e:
+            print "Will need to redownload block", self.forkheight
+
+        # Verify that we have enough history to reorg back to the fork point.
+        # Although this is more than 288 blocks, because this chain was written more recently
+        # and only its other 299 small and 220 large blocks are in the block files after it,
+        # it's expected to still be retained
+        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
+
+        first_reorg_height = self.nodes[2].getblockcount()
+        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
+        self.nodes[2].invalidateblock(curchainhash)
+        goalbestheight = self.mainchainheight
+        goalbesthash = self.mainchainhash2
+
+        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
+        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
+        # redownload its missing blocks.
+        # Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
+        # because it has all the block data.
+        # However it must mine enough blocks to have a more-work chain than the reorg_test chain in order
+        # to trigger node 2's block download logic.
+        # At this point node 2 is within 288 blocks of the fork point, so it will preserve its ability to reorg
+        if self.nodes[2].getblockcount() < self.mainchainheight:
+            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
+            print "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine
Blocks needed:", blocks_to_mine + self.nodes[0].invalidateblock(curchainhash) + assert(self.nodes[0].getblockcount() == self.mainchainheight) + assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) + goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] + goalbestheight = first_reorg_height + 1 + + print "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload" + waitstart = time.time() + while self.nodes[2].getblockcount() < goalbestheight: + time.sleep(0.1) + if time.time() - waitstart > 300: + raise AssertionError("Node 2 didn't reorg to proper height") + assert(self.nodes[2].getbestblockhash() == goalbesthash) + # Verify we can now have the data for a block previously pruned + assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) + + def mine_full_block(self, node, address): + # Want to create a full block + # We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit + for j in xrange(14): + if len(self.utxo) < 14: + self.utxo = node.listunspent() + inputs=[] + outputs = {} + t = self.utxo.pop() + inputs.append({ "txid" : t["txid"], "vout" : t["vout"]}) + remchange = t["amount"] - Decimal("0.001000") + outputs[address]=remchange + # Create a basic transaction that will send change back to ourself after account for a fee + # And then insert the 128 generated transaction outs in the middle rawtx[92] is where the # + # of txouts is stored and is the only thing we overwrite from the original transaction + rawtx = node.createrawtransaction(inputs, outputs) + newtx = rawtx[0:92] + newtx = newtx + self.txouts + newtx = newtx + rawtx[94:] + # Appears to be ever so slightly faster to sign with SIGHASH_NONE + signresult = node.signrawtransaction(newtx,None,None,"NONE") + txid = node.sendrawtransaction(signresult["hex"], True) + # Mine a full sized block which will be these transactions we just created + node.generate(1) + + + def run_test(self): + print "Warning! This test requires 4GB of disk space and takes over 30 mins" + print "Mining a big blockchain of 995 blocks" + self.create_big_chain() + # Chain diagram key: + # * blocks on main chain + # +,&,$,@ blocks on other forks + # X invalidated block + # N1 Node 1 + # + # Start by mining a simple chain that all nodes have + # N0=N1=N2 **...*(995) + + print "Check that we haven't started pruning yet because we're below PruneAfterHeight" + self.test_height_min() + # Extend this chain past the PruneAfterHeight + # N0=N1=N2 **...*(1020) + + print "Check that we'll exceed disk space target if we have a very high stale block rate" + self.create_chain_with_staleblocks() + # Disconnect N0 + # And mine a 24 block chain on N1 and a separate 25 block chain on N0 + # N1=N2 **...*+...+(1044) + # N0 **...**...**(1045) + # + # reconnect nodes causing reorg on N1 and N2 + # N1=N2 **...*(1020) *...**(1045) + # \ + # +...+(1044) + # + # repeat this process until you have 12 stale forks hanging off the + # main chain on N1 and N2 + # N0 *************************...***************************(1320) + # + # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) + # \ \ \ + # +...+(1044) &.. 
+
+        # Save some current chain state for later use
+        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
+        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
+
+        print "Check that we can still survive a 288 block reorg"
+        (self.forkheight, self.forkhash) = self.reorg_test()  # (1033, )
+        # Now create a 288 block reorg by mining a longer chain on N1
+        # First disconnect N1
+        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
+        # N1   **...*(1020) **...**(1032)X..
+        #                  \
+        #                   ++...+(1031)X..
+        #
+        # Now mine 300 more blocks on N1
+        # N1   **...*(1020) **...**(1032) @@...@(1332)
+        #                  \            \
+        #                   \            X...
+        #                    \            \
+        #                     ++...+(1031)X..  ..
+        #
+        # Reconnect nodes and mine 220 more blocks on N1
+        # N1   **...*(1020) **...**(1032) @@...@@@(1552)
+        #                  \            \
+        #                   \            X...
+        #                    \            \
+        #                     ++...+(1031)X..  ..
+        #
+        # N2   **...*(1020) **...**(1032) @@...@@@(1552)
+        #                  \            \
+        #                   \            *...**(1320)
+        #                    \            \
+        #                     ++...++(1044)  ..
+        #
+        # N0   ********************(1032) @@...@@@(1552)
+        #                               \
+        #                                *...**(1320)
+
+        print "Test that we can rerequest a block we previously pruned if needed for a reorg"
+        self.reorg_back()
+        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
+        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
+        # original main chain (*), but will require redownload of some blocks
+        # In order to have a peer we think we can download from, must also perform this invalidation
+        # on N0 and mine a new longest chain to trigger it.
+        # Final result:
+        # N0   ********************(1032) **...****(1553)
+        #                               \
+        #                                X@...@@@(1552)
+        #
+        # N2   **...*(1020) **...**(1032) **...****(1553)
+        #                  \            \
+        #                   \            X@...@@@(1552)
+        #                    \
+        #                     +..
+        #
+        # N1 doesn't change because 1033 on main chain (*) is invalid
+
+        print "Done"
+
+if __name__ == '__main__':
+    PruneTest().main()
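For context, the sizes in __init__ and mine_full_block line up as follows (a quick check, illustrative only): each inserted txout is 8 value bytes, a 3-byte compact size ("fd0402" = 516), and a 516-byte OP_RETURN script; 128 of them give the "66k transaction" the comment mentions, and 14 such transactions stay just under -blockmaxsize=999000.

    script_len = len("6a4d0200") // 2 + 512  # OP_RETURN + OP_PUSHDATA2 + 2 length bytes, then 512 pushed bytes = 516
    txout_len = 8 + 3 + script_len           # value + compact size + script = 527 bytes
    per_tx = 128 * txout_len                 # 67456 bytes, ~66 KB per transaction
    per_block = 14 * per_tx                  # 944384 bytes, just under -blockmaxsize=999000
    print per_tx, per_block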
diff --git a/qa/rpc-tests/socks5.py b/qa/rpc-tests/socks5.py
new file mode 100644
index 0000000000..1dbfb98d5d
--- /dev/null
+++ b/qa/rpc-tests/socks5.py
@@ -0,0 +1,160 @@
+# Copyright (c) 2015 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+'''
+Dummy Socks5 server for testing.
+'''
+from __future__ import print_function, division, unicode_literals
+import socket, threading, Queue
+import traceback, sys
+
+### Protocol constants
+class Command:
+    CONNECT = 0x01
+
+class AddressType:
+    IPV4 = 0x01
+    DOMAINNAME = 0x03
+    IPV6 = 0x04
+
+### Utility functions
+def recvall(s, n):
+    '''Receive n bytes from a socket, or fail'''
+    rv = bytearray()
+    while n > 0:
+        d = s.recv(n)
+        if not d:
+            raise IOError('Unexpected end of stream')
+        rv.extend(d)
+        n -= len(d)
+    return rv
+
+### Implementation classes
+class Socks5Configuration(object):
+    '''Proxy configuration'''
+    def __init__(self):
+        self.addr = None # Bind address (must be set)
+        self.af = socket.AF_INET # Bind address family
+        self.unauth = False # Support unauthenticated
+        self.auth = False # Support authentication
+
+class Socks5Command(object):
+    '''Information about an incoming socks5 command'''
+    def __init__(self, cmd, atyp, addr, port, username, password):
+        self.cmd = cmd # Command (one of Command.*)
+        self.atyp = atyp # Address type (one of AddressType.*)
+        self.addr = addr # Address
+        self.port = port # Port to connect to
+        self.username = username
+        self.password = password
+    def __repr__(self):
+        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
+
+class Socks5Connection(object):
+    def __init__(self, serv, conn, peer):
+        self.serv = serv
+        self.conn = conn
+        self.peer = peer
+
+    def handle(self):
+        '''
+        Handle socks5 request according to RFC 1928
+        '''
+        try:
+            # Verify socks version
+            ver = recvall(self.conn, 1)[0]
+            if ver != 0x05:
+                raise IOError('Invalid socks version %i' % ver)
+            # Choose authentication method
+            nmethods = recvall(self.conn, 1)[0]
+            methods = bytearray(recvall(self.conn, nmethods))
+            method = None
+            if 0x02 in methods and self.serv.conf.auth:
+                method = 0x02 # username/password
+            elif 0x00 in methods and self.serv.conf.unauth:
+                method = 0x00 # unauthenticated
+            if method is None:
+                raise IOError('No supported authentication method was offered')
+            # Send response
+            self.conn.sendall(bytearray([0x05, method]))
+            # Read authentication (optional)
+            username = None
+            password = None
+            if method == 0x02:
+                ver = recvall(self.conn, 1)[0]
+                if ver != 0x01:
+                    raise IOError('Invalid auth packet version %i' % ver)
+                ulen = recvall(self.conn, 1)[0]
+                username = str(recvall(self.conn, ulen))
+                plen = recvall(self.conn, 1)[0]
+                password = str(recvall(self.conn, plen))
+                # Send authentication response
+                self.conn.sendall(bytearray([0x01, 0x00]))
+
+            # Read connect request
+            (ver, cmd, rsv, atyp) = recvall(self.conn, 4)
+            if ver != 0x05:
+                raise IOError('Invalid socks version %i in connect request' % ver)
+            if cmd != Command.CONNECT:
+                raise IOError('Unhandled command %i in connect request' % cmd)
+
+            if atyp == AddressType.IPV4:
+                addr = recvall(self.conn, 4)
+            elif atyp == AddressType.DOMAINNAME:
+                n = recvall(self.conn, 1)[0]
+                addr = str(recvall(self.conn, n))
+            elif atyp == AddressType.IPV6:
+                addr = recvall(self.conn, 16)
+            else:
+                raise IOError('Unknown address type %i' % atyp)
+            port_hi, port_lo = recvall(self.conn, 2)
+            port = (port_hi << 8) | port_lo
+
+            # Send dummy response
+            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
+
+            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
+            self.serv.queue.put(cmdin)
+            print('Proxy: ', cmdin)
+            # Fall through to disconnect
+        except Exception as e:
+            traceback.print_exc(file=sys.stderr)
+            self.serv.queue.put(e)
+        finally:
+            self.conn.close()
+
+class Socks5Server(object):
+    def __init__(self, conf):
+        self.conf = conf
+        self.s = socket.socket(conf.af)
+        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        self.s.bind(conf.addr)
+        self.s.listen(5)
+        self.running = False
+        self.thread = None
+        self.queue = Queue.Queue() # report connections and exceptions to client
+
+    def run(self):
+        while self.running:
+            (sockconn, peer) = self.s.accept()
+            if self.running:
+                conn = Socks5Connection(self, sockconn, peer)
+                thread = threading.Thread(None, conn.handle)
+                thread.daemon = True
+                thread.start()
+
+    def start(self):
+        assert(not self.running)
+        self.running = True
+        self.thread = threading.Thread(None, self.run)
+        self.thread.daemon = True
+        self.thread.start()
+
+    def stop(self):
+        self.running = False
+        # connect to self to end run loop
+        s = socket.socket(self.conf.af)
+        s.connect(self.conf.addr)
+        s.close()
+        self.thread.join()
+
diff --git a/qa/rpc-tests/util.py b/qa/rpc-tests/util.py
index 9ecee31959..cf789f48e2 100644
--- a/qa/rpc-tests/util.py
+++ b/qa/rpc-tests/util.py
@@ -158,7 +158,7 @@ def _rpchost_to_args(rpchost):
         rv += ['-rpcport=' + rpcport]
     return rv
 
-def start_node(i, dirname, extra_args=None, rpchost=None):
+def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None):
     """
     Start a bitcoind and return RPC connection to it
     """
@@ -172,7 +172,10 @@ def start_node(i, dirname, extra_args=None, rpchost=None):
             ["-rpcwait", "getblockcount"], stdout=devnull)
     devnull.close()
     url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
-    proxy = AuthServiceProxy(url)
+    if timewait is not None:
+        proxy = AuthServiceProxy(url, timeout=timewait)
+    else:
+        proxy = AuthServiceProxy(url)
     proxy.url = url # store URL on proxy for info
     return proxy
 
diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py
index 5f3178c606..08032fc538 100755
--- a/qa/rpc-tests/wallet.py
+++ b/qa/rpc-tests/wallet.py
@@ -151,6 +151,33 @@ class WalletTest (BitcoinTestFramework):
 
         assert(txid1 in self.nodes[3].getrawmempool())
 
+        #check if we can list a zero value tx as available coins
+        #1. create rawtx
+        #2. hex-change one output to 0.0
+        #3. sign and send
+        #4. check if recipient (node0) can list the zero value tx
+        usp = self.nodes[1].listunspent()
+        inputs = [{"txid": usp[0]['txid'], "vout": usp[0]['vout']}]
+        outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
+
+        rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (the low 4 bytes of the 8-byte value)
+        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
+        signedRawTx = self.nodes[1].signrawtransaction(rawTx)
+        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
+        zeroValueTxid = decRawTx['txid']
+        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
+
+        self.sync_all()
+        self.nodes[1].generate(1) #mine a block
+        self.sync_all()
+
+        unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspent output
+        found = False
+        for uTx in unspentTxs:
+            if uTx['txid'] == zeroValueTxid:
+                found = True
+                assert_equal(uTx['amount'], Decimal('0.00000000'))
+        assert(found)
 
         #do some -walletbroadcast tests
         stop_nodes(self.nodes)
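For context, the replace("c0833842", "00000000") in the wallet.py hunk works because output values are serialized as 8-byte little-endian satoshi counts: 11.11 BTC = 1,111,000,000 satoshis = 0x423883C0, which appears in the raw hex as "c0833842" followed by four zero bytes, so zeroing those four bytes zeroes the output. A quick check (Python 2, matching the tests):

    from decimal import Decimal
    import struct

    sats = int(Decimal("11.11") * 10**8)         # 1111000000 satoshis
    print struct.pack("<Q", sats).encode("hex")  # 'c083384200000000'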